repo_name stringlengths 5 108 | path stringlengths 6 333 | size stringlengths 1 6 | content stringlengths 4 977k | license stringclasses 15
values |
|---|---|---|---|---|
derekhiggins/ovirt-engine | backend/manager/modules/bll/src/test/java/org/ovirt/engine/core/bll/storage/GetStoragePoolByIdQueryTest.java | 1734 | package org.ovirt.engine.core.bll.storage;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import org.junit.Test;
import org.ovirt.engine.core.bll.AbstractUserQueryTest;
import org.ovirt.engine.core.common.businessentities.storage_pool;
import org.ovirt.engine.core.common.queries.StoragePoolQueryParametersBase;
import org.ovirt.engine.core.compat.Guid;
import org.ovirt.engine.core.dal.dbbroker.DbFacade;
import org.ovirt.engine.core.dao.StoragePoolDAO;
/**
* A test case for {@link GetStoragePoolByIdQuery}.
* It does not test database implementation, but rather tests that the right delegations to the DAO occur.
*/
public class GetStoragePoolByIdQueryTest extends AbstractUserQueryTest<StoragePoolQueryParametersBase, GetStoragePoolByIdQuery<StoragePoolQueryParametersBase>> {

  @Test
  public void testExecuteQuery() {
    // Arrange: a pool id and the entity the DAO is stubbed to hand back.
    final Guid poolId = Guid.NewGuid();
    final storage_pool expectedPool = mock(storage_pool.class);

    final StoragePoolQueryParametersBase params = getQueryParameters();
    when(params.getStoragePoolId()).thenReturn(poolId);

    // Stub the DAO so the query can only succeed by delegating with the exact arguments.
    final StoragePoolDAO daoMock = mock(StoragePoolDAO.class);
    when(daoMock.get(poolId, getUser().getUserId(), params.isFiltered())).thenReturn(expectedPool);
    when(getDbFacadeMockInstance().getStoragePoolDAO()).thenReturn(daoMock);

    // Act.
    getQuery().executeQueryCommand();

    // Assert: the DAO's result must be what the query returns.
    final storage_pool actual = (storage_pool) getQuery().getQueryReturnValue().getReturnValue();
    assertEquals("Wrong storage pool returned", expectedPool, actual);
  }
}
| apache-2.0 |
lyandyjoe/slogan-taoxian | Wallapop/src/com/example/wallapop/utils/imageloader/cache/memory/MemoryCache.java | 1523 | /*******************************************************************************
* Copyright 2014 Sergey Tarasevich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package com.example.wallapop.utils.imageloader.cache.memory;
import java.util.Collection;
import android.graphics.Bitmap;
/**
* Interface for memory cache
*
* @author Sergey Tarasevich (nostra13[at]gmail[dot]com)
* @since 1.9.2
*/
public interface MemoryCache {
  /**
   * Puts a value into the cache under the given key.
   *
   * @param key   cache key identifying the bitmap
   * @param value bitmap to store
   * @return <b>true</b> - if value was put into cache successfully, <b>false</b> - if value was <b>not</b> put into
   * cache
   */
  boolean put(String key, Bitmap value);
  /** Returns the value stored under {@code key}, or {@code null} if there is no value for the key. */
  Bitmap get(String key);
  /** Removes the entry for {@code key}; returns the removed bitmap, or {@code null} if none was cached. */
  Bitmap remove(String key);
  /** Returns all keys currently present in the cache. */
  Collection<String> keys();
  /** Removes all items from the cache. */
  void clear();
}
| apache-2.0 |
jk1/intellij-community | python/src/com/jetbrains/python/psi/PyUtil.java | 79893 | // Copyright 2000-2017 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.jetbrains.python.psi;
import com.google.common.collect.Collections2;
import com.google.common.collect.Maps;
import com.intellij.application.options.CodeStyle;
import com.intellij.codeInsight.FileModificationService;
import com.intellij.codeInsight.completion.PrioritizedLookupElement;
import com.intellij.codeInsight.lookup.LookupElement;
import com.intellij.codeInsight.lookup.LookupElementBuilder;
import com.intellij.ide.fileTemplates.FileTemplate;
import com.intellij.ide.fileTemplates.FileTemplateManager;
import com.intellij.ide.scratch.ScratchFileService;
import com.intellij.injected.editor.VirtualFileWindow;
import com.intellij.lang.ASTFactory;
import com.intellij.lang.ASTNode;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.application.impl.ApplicationImpl;
import com.intellij.openapi.editor.Document;
import com.intellij.openapi.editor.Editor;
import com.intellij.openapi.editor.EditorFactory;
import com.intellij.openapi.editor.ex.EditorEx;
import com.intellij.openapi.editor.highlighter.EditorHighlighter;
import com.intellij.openapi.editor.highlighter.EditorHighlighterFactory;
import com.intellij.openapi.extensions.Extensions;
import com.intellij.openapi.module.Module;
import com.intellij.openapi.module.ModuleManager;
import com.intellij.openapi.module.ModuleUtilCore;
import com.intellij.openapi.progress.ProgressIndicator;
import com.intellij.openapi.progress.ProgressManager;
import com.intellij.openapi.progress.Task;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.projectRoots.Sdk;
import com.intellij.openapi.roots.ModuleRootManager;
import com.intellij.openapi.ui.MessageType;
import com.intellij.openapi.ui.popup.Balloon;
import com.intellij.openapi.ui.popup.JBPopupFactory;
import com.intellij.openapi.util.TextRange;
import com.intellij.openapi.util.io.FileUtil;
import com.intellij.openapi.util.io.FileUtilRt;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.openapi.vfs.LocalFileSystem;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.openapi.wm.WindowManager;
import com.intellij.psi.*;
import com.intellij.psi.stubs.StubElement;
import com.intellij.psi.util.*;
import com.intellij.ui.awt.RelativePoint;
import com.intellij.util.*;
import com.intellij.util.containers.ContainerUtil;
import com.jetbrains.NotNullPredicate;
import com.jetbrains.python.PyBundle;
import com.jetbrains.python.PyNames;
import com.jetbrains.python.PyTokenTypes;
import com.jetbrains.python.codeInsight.completion.OverwriteEqualsInsertHandler;
import com.jetbrains.python.codeInsight.controlflow.ScopeOwner;
import com.jetbrains.python.codeInsight.dataflow.scope.ScopeUtil;
import com.jetbrains.python.formatter.PyCodeStyleSettings;
import com.jetbrains.python.magicLiteral.PyMagicLiteralTools;
import com.jetbrains.python.psi.impl.*;
import com.jetbrains.python.psi.resolve.PyResolveContext;
import com.jetbrains.python.psi.resolve.QualifiedNameFinder;
import com.jetbrains.python.psi.resolve.RatedResolveResult;
import com.jetbrains.python.psi.stubs.PySetuptoolsNamespaceIndex;
import com.jetbrains.python.psi.types.*;
import com.jetbrains.python.refactoring.classes.PyDependenciesComparator;
import com.jetbrains.python.refactoring.classes.extractSuperclass.PyExtractSuperclassHelper;
import com.jetbrains.python.refactoring.classes.membersManager.PyMemberInfo;
import com.jetbrains.python.sdk.PythonSdkType;
import one.util.streamex.StreamEx;
import org.jetbrains.annotations.*;
import javax.swing.*;
import java.awt.*;
import java.io.File;
import java.io.IOException;
import java.util.*;
import java.util.List;
import static com.jetbrains.python.psi.PyFunction.Modifier.CLASSMETHOD;
import static com.jetbrains.python.psi.PyFunction.Modifier.STATICMETHOD;
public class PyUtil {
private static final boolean VERBOSE_MODE = System.getenv().get("_PYCHARM_VERBOSE_MODE") != null;
  /** Static utility holder; instantiation is forbidden. */
  private PyUtil() {
  }
/**
* @see PyUtil#flattenedParensAndTuples
*/
  /**
   * Recursively flattens nested parenthesized/tuple (and optionally list-literal and starred)
   * expressions into {@code receiver}, preserving left-to-right order.
   *
   * @param targets               expressions to unfold
   * @param receiver              list collecting the leaf expressions; also returned for chaining
   * @param unfoldListLiterals    if true, elements of list literals are unfolded as well
   * @param unfoldStarExpressions if true, starred expressions ({@code *x}) are unwrapped
   * @return {@code receiver}
   * @see PyUtil#flattenedParensAndTuples
   */
  protected static List<PyExpression> unfoldParentheses(PyExpression[] targets, List<PyExpression> receiver,
                                                        boolean unfoldListLiterals, boolean unfoldStarExpressions) {
    // NOTE: this proliferation of instanceofs is not very beautiful. Maybe rewrite using a visitor.
    for (PyExpression exp : targets) {
      if (exp instanceof PyParenthesizedExpression) {
        final PyParenthesizedExpression parenExpr = (PyParenthesizedExpression)exp;
        unfoldParentheses(new PyExpression[]{parenExpr.getContainedExpression()}, receiver, unfoldListLiterals, unfoldStarExpressions);
      }
      else if (exp instanceof PyTupleExpression) {
        final PyTupleExpression tupleExpr = (PyTupleExpression)exp;
        unfoldParentheses(tupleExpr.getElements(), receiver, unfoldListLiterals, unfoldStarExpressions);
      }
      else if (exp instanceof PyListLiteralExpression && unfoldListLiterals) {
        final PyListLiteralExpression listLiteral = (PyListLiteralExpression)exp;
        // Once inside a list literal, nested list literals keep being unfolded too.
        unfoldParentheses(listLiteral.getElements(), receiver, true, unfoldStarExpressions);
      }
      else if (exp instanceof PyStarExpression && unfoldStarExpressions) {
        unfoldParentheses(new PyExpression[]{((PyStarExpression)exp).getExpression()}, receiver, unfoldListLiterals, true);
      }
      else if (exp != null) {
        // Leaf expression; nulls (from syntactically incomplete nodes) are skipped.
        receiver.add(exp);
      }
    }
    return receiver;
  }
  /**
   * Flattens the representation of every element in targets, and puts all results together.
   * Elements of every tuple nested in target item are brought to the top level: (a, (b, (c, d))) -> (a, b, c, d)
   * Typical usage: {@code flattenedParensAndTuples(some_tuple.getExpressions())}.
   *
   * @param targets target elements.
   * @return the list of flattened expressions.
   */
  @NotNull
  public static List<PyExpression> flattenedParensAndTuples(PyExpression... targets) {
    return unfoldParentheses(targets, new ArrayList<>(targets.length), false, false);
  }

  /** Like {@link #flattenedParensAndTuples}, but also unfolds list literals and starred expressions. */
  @NotNull
  public static List<PyExpression> flattenedParensAndLists(PyExpression... targets) {
    return unfoldParentheses(targets, new ArrayList<>(targets.length), true, true);
  }

  /** Like {@link #flattenedParensAndTuples}, but also unfolds starred expressions (not list literals). */
  @NotNull
  public static List<PyExpression> flattenedParensAndStars(PyExpression... targets) {
    return unfoldParentheses(targets, new ArrayList<>(targets.length), false, true);
  }
/**
* Produce a reasonable representation of a PSI element, good for debugging.
*
* @param elt element to represent; nulls and invalid nodes are ok.
* @param cutAtEOL if true, representation stops at nearest EOL inside the element.
* @return the representation.
*/
@NotNull
@NonNls
public static String getReadableRepr(PsiElement elt, final boolean cutAtEOL) {
if (elt == null) return "null!";
ASTNode node = elt.getNode();
if (node == null) {
return "null";
}
else {
String s = node.getText();
int cut_pos;
if (cutAtEOL) {
cut_pos = s.indexOf('\n');
}
else {
cut_pos = -1;
}
if (cut_pos < 0) cut_pos = s.length();
return s.substring(0, Math.min(cut_pos, s.length()));
}
}
@Nullable
public static PyClass getContainingClassOrSelf(final PsiElement element) {
PsiElement current = element;
while (current != null && !(current instanceof PyClass)) {
current = current.getParent();
}
return (PyClass)current;
}
/**
* @param element for which to obtain the file
* @return PyFile, or null, if there's no containing file, or it is not a PyFile.
*/
@Nullable
public static PyFile getContainingPyFile(PyElement element) {
final PsiFile containingFile = element.getContainingFile();
return containingFile instanceof PyFile ? (PyFile)containingFile : null;
}
/**
* Shows an information balloon in a reasonable place at the top right of the window.
*
* @param project our project
* @param message the text, HTML markup allowed
* @param messageType message type, changes the icon and the background.
*/
// TODO: move to a better place
  public static void showBalloon(Project project, String message, MessageType messageType) {
    // ripped from com.intellij.openapi.vcs.changes.ui.ChangesViewBalloonProblemNotifier
    // For the default project, null is passed to getFrame() — NOTE(review): presumably yields the welcome frame; confirm.
    final JFrame frame = WindowManager.getInstance().getFrame(project.isDefault() ? null : project);
    if (frame == null) return;
    final JComponent component = frame.getRootPane();
    if (component == null) return;
    // Anchor the balloon 10px inside the top-right corner of the visible root pane.
    final Rectangle rect = component.getVisibleRect();
    final Point p = new Point(rect.x + rect.width - 10, rect.y + 10);
    final RelativePoint point = new RelativePoint(component, p);
    JBPopupFactory.getInstance().createHtmlTextBalloonBuilder(message, messageType.getDefaultIcon(), messageType.getPopupBackground(), null)
      .setShowCallout(false).setCloseButtonEnabled(true)
      .createBalloon().show(point, Balloon.Position.atLeft);
  }
  @NonNls
  /**
   * Returns a single-quoted string representation of {@code s} ({@code 'value'}),
   * or the literal "null" when {@code s} is null.
   */
  public static String nvl(Object s) {
    if (s != null) {
      return "'" + s.toString() + "'";
    }
    else {
      return "null";
    }
  }
/**
* Adds an item into a comma-separated list in a PSI tree. E.g. can turn "foo, bar" into "foo, bar, baz", adding commas as needed.
*
* @param parent the element to represent the list; we're adding a child to it.
* @param newItem the element we're inserting (the "baz" in the example).
* @param beforeThis node to mark the insertion point inside the list; must belong to a child of target. Set to null to add first element.
* @param isFirst true if we don't need a comma before the element we're adding.
* @param isLast true if we don't need a comma after the element we're adding.
*/
  public static void addListNode(PsiElement parent, PsiElement newItem, ASTNode beforeThis,
                                 boolean isFirst, boolean isLast, boolean addWhitespace) {
    // Bail out if the file cannot be made writable (e.g. VCS edit confirmation declined).
    if (!FileModificationService.getInstance().preparePsiElementForWrite(parent)) {
      return;
    }
    ASTNode node = parent.getNode();
    assert node != null;
    ASTNode itemNode = newItem.getNode();
    assert itemNode != null;
    Project project = parent.getProject();
    PyElementGenerator gen = PyElementGenerator.getInstance(project);
    // Every child is inserted before the same anchor, so the resulting order is:
    // [comma,] item [, comma] [, whitespace] — commas added only where needed.
    if (!isFirst) node.addChild(gen.createComma(), beforeThis);
    node.addChild(itemNode, beforeThis);
    if (!isLast) node.addChild(gen.createComma(), beforeThis);
    if (addWhitespace) node.addChild(ASTFactory.whitespace(" "), beforeThis);
  }
// TODO: move to a more proper place?
/**
* Determine the type of a special attribute. Currently supported: {@code __class__} and {@code __dict__}.
*
* @param ref reference to a possible attribute; only qualified references make sense.
* @return type, or null (if type cannot be determined, reference is not to a known attribute, etc.)
*/
  @Nullable
  public static PyType getSpecialAttributeType(@Nullable PyReferenceExpression ref, TypeEvalContext context) {
    if (ref != null) {
      PyExpression qualifier = ref.getQualifier();
      // Only qualified references ("x.__class__") can denote a special attribute.
      if (qualifier != null) {
        String attr_name = ref.getReferencedName();
        if (PyNames.__CLASS__.equals(attr_name)) {
          PyType qualifierType = context.getType(qualifier);
          if (qualifierType instanceof PyClassType) {
            return new PyClassTypeImpl(((PyClassType)qualifierType).getPyClass(), true); // always as class, never instance
          }
        }
        else if (PyNames.DICT.equals(attr_name)) {
          PyType qualifierType = context.getType(qualifier);
          // __dict__ is only typed here for class objects (definitions), not instances.
          if (qualifierType instanceof PyClassType && ((PyClassType)qualifierType).isDefinition()) {
            return PyBuiltinCache.getInstance(ref).getDictType();
          }
        }
      }
    }
    return null;
  }
/**
* Makes sure that 'thing' is not null; else throws an {@link IncorrectOperationException}.
*
* @param thing what we check.
* @return thing, if not null.
*/
  /**
   * Makes sure that 'thing' is not null; else throws an {@link IncorrectOperationException}.
   *
   * @param thing what we check.
   * @return thing, if not null.
   */
  @NotNull
  public static <T> T sure(T thing) {
    if (thing == null) throw new IncorrectOperationException();
    return thing;
  }

  /**
   * Makes sure that the 'thing' is true; else throws an {@link IncorrectOperationException}.
   *
   * @param thing condition that must hold.
   */
  public static void sure(boolean thing) {
    if (!thing) throw new IncorrectOperationException();
  }
  /** Returns true if the target expression is either an instance attribute or a class attribute. */
  public static boolean isAttribute(PyTargetExpression ex) {
    return isInstanceAttribute(ex) || isClassAttribute(ex);
  }
  /**
   * Checks whether {@code target} is an instance attribute: a target expression of the form
   * {@code self.attr = ...} assigned inside a method of a class.
   */
  public static boolean isInstanceAttribute(PyExpression target) {
    if (!(target instanceof PyTargetExpression)) {
      return false;
    }
    final ScopeOwner owner = ScopeUtil.getScopeOwner(target);
    if (owner instanceof PyFunction) {
      final PyFunction method = (PyFunction)owner;
      if (method.getContainingClass() != null) {
        // NOTE(review): stub presence alone is taken as proof of an instance attribute —
        // presumably only such targets inside methods get stubbed; confirm against the stub builder.
        if (method.getStub() != null) {
          return true;
        }
        final PyParameter[] params = method.getParameterList().getParameters();
        if (params.length > 0) {
          final PyTargetExpression targetExpr = (PyTargetExpression)target;
          final PyExpression qualifier = targetExpr.getQualifier();
          // The qualifier must textually match the first parameter (conventionally "self").
          return qualifier != null && qualifier.getText().equals(params[0].getName());
        }
      }
    }
    return false;
  }
  /** Returns true if the element is a target expression declared directly in a class body. */
  public static boolean isClassAttribute(PsiElement element) {
    return element instanceof PyTargetExpression && ScopeUtil.getScopeOwner(element) instanceof PyClass;
  }

  /** Returns true if the statement's condition is a {@code __name__ == '__main__'} check. */
  public static boolean isIfNameEqualsMain(PyIfStatement ifStatement) {
    final PyExpression condition = ifStatement.getIfPart().getCondition();
    return isNameEqualsMain(condition);
  }

  /**
   * Recursively matches {@code __name__ == '__main__'}, looking through parentheses
   * and into either side of an {@code or} expression.
   */
  private static boolean isNameEqualsMain(PyExpression condition) {
    if (condition instanceof PyParenthesizedExpression) {
      return isNameEqualsMain(((PyParenthesizedExpression)condition).getContainedExpression());
    }
    if (condition instanceof PyBinaryExpression) {
      PyBinaryExpression binaryExpression = (PyBinaryExpression)condition;
      if (binaryExpression.getOperator() == PyTokenTypes.OR_KEYWORD) {
        return isNameEqualsMain(binaryExpression.getLeftExpression()) || isNameEqualsMain(binaryExpression.getRightExpression());
      }
      final PyExpression rhs = binaryExpression.getRightExpression();
      // The right side is matched textually: it merely has to mention "__main__".
      return binaryExpression.getOperator() == PyTokenTypes.EQEQ &&
             binaryExpression.getLeftExpression().getText().equals(PyNames.NAME) &&
             rhs != null && rhs.getText().contains("__main__");
    }
    return false;
  }
/**
* Searches for a method wrapping given element.
*
* @param start element presumably inside a method
* @param deep if true, allow 'start' to be inside functions nested in a method; else, 'start' must be directly inside a method.
* @return if not 'deep', [0] is the method and [1] is the class; if 'deep', first several elements may be the nested functions,
* the last but one is the method, and the last is the class.
*/
  @Nullable
  public static List<PsiElement> searchForWrappingMethod(PsiElement start, boolean deep) {
    PsiElement seeker = start;
    List<PsiElement> ret = new ArrayList<>(2);
    while (seeker != null) {
      // Find the closest enclosing function, but never look past an enclosing class.
      PyFunction func = PsiTreeUtil.getParentOfType(seeker, PyFunction.class, true, PyClass.class);
      if (func != null) {
        PyClass cls = func.getContainingClass();
        if (cls != null) {
          // Found a real method: result ends with [..., method, class].
          ret.add(func);
          ret.add(cls);
          return ret;
        }
        else if (deep) {
          // A plain nested function: record it and keep climbing from there.
          ret.add(func);
          seeker = func;
        }
        else {
          return null; // no immediate class
        }
      }
      else {
        return null; // no function
      }
    }
    return null;
  }
public static boolean inSameFile(@NotNull PsiElement e1, @NotNull PsiElement e2) {
final PsiFile f1 = e1.getContainingFile();
final PsiFile f2 = e2.getContainingFile();
if (f1 == null || f2 == null) {
return false;
}
return f1 == f2;
}
public static boolean onSameLine(@NotNull PsiElement e1, @NotNull PsiElement e2) {
final PsiDocumentManager documentManager = PsiDocumentManager.getInstance(e1.getProject());
final Document document = documentManager.getDocument(e1.getContainingFile());
if (document == null || document != documentManager.getDocument(e2.getContainingFile())) {
return false;
}
return document.getLineNumber(e1.getTextOffset()) == document.getLineNumber(e2.getTextOffset());
}
  /**
   * Returns true if the element is declared at the top level of its file.
   * Prefers stub data when available, to avoid forcing the AST to load.
   */
  public static boolean isTopLevel(@NotNull PsiElement element) {
    if (element instanceof StubBasedPsiElement) {
      final StubElement stub = ((StubBasedPsiElement)element).getStub();
      if (stub != null) {
        final StubElement parentStub = stub.getParentStub();
        if (parentStub != null) {
          // Top-level means the parent stub is the file itself.
          return parentStub.getPsi() instanceof PsiFile;
        }
      }
    }
    // No stub: fall back to scope analysis on the AST.
    return ScopeUtil.getScopeOwner(element) instanceof PsiFile;
  }
public static void deletePycFiles(String pyFilePath) {
if (pyFilePath.endsWith(PyNames.DOT_PY)) {
List<File> filesToDelete = new ArrayList<>();
File pyc = new File(pyFilePath + "c");
if (pyc.exists()) {
filesToDelete.add(pyc);
}
File pyo = new File(pyFilePath + "o");
if (pyo.exists()) {
filesToDelete.add(pyo);
}
final File file = new File(pyFilePath);
File pycache = new File(file.getParentFile(), PyNames.PYCACHE);
if (pycache.isDirectory()) {
final String shortName = FileUtil.getNameWithoutExtension(file);
Collections.addAll(filesToDelete, pycache.listFiles(pathname -> {
if (!FileUtilRt.extensionEquals(pathname.getName(), "pyc")) return false;
String nameWithMagic = FileUtil.getNameWithoutExtension(pathname);
return FileUtil.getNameWithoutExtension(nameWithMagic).equals(shortName);
}));
}
FileUtil.asyncDelete(filesToDelete);
}
}
public static String getElementNameWithoutExtension(PsiNamedElement psiNamedElement) {
return psiNamedElement instanceof PyFile
? FileUtil.getNameWithoutExtension(((PyFile)psiNamedElement).getName())
: psiNamedElement.getName();
}
public static boolean hasUnresolvedAncestors(@NotNull PyClass cls, @NotNull TypeEvalContext context) {
for (PyClassLikeType type : cls.getAncestorTypes(context)) {
if (type == null) {
return true;
}
}
return false;
}
@NotNull
public static AccessDirection getPropertyAccessDirection(@NotNull PyFunction function) {
final Property property = function.getProperty();
if (property != null) {
if (property.getGetter().valueOrNull() == function) {
return AccessDirection.READ;
}
if (property.getSetter().valueOrNull() == function) {
return AccessDirection.WRITE;
}
else if (property.getDeleter().valueOrNull() == function) {
return AccessDirection.DELETE;
}
}
return AccessDirection.READ;
}
  /**
   * Removes the qualifier (and its trailing dot) from a reference expression, turning {@code a.b} into {@code b}.
   * When the qualifier is a call whose callee is itself qualified ({@code x.f().b}),
   * the call is replaced by the callee's qualifier instead ({@code x.b}).
   */
  public static void removeQualifier(@NotNull final PyReferenceExpression element) {
    final PyExpression qualifier = element.getQualifier();
    if (qualifier == null) return;
    if (qualifier instanceof PyCallExpression) {
      final PyExpression callee = ((PyCallExpression)qualifier).getCallee();
      if (callee instanceof PyReferenceExpression) {
        final PyExpression calleeQualifier = ((PyReferenceExpression)callee).getQualifier();
        if (calleeQualifier != null) {
          // Replace the whole call with the callee's own qualifier, keeping the dot intact.
          qualifier.replace(calleeQualifier);
          return;
        }
      }
    }
    // Delete the dot following the qualifier, then the qualifier itself.
    final PsiElement dot = PyPsiUtils.getNextNonWhitespaceSibling(qualifier);
    if (dot != null) dot.delete();
    qualifier.delete();
  }
/**
* Returns string that represents element in string search.
*
* @param element element to search
* @return string that represents element
*/
  @NotNull
  public static String computeElementNameForStringSearch(@NotNull final PsiElement element) {
    if (element instanceof PyFile) {
      // Files are searched for by module name, without the extension.
      return FileUtil.getNameWithoutExtension(((PyFile)element).getName());
    }
    if (element instanceof PsiDirectory) {
      return ((PsiDirectory)element).getName();
    }
    // Magic literals are always represented by their string values
    if ((element instanceof PyStringLiteralExpression) && PyMagicLiteralTools.isMagicLiteral(element)) {
      return ((StringLiteralExpression)element).getStringValue();
    }
    if (element instanceof PyElement) {
      final String name = ((PyElement)element).getName();
      if (name != null) {
        return name;
      }
    }
    // Last resort: raw text of the node (or of the element when it has no node).
    return element.getNode() != null ? element.getNode().getText() : element.getText();
  }

  /**
   * Returns true if the comprehension introduces its own scope:
   * anything except a list comprehension does; list comprehensions only do so since Python 3.
   */
  public static boolean isOwnScopeComprehension(@NotNull PyComprehensionElement comprehension) {
    final boolean isAtLeast30 = !LanguageLevel.forElement(comprehension).isPython2();
    final boolean isListComprehension = comprehension instanceof PyListCompExpression;
    return !isListComprehension || isAtLeast30;
  }
  /** Creates a name identifier node for the given element's project and language level. */
  public static ASTNode createNewName(PyElement element, String name) {
    return PyElementGenerator.getInstance(element.getProject()).createNameIdentifier(name, LanguageLevel.forElement(element));
  }
/**
* Finds element declaration by resolving its references top the top but not further than file (to prevent un-stubbing)
*
* @param elementToResolve element to resolve
* @return its declaration
*/
  @NotNull
  public static PsiElement resolveToTheTop(@NotNull final PsiElement elementToResolve) {
    PsiElement currentElement = elementToResolve;
    final Set<PsiElement> checkedElements = new HashSet<>(); // To prevent PY-20553
    while (true) {
      final PsiReference reference = currentElement.getReference();
      if (reference == null) {
        break;
      }
      final PsiElement resolve = reference.resolve();
      // Stop on resolution failure, on a cycle, on self-resolution,
      // or when resolution would leave the current file (to avoid un-stubbing).
      if (resolve == null || checkedElements.contains(resolve) || resolve.equals(currentElement) || !inSameFile(resolve, currentElement)) {
        break;
      }
      currentElement = resolve;
      checkedElements.add(resolve);
    }
    return currentElement;
  }
/**
* Note that returned list may contain {@code null} items, e.g. for unresolved import elements, originally wrapped
* in {@link com.jetbrains.python.psi.resolve.ImportedResolveResult}.
*/
  @NotNull
  public static List<PsiElement> multiResolveTopPriority(@NotNull PsiElement element, @NotNull PyResolveContext resolveContext) {
    if (element instanceof PyReferenceOwner) {
      // Poly-variant resolve: keep only the best-rated results.
      final PsiPolyVariantReference ref = ((PyReferenceOwner)element).getReference(resolveContext);
      return filterTopPriorityResults(ref.multiResolve(false));
    }
    else {
      final PsiReference reference = element.getReference();
      // The single result may itself be null (see the javadoc note on unresolved import elements).
      return reference != null ? Collections.singletonList(reference.resolve()) : Collections.emptyList();
    }
  }

  /** Same as above, for an already obtained poly-variant reference. */
  @NotNull
  public static List<PsiElement> multiResolveTopPriority(@NotNull PsiPolyVariantReference reference) {
    return filterTopPriorityResults(reference.multiResolve(false));
  }
  /** Keeps only the results whose rate equals the maximum rate; null elements are dropped. */
  @NotNull
  public static List<PsiElement> filterTopPriorityResults(@NotNull ResolveResult[] resolveResults) {
    if (resolveResults.length == 0) return Collections.emptyList();
    final int maxRate = getMaxRate(Arrays.asList(resolveResults));
    return StreamEx
      .of(resolveResults)
      .filter(resolveResult -> getRate(resolveResult) >= maxRate)
      .map(ResolveResult::getElement)
      .nonNull()
      .toList();
  }

  /** Keeps only the results whose rate equals the maximum rate, preserving input order. */
  @NotNull
  public static <E extends ResolveResult> List<E> filterTopPriorityResults(@NotNull List<E> resolveResults) {
    if (resolveResults.isEmpty()) return Collections.emptyList();
    final int maxRate = getMaxRate(resolveResults);
    return ContainerUtil.filter(resolveResults, resolveResult -> getRate(resolveResult) >= maxRate);
  }

  /** Highest rate among the results; MIN_VALUE for an empty list. */
  private static int getMaxRate(@NotNull List<? extends ResolveResult> resolveResults) {
    return resolveResults
      .stream()
      .mapToInt(PyUtil::getRate)
      .max()
      .orElse(Integer.MIN_VALUE);
  }

  /** Unrated results (plain {@link ResolveResult}) are treated as rate 0. */
  private static int getRate(@NotNull ResolveResult resolveResult) {
    return resolveResult instanceof RatedResolveResult ? ((RatedResolveResult)resolveResult).getRate() : 0;
  }
/**
* Gets class init method
*
* @param pyClass class where to find init
* @return class init method if any
*/
  @Nullable
  public static PyFunction getInitMethod(@NotNull final PyClass pyClass) {
    // Only the class's own __init__ is considered (no inherited lookup).
    return pyClass.findMethodByName(PyNames.INIT, false, null);
  }
/**
* Returns Python language level for a virtual file.
*
* @see LanguageLevel#forElement
*/
  @NotNull
  public static LanguageLevel getLanguageLevelForVirtualFile(@NotNull Project project,
                                                             @NotNull VirtualFile virtualFile) {
    // For injected fragments, take the language level of the host file.
    if (virtualFile instanceof VirtualFileWindow) {
      virtualFile = ((VirtualFileWindow)virtualFile).getDelegate();
    }
    // Most of the cases should be handled by this one, PyLanguageLevelPusher pushes folders only
    final VirtualFile folder = virtualFile.getParent();
    if (folder != null) {
      final LanguageLevel folderLevel = folder.getUserData(LanguageLevel.KEY);
      if (folderLevel != null) {
        return folderLevel;
      }
      final LanguageLevel fileLevel = PythonLanguageLevelPusher.getFileLanguageLevel(project, virtualFile);
      if (fileLevel != null) {
        return fileLevel;
      }
    }
    else {
      // However this allows us to setup language level per file manually
      // in case when it is LightVirtualFile
      final LanguageLevel level = virtualFile.getUserData(LanguageLevel.KEY);
      if (level != null) return level;
      if (ApplicationManager.getApplication().isUnitTestMode()) {
        final LanguageLevel languageLevel = LanguageLevel.FORCE_LANGUAGE_LEVEL;
        if (languageLevel != null) {
          return languageLevel;
        }
      }
    }
    // Nothing explicit found: derive from the project's SDKs (result cached per project).
    return guessLanguageLevelWithCaching(project);
  }

  /** Drops the cached project-wide language level so the next query recomputes it. */
  public static void invalidateLanguageLevelCache(@NotNull Project project) {
    project.putUserData(PythonLanguageLevelPusher.PYTHON_LANGUAGE_LEVEL, null);
  }

  /**
   * Returns the project-wide guessed language level, computing and caching it on first access.
   *
   * @see #guessLanguageLevel(Project)
   */
  @NotNull
  public static LanguageLevel guessLanguageLevelWithCaching(@NotNull Project project) {
    LanguageLevel languageLevel = project.getUserData(PythonLanguageLevelPusher.PYTHON_LANGUAGE_LEVEL);
    if (languageLevel == null) {
      languageLevel = guessLanguageLevel(project);
      project.putUserData(PythonLanguageLevelPusher.PYTHON_LANGUAGE_LEVEL, languageLevel);
    }
    return languageLevel;
  }
  /**
   * Guesses the language level as the newest level among all Python SDKs assigned to the
   * project's modules, falling back to {@link LanguageLevel#getDefault()} when none is configured.
   */
  @NotNull
  public static LanguageLevel guessLanguageLevel(@NotNull Project project) {
    final ModuleManager moduleManager = ModuleManager.getInstance(project);
    if (moduleManager != null) {
      LanguageLevel maxLevel = null;
      for (Module projectModule : moduleManager.getModules()) {
        final Sdk sdk = PythonSdkType.findPythonSdk(projectModule);
        if (sdk != null) {
          final LanguageLevel level = PythonSdkType.getLanguageLevelForSdk(sdk);
          // Keep the newest level seen so far.
          if (maxLevel == null || maxLevel.isOlderThan(level)) {
            maxLevel = level;
          }
        }
      }
      if (maxLevel != null) {
        return maxLevel;
      }
    }
    return LanguageLevel.getDefault();
  }
/**
* Clone of C# "as" operator.
* Checks if expression has correct type and casts it if it has. Returns null otherwise.
* It saves coder from "instanceof / cast" chains.
*
* @param expression expression to check
* @param clazz class to cast
* @param <T> class to cast
* @return expression casted to appropriate type (if could be casted). Null otherwise.
*/
  @Nullable
  @SuppressWarnings("unchecked")
  public static <T> T as(@Nullable final Object expression, @NotNull final Class<T> clazz) {
    // Returns null when expression is null or not an instance of clazz.
    return ObjectUtils.tryCast(expression, clazz);
  }
// TODO: Move to PsiElement?
/**
* Searches for references injected to element with certain type
*
* @param element element to search injected references for
* @param expectedClass expected type of element reference resolved to
* @param <T> expected type of element reference resolved to
* @return resolved element if found or null if not found
*/
  @Nullable
  public static <T extends PsiElement> T findReference(@NotNull final PsiElement element, @NotNull final Class<T> expectedClass) {
    // The first reference that resolves to an element of the expected type wins.
    for (final PsiReference reference : element.getReferences()) {
      final T result = as(reference.resolve(), expectedClass);
      if (result != null) {
        return result;
      }
    }
    return null;
  }
/**
* Converts collection to list of certain type
*
* @param expression expression of collection type
* @param elementClass expected element type
* @param <T> expected element type
* @return list of elements of expected element type
*/
@NotNull
public static <T> List<T> asList(@Nullable final Collection<?> expression, @NotNull final Class<T> elementClass) {
if ((expression == null) || expression.isEmpty()) {
return Collections.emptyList();
}
final List<T> result = new ArrayList<>();
for (final Object element : expression) {
final T toAdd = as(element, elementClass);
if (toAdd != null) {
result.add(toAdd);
}
}
return result;
}
/**
* Force re-highlighting in all open editors that belong to specified project.
*/
  public static void rehighlightOpenEditors(final @NotNull Project project) {
    ApplicationManager.getApplication().runWriteAction(() -> {
      for (Editor editor : EditorFactory.getInstance().getAllEditors()) {
        // Only touch editors that belong to this project and are backed by a real file.
        if (editor instanceof EditorEx && editor.getProject() == project) {
          final VirtualFile vFile = ((EditorEx)editor).getVirtualFile();
          if (vFile != null) {
            // Recreate the highlighter from scratch so updated settings take effect.
            final EditorHighlighter highlighter = EditorHighlighterFactory.getInstance().createEditorHighlighter(project, vFile);
            ((EditorEx)editor).setHighlighter(highlighter);
          }
        }
      }
    });
  }
/**
* Calculates and caches value based on param. Think about it as about map with param as key which flushes on each psi modification.
* <p>
* For nullable function see {@link #getNullableParameterizedCachedValue(PsiElement, Object, NullableFunction)}.
* <p>
* This function is used instead of {@link CachedValuesManager#createParameterizedCachedValue(ParameterizedCachedValueProvider, boolean)}
* because parameter is not used as key there but used only for first calculation. Hence this should have functional dependency on element.
*
* @param element place to store cache
* @param param param to be used as key
* @param f function to produce value for key
* @param <T> value type
* @param <P> key type
*/
  @NotNull
  public static <T, P> T getParameterizedCachedValue(@NotNull PsiElement element, @Nullable P param, @NotNull NotNullFunction<P, T> f) {
    // The provider is declared NotNull, so a null result is a programming error.
    final T result = getNullableParameterizedCachedValue(element, param, f);
    assert result != null;
    return result;
  }

  /**
   * Same as {@link #getParameterizedCachedValue(PsiElement, Object, NotNullFunction)} but allows nulls.
   */
  @Nullable
  public static <T, P> T getNullableParameterizedCachedValue(@NotNull PsiElement element,
                                                             @Nullable P param,
                                                             @NotNull NullableFunction<P, T> f) {
    final CachedValuesManager manager = CachedValuesManager.getManager(element.getProject());
    // One map per function class, stored on the element and flushed on any PSI modification.
    final Map<Optional<P>, Optional<T>> cache = CachedValuesManager.getCachedValue(element, manager.getKeyForClass(f.getClass()), () -> {
      // concurrent hash map is a null-hostile collection
      return CachedValueProvider.Result.create(Maps.newConcurrentMap(), PsiModificationTracker.MODIFICATION_COUNT);
    });
    // Don't use ConcurrentHashMap#computeIfAbsent(), it blocks if the function tries to update the cache recursively for the same key
    // during computation. We can accept here that some values will be computed several times due to non-atomic updates.
    final Optional<P> wrappedParam = Optional.ofNullable(param);
    Optional<T> value = cache.get(wrappedParam);
    if (value == null) {
      value = Optional.ofNullable(f.fun(param));
      cache.put(wrappedParam, value);
    }
    return value.orElse(null);
  }
/**
 * This method is allowed to be called from any thread, but in general you should not set {@code modal=true} if you're calling it
 * from the write action, because in this case {@code function} will be executed right in the current thread (presumably EDT)
 * without any progress whatsoever to avoid possible deadlock.
 *
 * @param project        project to associate the progress with; may be null
 * @param title          title shown on the progress dialog or in the background tasks widget
 * @param modal          whether to run as a modal dialog or as a background task
 * @param canBeCancelled whether the user may cancel the progress
 * @param function       work to perform; receives the active progress indicator
 * @see ApplicationImpl#runProcessWithProgressSynchronously(Runnable, String, boolean, Project, JComponent, String)
 */
public static void runWithProgress(@Nullable Project project, @Nls(capitalization = Nls.Capitalization.Title) @NotNull String title,
                                   boolean modal, boolean canBeCancelled, @NotNull final Consumer<ProgressIndicator> function) {
  if (modal) {
    ProgressManager.getInstance().run(new Task.Modal(project, title, canBeCancelled) {
      @Override
      public void run(@NotNull ProgressIndicator indicator) {
        function.consume(indicator);
      }
    });
  }
  else {
    ProgressManager.getInstance().run(new Task.Backgroundable(project, title, canBeCancelled) {
      @Override
      public void run(@NotNull ProgressIndicator indicator) {
        function.consume(indicator);
      }
    });
  }
}
/**
 * Executes code only if <pre>_PYCHARM_VERBOSE_MODE</pre> is set in env (which should be done for debug purposes only)
 *
 * @param runnable code to call
 */
public static void verboseOnly(@NotNull final Runnable runnable) {
  if (VERBOSE_MODE) {
    runnable.run();
  }
}
/**
 * Returns the line comment that immediately precedes statement list of the given compound statement. Python parser ensures
 * that it follows the statement header, i.e. it's directly after the colon, not on its own line.
 *
 * @param container compound statement (function, class, loop, etc.) to inspect
 * @return the trailing comment on the header line, or null if there is none
 */
@Nullable
public static PsiComment getCommentOnHeaderLine(@NotNull PyStatementListContainer container) {
  return as(getHeaderEndAnchor(container), PsiComment.class);
}
/**
 * Returns the last non-whitespace element before the statement list of the given compound statement,
 * i.e. either the colon of the header or a trailing comment after it.
 */
@NotNull
public static PsiElement getHeaderEndAnchor(@NotNull PyStatementListContainer container) {
  final PyStatementList statementList = container.getStatementList();
  // The parser guarantees a header (at least the colon) before the statement list, so the sibling is never null
  return ObjectUtils.notNull(PyPsiUtils.getPrevNonWhitespaceSibling(statementList));
}
/**
 * Checks whether the given reference is an unqualified use of a name that is an ordinary identifier in Python 2
 * but became a reserved word later ({@code None}, {@code True}, {@code False}).
 *
 * @param node reference expression to check
 * @return true iff the element is in Python 2 code, unqualified, and named None/True/False
 */
public static boolean isPy2ReservedWord(@NotNull PyReferenceExpression node) {
  // Only relevant for Python 2 and only for plain (unqualified) names
  if (!LanguageLevel.forElement(node).isPython2() || node.isQualified()) {
    return false;
  }
  final String name = node.getName();
  return PyNames.NONE.equals(name) || PyNames.FALSE.equals(name) || PyNames.TRUE.equals(name);
}
/**
 * Retrieves the document from {@link PsiDocumentManager} using the anchor PSI element and, if it's not null,
 * passes it to the consumer function.
 * <p>
 * The document is first released from pending PSI operations and then committed after the function has been applied
 * in a {@code try/finally} block, so that subsequent operations on PSI could be performed.
 *
 * @param anchor   PSI element whose containing file's document is updated
 * @param consumer document mutation to perform
 * @see PsiDocumentManager#doPostponedOperationsAndUnblockDocument(Document)
 * @see PsiDocumentManager#commitDocument(Document)
 * @see #updateDocumentUnblockedAndCommitted(PsiElement, Function)
 */
public static void updateDocumentUnblockedAndCommitted(@NotNull PsiElement anchor, @NotNull Consumer<Document> consumer) {
  // Delegate to the Function-based overload, discarding the (absent) result
  updateDocumentUnblockedAndCommitted(anchor, document -> {
    consumer.consume(document);
    return null;
  });
}
/**
 * Same as {@link #updateDocumentUnblockedAndCommitted(PsiElement, Consumer)} but returns the result of the function.
 *
 * @param anchor PSI element whose containing file's document is updated
 * @param func   document mutation to perform
 * @return the value produced by {@code func}, or null if no document exists for the anchor's file
 */
@Nullable
public static <T> T updateDocumentUnblockedAndCommitted(@NotNull PsiElement anchor, @NotNull Function<Document, T> func) {
  final PsiDocumentManager manager = PsiDocumentManager.getInstance(anchor.getProject());
  final Document document = manager.getDocument(anchor.getContainingFile());
  if (document != null) {
    manager.doPostponedOperationsAndUnblockDocument(document);
    try {
      return func.fun(document);
    }
    finally {
      // Always re-commit so subsequent PSI operations see a consistent tree, even if func throws
      manager.commitDocument(document);
    }
  }
  return null;
}
/**
 * Returns the type a call to the given function should be analyzed as producing.
 * For {@code __init__} this is the instance type of the containing class (possibly provided by a
 * {@link PyTypeProvider} extension); for any other function it's the declared/inferred return type.
 *
 * @param function function being called
 * @param context  type evaluation context
 * @return the effective call result type, or null if it cannot be determined
 */
@Nullable
public static PyType getReturnTypeToAnalyzeAsCallType(@NotNull PyFunction function, @NotNull TypeEvalContext context) {
  if (isInit(function)) {
    final PyClass cls = function.getContainingClass();
    if (cls != null) {
      // Extensions may supply a generic (parameterized) class type; prefer it over the plain one
      for (PyTypeProvider provider : Extensions.getExtensions(PyTypeProvider.EP_NAME)) {
        final PyType providedClassType = provider.getGenericType(cls, context);
        if (providedClassType != null) {
          return providedClassType;
        }
      }
      final PyInstantiableType classType = as(context.getType(cls), PyInstantiableType.class);
      if (classType != null) {
        // __init__ effectively "returns" an instance of its class
        return classType.toInstance();
      }
    }
  }
  return context.getReturnType(function);
}
/**
 * Create a new expressions fragment from the given text, setting the specified element as its context,
 * and return the contained expression of the first expression statement in it.
 *
 * @param expressionText text of the expression
 * @param context context element used to resolve symbols in the expression
 * @return instance of {@link PyExpression} as described, or null if the text does not parse to an expression statement
 * @see PyExpressionCodeFragment
 */
@Nullable
public static PyExpression createExpressionFromFragment(@NotNull String expressionText, @NotNull PsiElement context) {
  // "dummy.py" is just a synthetic name for the in-memory fragment; it is never written to disk
  final PyExpressionCodeFragmentImpl codeFragment =
    new PyExpressionCodeFragmentImpl(context.getProject(), "dummy.py", expressionText, false);
  codeFragment.setContext(context);
  final PyExpressionStatement statement = as(codeFragment.getFirstChild(), PyExpressionStatement.class);
  return statement != null ? statement.getExpression() : null;
}
/**
 * Initialization-on-demand holder: defers loading of {@link PyKnownDecoratorProvider} extensions
 * until the field is first accessed (class loading is lazy and thread-safe).
 */
public static class KnownDecoratorProviderHolder {
  public static final PyKnownDecoratorProvider[] KNOWN_DECORATOR_PROVIDERS = Extensions.getExtensions(PyKnownDecoratorProvider.EP_NAME);
  private KnownDecoratorProviderHolder() {
  }
}
/**
 * If argument is a PsiDirectory, turn it into a PsiFile that points to __init__.py in that directory.
 * If there's no __init__.py there, null is returned, there's no point to resolve to a dir which is not a package.
 * Alas, resolve() and multiResolve() can't return anything but a PyFile or PsiFileImpl.isPsiUpToDate() would fail.
 * This is because isPsiUpToDate() relies on identity of objects returned by FileViewProvider.getPsi().
 * If we ever need to exactly tell a dir from __init__.py, that logic has to change.
 *
 * @param target a resolve candidate.
 * @return a PsiFile if target was a PsiDirectory, or null, or target unchanged.
 */
@Nullable
public static PsiElement turnDirIntoInit(@Nullable PsiElement target) {
  if (!(target instanceof PsiDirectory)) {
    // Non-directories (including null) pass through untouched
    return target;
  }
  final PsiDirectory dir = (PsiDirectory)target;
  // A type stub (__init__.pyi) takes precedence over the real __init__.py
  final PsiFile initStub = dir.findFile(PyNames.INIT_DOT_PYI);
  if (initStub != null) {
    return initStub;
  }
  // findFile() yields null when __init__.py is absent: a dir without it is not a package and does not resolve
  return dir.findFile(PyNames.INIT_DOT_PY);
}
/**
 * If directory is a PsiDirectory, that is also a valid Python package, return PsiFile that points to __init__.py,
 * if such file exists, or directory itself (i.e. namespace package). Otherwise, return {@code null}.
 * Unlike {@link #turnDirIntoInit(PsiElement)} this function handles namespace packages and
 * accepts only PsiDirectories as target.
 *
 * @param directory directory to check
 * @param anchor optional PSI element to determine language level as for {@link #isPackage(PsiDirectory, PsiElement)}
 * @return PsiFile or PsiDirectory, if target is a Python package and {@code null} null otherwise
 */
@Nullable
public static PsiElement getPackageElement(@NotNull PsiDirectory directory, @Nullable PsiElement anchor) {
  if (isPackage(directory, anchor)) {
    final PsiElement init = turnDirIntoInit(directory);
    if (init != null) {
      return init;
    }
    // No __init__.py — this is a namespace package, represented by the directory itself
    return directory;
  }
  return null;
}
/**
 * If target is a Python module named __init__.py file, return its directory. Otherwise return target unchanged.
 *
 * @param target PSI element to check
 * @return PsiDirectory or target unchanged
 */
@Contract("null -> null")
@Nullable
public static PsiElement turnInitIntoDir(@Nullable PsiElement target) {
  if (target instanceof PyFile && isPackage((PsiFile)target)) {
    return ((PsiFile)target).getContainingDirectory();
  }
  return target;
}
/**
 * Convenience overload that also considers setuptools namespace packages.
 *
 * @see #isPackage(PsiDirectory, boolean, PsiElement)
 */
public static boolean isPackage(@NotNull PsiDirectory directory, @Nullable PsiElement anchor) {
  return isPackage(directory, true, anchor);
}
/**
 * Checks that given PsiDirectory can be treated as Python package, i.e. it's either contains __init__.py or it's a namespace package
 * (effectively any directory in Python 3.3 and above). Setuptools namespace packages can be checked as well, but it requires access to
 * {@link PySetuptoolsNamespaceIndex} and may slow things down during update of project indexes.
 * Also note that this method does not check that directory itself and its parents have valid importable names,
 * use {@link PyNames#isIdentifier(String)} for this purpose.
 *
 * @param directory PSI directory to check
 * @param checkSetupToolsPackages whether setuptools namespace packages should be considered as well
 * @param anchor optional anchor element to determine language level
 * @return whether given directory is Python package
 * @see PyNames#isIdentifier(String)
 */
public static boolean isPackage(@NotNull PsiDirectory directory, boolean checkSetupToolsPackages, @Nullable PsiElement anchor) {
  // Plugins may declare their own notion of a package (e.g. for frameworks)
  for (PyCustomPackageIdentifier customPackageIdentifier : PyCustomPackageIdentifier.EP_NAME.getExtensions()) {
    if (customPackageIdentifier.isPackage(directory)) {
      return true;
    }
  }
  if (directory.findFile(PyNames.INIT_DOT_PY) != null) {
    return true;
  }
  final LanguageLevel level = anchor != null ?
                              LanguageLevel.forElement(anchor) :
                              getLanguageLevelForVirtualFile(directory.getProject(), directory.getVirtualFile());
  // In Python 3 every directory is a potential namespace package (PEP 420)
  if (!level.isPython2()) {
    return true;
  }
  // Setuptools-style namespace packages are index-backed and may be slow — checked last
  return checkSetupToolsPackages && isSetuptoolsNamespacePackage(directory);
}
/**
 * Checks whether the given file marks its containing directory as a package, i.e. it is __init__.py
 * or is recognized by a {@link PyCustomPackageIdentifier} extension.
 */
public static boolean isPackage(@NotNull PsiFile file) {
  for (PyCustomPackageIdentifier customPackageIdentifier : PyCustomPackageIdentifier.EP_NAME.getExtensions()) {
    if (customPackageIdentifier.isPackageFile(file)) {
      return true;
    }
  }
  return PyNames.INIT_DOT_PY.equals(file.getName());
}
/**
 * Dispatches the package check depending on whether the anchor is a file or a directory.
 *
 * @param anchor   file or directory to check
 * @param location optional PSI element used to determine the language level for directories
 */
public static boolean isPackage(@NotNull PsiFileSystemItem anchor, @Nullable PsiElement location) {
  if (anchor instanceof PsiFile) {
    return isPackage((PsiFile)anchor);
  }
  return anchor instanceof PsiDirectory && isPackage((PsiDirectory)anchor, location);
}
// Checks the index of setuptools-declared namespace packages; requires an importable qualified name for the directory.
private static boolean isSetuptoolsNamespacePackage(@NotNull PsiDirectory directory) {
  final String packagePath = getPackagePath(directory);
  return packagePath != null && !PySetuptoolsNamespaceIndex.find(packagePath, directory.getProject()).isEmpty();
}
// Returns the dotted importable name of the directory (e.g. "pkg.sub"), or null if it is not importable.
@Nullable
private static String getPackagePath(@NotNull PsiDirectory directory) {
  final QualifiedName name = QualifiedNameFinder.findShortestImportableQName(directory);
  return name != null ? name.toString() : null;
}
/**
 * Counts initial underscores of an identifier.
 *
 * @param name identifier
 * @return 0 if null or no initial underscores found, 1 if there's only one underscore, 2 if there's two or more initial underscores.
 */
public static int getInitialUnderscores(@Nullable String name) {
  if (name == null) {
    return 0;
  }
  if (name.startsWith("__")) {
    // Two or more leading underscores are capped at 2
    return 2;
  }
  return name.startsWith(PyNames.UNDERSCORE) ? 1 : 0;
}
/**
 * @param name identifier to check
 * @return true iff the name looks like a class-private one, starting with two underscores but not ending with two underscores.
 */
public static boolean isClassPrivateName(@NotNull String name) {
  return name.startsWith("__") && !name.endsWith("__");
}
/**
 * @return true iff the name is a "dunder" name like {@code __init__}: it both starts and ends with double underscores
 * and has at least one character in between (the length check rules out {@code ____} and shorter).
 */
public static boolean isSpecialName(@NotNull String name) {
  return name.length() > 4 && name.startsWith("__") && name.endsWith("__");
}
/**
 * Constructs new lookup element for completion of keyword argument with equals sign appended.
 *
 * @param name name of the parameter
 * @param settingsAnchor file to check code style settings and surround equals sign with spaces if necessary
 * @return lookup element
 */
@NotNull
public static LookupElement createNamedParameterLookup(@NotNull String name, @NotNull PsiFile settingsAnchor) {
  final String suffix;
  // Honor the "spaces around = in keyword argument" code style setting of the target file
  if (CodeStyle.getCustomSettings(settingsAnchor, PyCodeStyleSettings.class).SPACE_AROUND_EQ_IN_KEYWORD_ARGUMENT) {
    suffix = " = ";
  }
  else {
    suffix = "=";
  }
  LookupElementBuilder lookupElementBuilder = LookupElementBuilder.create(name + suffix).withIcon(PlatformIcons.PARAMETER_ICON);
  lookupElementBuilder = lookupElementBuilder.withInsertHandler(OverwriteEqualsInsertHandler.INSTANCE);
  // Grouping priority 1 keeps keyword-argument variants together in the completion popup
  return PrioritizedLookupElement.withGrouping(lookupElementBuilder, 1);
}
/**
 * Peels argument expression of parentheses and of keyword argument wrapper
 *
 * @param expr an item of getArguments() array
 * @return expression actually passed as argument, or null if the wrappers contain no expression
 */
@Nullable
public static PyExpression peelArgument(PyExpression expr) {
  // Strip arbitrarily nested parentheses first, then a single keyword-argument wrapper
  while (expr instanceof PyParenthesizedExpression) expr = ((PyParenthesizedExpression)expr).getContainedExpression();
  if (expr instanceof PyKeywordArgument) expr = ((PyKeywordArgument)expr).getValueExpression();
  return expr;
}
/**
 * Returns the name of the first parameter of the given callable, falling back to the canonical {@code "self"}.
 *
 * @param container function to inspect; may be null
 * @return name of the first named parameter, or {@code "self"} when the function is null, has no parameters,
 *         the first parameter is not a named one, or its name is unavailable (never returns null)
 */
public static String getFirstParameterName(PyFunction container) {
  String selfName = PyNames.CANONICAL_SELF;
  if (container != null) {
    final PyParameter[] params = container.getParameterList().getParameters();
    if (params.length > 0) {
      final PyNamedParameter named = params[0].getAsNamed();
      // Guard against a nameless named parameter (incomplete code) so the "self" fallback is never replaced by null
      if (named != null && named.getName() != null) {
        selfName = named.getName();
      }
    }
  }
  return selfName;
}
/**
 * @return Source roots <strong>and</strong> content roots for element's project
 */
@NotNull
public static Collection<VirtualFile> getSourceRoots(@NotNull PsiElement foothold) {
  final Module module = ModuleUtilCore.findModuleForPsiElement(foothold);
  if (module != null) {
    return getSourceRoots(module);
  }
  // Element outside any module (e.g. library or scratch file) — no roots to report
  return Collections.emptyList();
}
/**
 * @return Source roots <strong>and</strong> content roots for module, source roots first, without duplicates
 */
@NotNull
public static Collection<VirtualFile> getSourceRoots(@NotNull Module module) {
  // LinkedHashSet deduplicates while keeping source roots ahead of content roots
  final Set<VirtualFile> result = new LinkedHashSet<>();
  final ModuleRootManager manager = ModuleRootManager.getInstance(module);
  Collections.addAll(result, manager.getSourceRoots());
  Collections.addAll(result, manager.getContentRoots());
  return result;
}
/**
 * Looks up a file by a relative path among the module's source and content roots.
 *
 * @param module module whose roots are searched; tolerated to be null (returns null then)
 * @param path   path relative to some root
 * @return the first matching file, or null if none of the roots contains it
 */
@Nullable
public static VirtualFile findInRoots(Module module, String path) {
  if (module != null) {
    for (VirtualFile root : getSourceRoots(module)) {
      VirtualFile file = root.findFileByRelativePath(path);
      if (file != null) {
        return file;
      }
    }
  }
  return null;
}
/**
 * Returns the list of string literal values assigned to the given target, or null if the assigned value
 * is not a sequence of plain string literals.
 *
 * @deprecated This method will be removed in 2018.3. Use {@link #strListValue(PyExpression)} on the assigned value directly.
 */
@Nullable
@Deprecated
public static List<String> getStringListFromTargetExpression(PyTargetExpression attr) {
  return strListValue(attr.findAssignedValue());
}
/**
 * Extracts the literal string values from a sequence expression (list/tuple/…), unwrapping parentheses.
 *
 * @param value expression to examine; may be null
 * @return string values in sequence order, or null if the expression is not a sequence
 *         or contains any non-string-literal element
 */
@Nullable
public static List<String> strListValue(PyExpression value) {
  while (value instanceof PyParenthesizedExpression) {
    value = ((PyParenthesizedExpression)value).getContainedExpression();
  }
  if (value instanceof PySequenceExpression) {
    final PyExpression[] elements = ((PySequenceExpression)value).getElements();
    List<String> result = new ArrayList<>(elements.length);
    for (PyExpression element : elements) {
      // Any non-literal element makes the whole list non-constant — bail out
      if (!(element instanceof PyStringLiteralExpression)) {
        return null;
      }
      result.add(((PyStringLiteralExpression)element).getStringValue());
    }
    return result;
  }
  return null;
}
/**
 * Collects entries of a dict literal whose keys are plain string literals, preserving source order.
 * Entries with non-string-literal keys are silently skipped.
 *
 * @param dict dict literal to examine
 * @return map from literal key value to the corresponding value expression
 */
@NotNull
public static Map<String, PyExpression> dictValue(@NotNull PyDictLiteralExpression dict) {
  // Linked map keeps the order of entries as written in the source
  Map<String, PyExpression> result = Maps.newLinkedHashMap();
  for (PyKeyValueExpression keyValue : dict.getElements()) {
    PyExpression key = keyValue.getKey();
    PyExpression value = keyValue.getValue();
    if (key instanceof PyStringLiteralExpression) {
      result.put(((PyStringLiteralExpression)key).getStringValue(), value);
    }
  }
  return result;
}
/**
 * @param what thing to search for
 * @param variants things to search among
 * @return true iff {@code what.equals()} one of the variants.
 */
@SafeVarargs // the varargs array is only read, never written to or exposed — safe for generic elements
public static <T> boolean among(@NotNull T what, T... variants) {
  for (T s : variants) {
    // Null variants are tolerated: what.equals(null) is simply false
    if (what.equals(s)) return true;
  }
  return false;
}
/**
 * Returns the literal string value of the given keyword argument of a call, or null if the argument
 * is absent or not a constant string.
 */
@Nullable
public static String getKeywordArgumentString(PyCallExpression expr, String keyword) {
  return PyPsiUtils.strValue(expr.getKeywordArgument(keyword));
}
/**
 * Checks whether the class is (or inherits from) a Python exception class, judging by qualified names
 * of the class itself and its ancestors.
 */
public static boolean isExceptionClass(PyClass pyClass) {
  if (isBaseException(pyClass.getQualifiedName())) {
    return true;
  }
  // Fallback context: resolve is allowed but no stubs-to-AST switch desired here
  for (PyClassLikeType type : pyClass.getAncestorTypes(TypeEvalContext.codeInsightFallback(pyClass.getProject()))) {
    if (type != null && isBaseException(type.getClassQName())) {
      return true;
    }
  }
  return false;
}
// Recognizes exception roots by qualified name: anything mentioning BaseException,
// or anything from the Python 2 "exceptions" module.
private static boolean isBaseException(String name) {
  if (name == null) {
    return false;
  }
  return name.contains("BaseException") || name.startsWith("exceptions.");
}
/**
 * Immutable value object describing the "kind" of a method: static/class/instance,
 * and whether it belongs to a metaclass. Obtain instances via {@link #of(PyFunction)}.
 */
public static class MethodFlags {
  private final boolean myIsStaticMethod;
  private final boolean myIsMetaclassMethod;
  private final boolean myIsSpecialMetaclassMethod;
  private final boolean myIsClassMethod;
  /**
   * @return true iff the method belongs to a metaclass (an ancestor of 'type').
   */
  public boolean isMetaclassMethod() {
    return myIsMetaclassMethod;
  }
  /**
   * @return iff isMetaclassMethod and the method is either __init__ or __call__.
   */
  public boolean isSpecialMetaclassMethod() {
    return myIsSpecialMetaclassMethod;
  }
  public boolean isStaticMethod() {
    return myIsStaticMethod;
  }
  public boolean isClassMethod() {
    return myIsClassMethod;
  }
  private MethodFlags(boolean isClassMethod, boolean isStaticMethod, boolean isMetaclassMethod, boolean isSpecialMetaclassMethod) {
    myIsClassMethod = isClassMethod;
    myIsStaticMethod = isStaticMethod;
    myIsMetaclassMethod = isMetaclassMethod;
    myIsSpecialMetaclassMethod = isSpecialMetaclassMethod;
  }
  /**
   * @param node a function
   * @return a new flags object, or null if the function is not a method
   */
  @Nullable
  public static MethodFlags of(@NotNull PyFunction node) {
    PyClass cls = node.getContainingClass();
    if (cls != null) {
      PyFunction.Modifier modifier = node.getModifier();
      boolean isMetaclassMethod = false;
      // A method is a metaclass method iff its class inherits from the builtin 'type'
      PyClass type_cls = PyBuiltinCache.getInstance(node).getClass("type");
      for (PyClass ancestor_cls : cls.getAncestorClasses(null)) {
        if (ancestor_cls == type_cls) {
          isMetaclassMethod = true;
          break;
        }
      }
      final String method_name = node.getName();
      boolean isSpecialMetaclassMethod = isMetaclassMethod && method_name != null && among(method_name, PyNames.INIT, "__call__");
      return new MethodFlags(modifier == CLASSMETHOD, modifier == STATICMETHOD, isMetaclassMethod, isSpecialMetaclassMethod);
    }
    return null;
  }
  /**
   * @return true iff the method is a plain instance method, i.e. neither a class method nor a static method.
   */
  public boolean isInstanceMethod() {
    return !(myIsClassMethod || myIsStaticMethod);
  }
}
/**
 * Checks whether the given call expression is a legitimate call to the builtin {@code super},
 * made inside a class, either in the Python 3 no-argument form or with the enclosing class
 * (or one of its ancestors, or {@code self.__class__}) passed as the first argument.
 */
public static boolean isSuperCall(@NotNull PyCallExpression node) {
  PyClass klass = PsiTreeUtil.getParentOfType(node, PyClass.class);
  if (klass == null) return false;
  PyExpression callee = node.getCallee();
  if (callee == null) return false;
  String name = callee.getName();
  if (PyNames.SUPER.equals(name)) {
    PsiReference reference = callee.getReference();
    if (reference == null) return false;
    PsiElement resolved = reference.resolve();
    PyBuiltinCache cache = PyBuiltinCache.getInstance(node);
    // Make sure "super" actually resolves to the builtin, not a local redefinition
    if (resolved != null && cache.isBuiltin(resolved)) {
      PyExpression[] args = node.getArguments();
      if (args.length > 0) {
        // Textual comparison: the first argument must name the enclosing class, self.__class__, or an ancestor
        String firstArg = args[0].getText();
        if (firstArg.equals(klass.getName()) || firstArg.equals(PyNames.CANONICAL_SELF + "." + PyNames.__CLASS__)) {
          return true;
        }
        for (PyClass s : klass.getAncestorClasses(null)) {
          if (firstArg.equals(s.getName())) {
            return true;
          }
        }
      }
      else {
        // Python 3 zero-argument super() form
        return true;
      }
    }
  }
  return false;
}
/**
 * Finds the Python file at the given path, creating it from the "Python Script" file template if it does not exist.
 *
 * @param path    absolute path of the desired file
 * @param project project used for template expansion and PSI lookup
 * @return the existing or newly created Python file
 * @throws IncorrectOperationException if the file cannot be created, or the path points to a non-Python file
 */
@NotNull
public static PyFile getOrCreateFile(String path, Project project) {
  final VirtualFile vfile = LocalFileSystem.getInstance().findFileByIoFile(new File(path));
  final PsiFile psi;
  if (vfile == null) {
    // File does not exist yet — create it from the internal "Python Script" template
    final File file = new File(path);
    try {
      final VirtualFile baseDir = project.getBaseDir();
      final FileTemplateManager fileTemplateManager = FileTemplateManager.getInstance(project);
      final FileTemplate template = fileTemplateManager.getInternalTemplate("Python Script");
      final Properties properties = fileTemplateManager.getDefaultProperties();
      properties.setProperty("NAME", FileUtil.getNameWithoutExtension(file.getName()));
      final String content = (template != null) ? template.getText(properties) : null;
      // Falls back to the project base dir (or ".") when the path has no parent component
      psi = PyExtractSuperclassHelper.placeFile(project,
                                                StringUtil.notNullize(
                                                  file.getParent(),
                                                  baseDir != null ? baseDir
                                                    .getPath() : "."
                                                ),
                                                file.getName(),
                                                content
      );
    }
    catch (IOException e) {
      throw new IncorrectOperationException(String.format("Cannot create file '%s'", path), (Throwable)e);
    }
  }
  else {
    psi = PsiManager.getInstance(project).findFile(vfile);
  }
  if (!(psi instanceof PyFile)) {
    throw new IncorrectOperationException(PyBundle.message(
      "refactoring.move.module.members.error.cannot.place.elements.into.nonpython.file"));
  }
  return (PyFile)psi;
}
/**
 * Finds the nearest PSI element strictly before the given offset on the same line, skipping elements
 * of the specified classes.
 *
 * @param psiFile     file to search in
 * @param caretOffset offset to start from (the search begins at caretOffset - 1)
 * @param toSkip      element classes to skip over (raw varargs for compatibility with {@code PsiTreeUtil.instanceOf})
 * @return the found element, or null if the line start is reached or only skippable elements precede the offset
 */
@Nullable
public static PsiElement findPrevAtOffset(PsiFile psiFile, int caretOffset, Class... toSkip) {
  PsiElement element;
  if (caretOffset < 0) {
    return null;
  }
  int lineStartOffset = 0;
  final Document document = PsiDocumentManager.getInstance(psiFile.getProject()).getDocument(psiFile);
  if (document != null) {
    int lineNumber = document.getLineNumber(caretOffset);
    lineStartOffset = document.getLineStartOffset(lineNumber);
  }
  // Walk backwards but never past the beginning of the caret's line
  do {
    caretOffset--;
    element = psiFile.findElementAt(caretOffset);
  }
  while (caretOffset >= lineStartOffset && PsiTreeUtil.instanceOf(element, toSkip));
  return PsiTreeUtil.instanceOf(element, toSkip) ? null : element;
}
/**
 * Finds the nearest non-whitespace element at or around the given offset, looking forward on the line first,
 * then backward.
 */
@Nullable
public static PsiElement findNonWhitespaceAtOffset(PsiFile psiFile, int caretOffset) {
  PsiElement element = findNextAtOffset(psiFile, caretOffset, PsiWhiteSpace.class);
  if (element == null) {
    element = findPrevAtOffset(psiFile, caretOffset - 1, PsiWhiteSpace.class);
  }
  return element;
}
/**
 * Finds an element around the given offset, preferring the element just before it,
 * falling back to the one at/after it.
 */
@Nullable
public static PsiElement findElementAtOffset(PsiFile psiFile, int caretOffset) {
  PsiElement element = findPrevAtOffset(psiFile, caretOffset);
  if (element == null) {
    element = findNextAtOffset(psiFile, caretOffset);
  }
  return element;
}
/**
 * Finds the nearest PSI element at or after the given offset on the same line, skipping elements
 * of the specified classes.
 *
 * @param psiFile     file to search in
 * @param caretOffset offset to start from
 * @param toSkip      element classes to skip over (raw varargs for compatibility with {@code PsiTreeUtil.instanceOf})
 * @return the found element, or null if the line end is reached or only skippable elements follow the offset
 */
@Nullable
public static PsiElement findNextAtOffset(@NotNull final PsiFile psiFile, int caretOffset, Class... toSkip) {
  PsiElement element = psiFile.findElementAt(caretOffset);
  if (element == null) {
    return null;
  }
  final Document document = PsiDocumentManager.getInstance(psiFile.getProject()).getDocument(psiFile);
  int lineEndOffset = 0;
  if (document != null) {
    int lineNumber = document.getLineNumber(caretOffset);
    lineEndOffset = document.getLineEndOffset(lineNumber);
  }
  // Walk forwards but never past the end of the caret's line
  while (caretOffset < lineEndOffset && PsiTreeUtil.instanceOf(element, toSkip)) {
    caretOffset++;
    element = psiFile.findElementAt(caretOffset);
  }
  return PsiTreeUtil.instanceOf(element, toSkip) ? null : element;
}
/**
 * Adds element to statement list to the correct place according to its dependencies.
 *
 * @param element to insert
 * @param statementList where element should be inserted
 * @return inserted element
 */
public static <T extends PyElement> T addElementToStatementList(@NotNull final T element,
                                                                @NotNull final PyStatementList statementList) {
  PsiElement before = null;
  PsiElement after = null;
  // Scan existing statements: the element must go after everything it depends on
  // and before everything that depends on it
  for (final PyStatement statement : statementList.getStatements()) {
    if (PyDependenciesComparator.depends(element, statement)) {
      after = statement;
    }
    else if (PyDependenciesComparator.depends(statement, element)) {
      before = statement;
    }
  }
  final PsiElement result;
  if (after != null) {
    result = statementList.addAfter(element, after);
  }
  else if (before != null) {
    result = statementList.addBefore(element, before);
  }
  else {
    // No dependencies either way — insert at the beginning (after docstring/super calls)
    result = addElementToStatementList(element, statementList, true);
  }
  @SuppressWarnings("unchecked") // Inserted element can't have different type
  final T resultCasted = (T)result;
  return resultCasted;
}
/**
 * Inserts specified element into the statement list either at the beginning or at its end. If new element is going to be
 * inserted at the beginning, any preceding docstrings and/or calls to super methods will be skipped.
 * Moreover if statement list previously didn't contain any statements, explicit new line and indentation will be inserted in
 * front of it.
 *
 * @param element element to insert
 * @param statementList statement list
 * @param toTheBeginning whether to insert element at the beginning or at the end of the statement list
 * @return actually inserted element as for {@link PsiElement#add(PsiElement)}
 */
@NotNull
public static PsiElement addElementToStatementList(@NotNull PsiElement element,
                                                   @NotNull PyStatementList statementList,
                                                   boolean toTheBeginning) {
  final PsiElement prevElem = PyPsiUtils.getPrevNonWhitespaceSibling(statementList);
  // If statement list is on the same line as previous element (supposedly colon), move its only statement on the next line
  if (prevElem != null && onSameLine(statementList, prevElem)) {
    final PsiDocumentManager manager = PsiDocumentManager.getInstance(statementList.getProject());
    final Document document = manager.getDocument(statementList.getContainingFile());
    if (document != null) {
      final PyStatementListContainer container = (PyStatementListContainer)statementList.getParent();
      manager.doPostponedOperationsAndUnblockDocument(document);
      final String indentation = "\n" + PyIndentUtil.getElementIndent(statementList);
      // If statement list was empty initially, we need to add some anchor statement ("pass"), so that preceding new line was not
      // parsed as following entire StatementListContainer (e.g. function). It's going to be replaced anyway.
      final String text = statementList.getStatements().length == 0 ? indentation + PyNames.PASS : indentation;
      document.insertString(statementList.getTextRange().getStartOffset(), text);
      manager.commitDocument(document);
      // Re-fetch the statement list: the document edit above invalidated the old PSI
      statementList = container.getStatementList();
    }
  }
  final PsiElement firstChild = statementList.getFirstChild();
  // A lone "pass" placeholder is replaced rather than kept alongside the new statement
  if (firstChild == statementList.getLastChild() && firstChild instanceof PyPassStatement) {
    element = firstChild.replace(element);
  }
  else {
    final PyStatement[] statements = statementList.getStatements();
    if (toTheBeginning && statements.length > 0) {
      final PyDocStringOwner docStringOwner = PsiTreeUtil.getParentOfType(statementList, PyDocStringOwner.class);
      PyStatement anchor = statements[0];
      // Skip a leading docstring — the new statement must not precede it
      if (docStringOwner != null && anchor instanceof PyExpressionStatement &&
          ((PyExpressionStatement)anchor).getExpression() == docStringOwner.getDocStringExpression()) {
        final PyStatement next = PsiTreeUtil.getNextSiblingOfType(anchor, PyStatement.class);
        if (next == null) {
          return statementList.addAfter(element, anchor);
        }
        anchor = next;
      }
      // Also skip leading super()/__init__() calls so initialization order is preserved
      while (anchor instanceof PyExpressionStatement) {
        final PyExpression expression = ((PyExpressionStatement)anchor).getExpression();
        if (expression instanceof PyCallExpression) {
          final PyExpression callee = ((PyCallExpression)expression).getCallee();
          if ((isSuperCall((PyCallExpression)expression) || (callee != null && PyNames.INIT.equals(callee.getName())))) {
            final PyStatement next = PsiTreeUtil.getNextSiblingOfType(anchor, PyStatement.class);
            if (next == null) {
              return statementList.addAfter(element, anchor);
            }
            anchor = next;
            continue;
          }
        }
        break;
      }
      element = statementList.addBefore(element, anchor);
    }
    else {
      element = statementList.add(element);
    }
  }
  return element;
}
/**
 * Checks whether {@code callable} can be safely substituted where {@code otherCallable} is expected,
 * comparing required/optional parameter counts and *args/**kwargs containers.
 *
 * @param callable      candidate callable (e.g. an overriding method)
 * @param otherCallable reference callable (e.g. the overridden method)
 * @param context       type evaluation context
 * @return true iff every call valid for {@code otherCallable} is also valid for {@code callable}
 */
public static boolean isSignatureCompatibleTo(@NotNull PyCallable callable, @NotNull PyCallable otherCallable,
                                              @NotNull TypeEvalContext context) {
  final List<PyCallableParameter> parameters = callable.getParameters(context);
  final List<PyCallableParameter> otherParameters = otherCallable.getParameters(context);
  final int optionalCount = optionalParametersCount(parameters);
  final int otherOptionalCount = optionalParametersCount(otherParameters);
  final int requiredCount = requiredParametersCount(callable, parameters);
  final int otherRequiredCount = requiredParametersCount(otherCallable, otherParameters);
  if (hasPositionalContainer(otherParameters) || hasKeywordContainer(otherParameters)) {
    // If the reference signature consists solely of special params (*args/**kwargs/self), anything matches it
    if (otherParameters.size() == specialParametersCount(otherCallable, otherParameters)) {
      return true;
    }
  }
  if (hasPositionalContainer(parameters) || hasKeywordContainer(parameters)) {
    // Containers absorb any extra arguments; only required-count matters
    return requiredCount <= otherRequiredCount;
  }
  return requiredCount <= otherRequiredCount && parameters.size() >= otherParameters.size() && optionalCount >= otherOptionalCount;
}
// Number of parameters that may be omitted at the call site because they declare a default value.
private static int optionalParametersCount(@NotNull List<PyCallableParameter> parameters) {
  return (int)parameters.stream().filter(PyCallableParameter::hasDefaultValue).count();
}
// Parameters that a caller must always supply: everything except defaults, containers (*args/**kwargs) and implicit self/cls.
private static int requiredParametersCount(@NotNull PyCallable callable, @NotNull List<PyCallableParameter> parameters) {
  return parameters.size() - optionalParametersCount(parameters) - specialParametersCount(callable, parameters);
}
// Counts "special" parameters: the positional container (*args), the keyword container (**kwargs),
// and an implicit first parameter (self/cls) if present.
private static int specialParametersCount(@NotNull PyCallable callable, @NotNull List<PyCallableParameter> parameters) {
  int n = 0;
  if (hasPositionalContainer(parameters)) {
    n++;
  }
  if (hasKeywordContainer(parameters)) {
    n++;
  }
  if (isFirstParameterSpecial(callable, parameters)) {
    n++;
  }
  return n;
}
// True iff the signature declares a positional container parameter (*args).
private static boolean hasPositionalContainer(@NotNull List<PyCallableParameter> parameters) {
  return parameters.stream().anyMatch(PyCallableParameter::isPositionalContainer);
}
// True iff the signature declares a keyword container parameter (**kwargs).
private static boolean hasKeywordContainer(@NotNull List<PyCallableParameter> parameters) {
  return parameters.stream().anyMatch(PyCallableParameter::isKeywordContainer);
}
// Decides whether the first parameter is implicit (self/cls) and therefore not supplied by the caller.
private static boolean isFirstParameterSpecial(@NotNull PyCallable callable, @NotNull List<PyCallableParameter> parameters) {
  final PyFunction method = callable.asMethod();
  if (method != null) {
    // __new__ is implicitly a class method; any non-static method has an implicit first parameter
    return PyNames.NEW.equals(method.getName()) || method.getModifier() != STATICMETHOD;
  }
  else {
    // Not a method: fall back to the naming convention and treat a leading "self" as special
    final PyCallableParameter first = ContainerUtil.getFirstItem(parameters);
    return first != null && PyNames.CANONICAL_SELF.equals(first.getName());
  }
}
/**
 * @return true iff the function is named {@code __init__}.
 */
public static boolean isInit(@NotNull final PyFunction function) {
  return PyNames.INIT.equals(function.getName());
}
/**
 * Filters out {@link PyMemberInfo}
 * that should not be displayed in this refactoring (like object)
 *
 * @param pyMemberInfos collection to sort
 * @return sorted collection
 */
@NotNull
public static Collection<PyMemberInfo<PyElement>> filterOutObject(@NotNull final Collection<PyMemberInfo<PyElement>> pyMemberInfos) {
  // ObjectPredicate(false) keeps everything except the "object" class itself
  return Collections2.filter(pyMemberInfos, new ObjectPredicate(false));
}
/**
 * Checks whether the given name would be imported by {@code from module import *}:
 * it must be listed in {@code __all__} when present, otherwise it must not start with an underscore.
 */
public static boolean isStarImportableFrom(@NotNull String name, @NotNull PyFile file) {
  final List<String> dunderAll = file.getDunderAll();
  return dunderAll != null ? dunderAll.contains(name) : !name.startsWith("_");
}
/**
 * Filters only PyClass object (new class)
 */
public static class ObjectPredicate extends NotNullPredicate<PyMemberInfo<PyElement>> {
  private final boolean myAllowObjects;
  /**
   * @param allowObjects allows only objects if true. Allows all but objects otherwise.
   */
  public ObjectPredicate(final boolean allowObjects) {
    myAllowObjects = allowObjects;
  }
  @Override
  public boolean applyNotNull(@NotNull final PyMemberInfo<PyElement> input) {
    return myAllowObjects == isObject(input);
  }
  // True iff the member is the builtin "object" class itself
  private static boolean isObject(@NotNull final PyMemberInfo<PyElement> classMemberInfo) {
    final PyElement element = classMemberInfo.getMember();
    return (element instanceof PyClass) && PyNames.OBJECT.equals(element.getName());
  }
}
/**
 * Sometimes you do not know real FQN of some class, but you know class name and its package.
 * I.e. {@code django.apps.conf.AppConfig} is not documented, but you know
 * {@code AppConfig} and {@code django} package.
 *
 * @param symbol element to check (class or function)
 * @param expectedPackage package like "django"
 * @param expectedName expected name (i.e. AppConfig)
 * @return true if element in package
 * @deprecated use {@link com.jetbrains.python.nameResolver.FQNamesProvider#isNameMatches(PyQualifiedNameOwner)}
 * Remove in 2018
 */
@Deprecated
public static boolean isSymbolInPackage(@NotNull final PyQualifiedNameOwner symbol,
                                        @NotNull final String expectedPackage,
                                        @NotNull final String expectedName) {
  final String qualifiedNameString = symbol.getQualifiedName();
  if (qualifiedNameString == null) {
    return false;
  }
  final QualifiedName qualifiedName = QualifiedName.fromDottedString(qualifiedNameString);
  // Only the outermost package and the final symbol name are compared; intermediate components are ignored
  final String aPackage = qualifiedName.getFirstComponent();
  if (!(expectedPackage.equals(aPackage))) {
    return false;
  }
  final String symbolName = qualifiedName.getLastComponent();
  return expectedName.equals(symbolName);
}
/**
 * Returns true if the class is the builtin {@code object} (or the old-style
 * {@code types.InstanceType}), judged by its qualified name.
 */
public static boolean isObjectClass(@NotNull PyClass cls) {
  final String qualifiedName = cls.getQualifiedName();
  if (PyNames.OBJECT.equals(qualifiedName)) {
    return true;
  }
  return PyNames.TYPES_INSTANCE_TYPE.equals(qualifiedName);
}
/**
 * Returns true if the element lives in a scratch file, i.e. its virtual file is
 * located under the scratch root managed by {@link ScratchFileService}.
 */
public static boolean isInScratchFile(@NotNull PsiElement element) {
  return ScratchFileService.isInScratchRoot(PsiUtilCore.getVirtualFile(element));
}
/**
 * Resolves {@code memberName} on {@code type} for reading and returns the union
 * of the return types of everything it resolves to, or null when the member
 * cannot be resolved at all.
 */
@Nullable
public static PyType getReturnTypeOfMember(@NotNull PyType type,
                                           @NotNull String memberName,
                                           @Nullable PyExpression location,
                                           @NotNull TypeEvalContext context) {
  final PyResolveContext resolveContext = PyResolveContext.noImplicits().withTypeEvalContext(context);
  final List<? extends RatedResolveResult> resolved =
    type.resolveMember(memberName, location, AccessDirection.READ, resolveContext);
  if (resolved == null) {
    return null;
  }
  final List<PyType> returnTypes = new ArrayList<>();
  for (RatedResolveResult result : resolved) {
    final PyType returnType = getReturnType(result.getElement(), context);
    if (returnType != null) {
      returnTypes.add(returnType);
    }
  }
  return PyUnionType.union(returnTypes);
}
/**
 * Return type of an arbitrary resolved element, or null when it is not typed.
 */
@Nullable
private static PyType getReturnType(@Nullable PsiElement element, @NotNull TypeEvalContext context) {
  if (!(element instanceof PyTypedElement)) {
    return null;
  }
  return getReturnType(context.getType((PyTypedElement)element), context);
}
/**
 * Return type of a callable type; for a union, the union of its members'
 * return types. Null for anything else.
 */
@Nullable
private static PyType getReturnType(@Nullable PyType type, @NotNull TypeEvalContext context) {
  if (type instanceof PyCallableType) {
    return ((PyCallableType)type).getReturnType(context);
  }
  if (!(type instanceof PyUnionType)) {
    return null;
  }
  final List<PyType> memberReturnTypes = new ArrayList<>();
  for (PyType member : ((PyUnionType)type).getMembers()) {
    final PyType returnType = getReturnType(member, context);
    if (returnType != null) {
      memberReturnTypes.add(returnType);
    }
  }
  return PyUnionType.union(memberReturnTypes);
}
/**
 * A function counts as "empty" when its body is nothing but an optional
 * docstring followed by pass / raise / bare return / ellipsis (or nothing at all).
 */
public static boolean isEmptyFunction(@NotNull PyFunction function) {
  final PyStatement[] statements = function.getStatementList().getStatements();
  switch (statements.length) {
    case 0:
      return true;
    case 1:
      return isStringLiteral(statements[0]) || isPassOrRaiseOrEmptyReturnOrEllipsis(statements[0]);
    case 2:
      // docstring + trivial statement
      return isStringLiteral(statements[0]) && isPassOrRaiseOrEmptyReturnOrEllipsis(statements[1]);
    default:
      return false;
  }
}
// True for "pass", "raise ...", a bare "return", or a lone "..." expression.
private static boolean isPassOrRaiseOrEmptyReturnOrEllipsis(PyStatement stmt) {
  if (stmt instanceof PyPassStatement) return true;
  if (stmt instanceof PyRaiseStatement) return true;
  if (stmt instanceof PyReturnStatement && ((PyReturnStatement)stmt).getExpression() == null) {
    return true;
  }
  if (stmt instanceof PyExpressionStatement) {
    final PyExpression expression = ((PyExpressionStatement)stmt).getExpression();
    return expression instanceof PyNoneLiteralExpression && ((PyNoneLiteralExpression)expression).isEllipsis();
  }
  return false;
}
/**
 * True when the statement is a standalone string-literal expression
 * (the shape of a docstring).
 */
public static boolean isStringLiteral(@Nullable PyStatement stmt) {
  if (!(stmt instanceof PyExpressionStatement)) {
    return false;
  }
  return ((PyExpressionStatement)stmt).getExpression() instanceof PyStringLiteralExpression;
}
/**
 * Loop that the given {@code break}/{@code continue} statement belongs to,
 * or null when the element is neither of those statements.
 */
@Nullable
public static PyLoopStatement getCorrespondingLoop(@NotNull PsiElement breakOrContinue) {
  if (breakOrContinue instanceof PyContinueStatement || breakOrContinue instanceof PyBreakStatement) {
    return getCorrespondingLoopImpl(breakOrContinue);
  }
  return null;
}
@Nullable
private static PyLoopStatement getCorrespondingLoopImpl(@NotNull PsiElement element) {
  final PyLoopStatement loop = PsiTreeUtil.getParentOfType(element, PyLoopStatement.class, true, ScopeOwner.class);
  if (loop instanceof PyStatementWithElse) {
    // A break/continue inside the loop's "else" clause belongs to an outer loop.
    final PsiElement elsePart = ((PyStatementWithElse)loop).getElsePart();
    if (PsiTreeUtil.isAncestor(elsePart, element, true)) {
      return getCorrespondingLoopImpl(loop);
    }
  }
  return loop;
}
/**
 * Returns true when the given default value is an <em>instance</em> of one of the
 * builtin mutable collections (list / set / dict) or of a subclass thereof —
 * such defaults are shared between calls and are usually a bug.
 */
public static boolean isForbiddenMutableDefault(@Nullable PyTypedElement value, @NotNull TypeEvalContext context) {
  if (value == null) return false;
  final PyClassType type = as(context.getType(value), PyClassType.class);
  // isDefinition() == false means "an instance of the class", not the class itself.
  if (type != null && !type.isDefinition()) {
    final PyBuiltinCache builtinCache = PyBuiltinCache.getInstance(value);
    // Collect the builtin list/set/dict classes (skipping any the cache cannot provide).
    final Set<PyClass> forbiddenClasses = StreamEx
      .of(builtinCache.getListType(), builtinCache.getSetType(), builtinCache.getDictType())
      .nonNull()
      .map(PyClassType::getPyClass)
      .toSet();
    final PyClass cls = type.getPyClass();
    // Direct hit, or any ancestor of the value's class is one of the forbidden builtins.
    return forbiddenClasses.contains(cls) || ContainerUtil.exists(cls.getAncestorClasses(context), forbiddenClasses::contains);
  }
  return false;
}
/**
 * Prepends {@code decorator} (its full source text, e.g. "@foo") to the
 * function's decorator list, keeping any existing decorators after it.
 */
public static void addDecorator(@NotNull PyFunction function, @NotNull String decorator) {
  final List<String> decoratorTexts = new ArrayList<>();
  decoratorTexts.add(decorator);  // the new decorator goes first
  final PyDecoratorList existing = function.getDecoratorList();
  if (existing != null) {
    for (PyDecorator deco : existing.getDecorators()) {
      decoratorTexts.add(deco.getText());
    }
  }
  final PyElementGenerator generator = PyElementGenerator.getInstance(function.getProject());
  final PyDecoratorList newDecorators =
    generator.createDecoratorList(decoratorTexts.toArray(ArrayUtil.EMPTY_STRING_ARRAY));
  if (existing == null) {
    // No decorator list yet: insert one before the function header.
    function.addBefore(newDecorators, function.getFirstChild());
  }
  else {
    existing.replace(newDecorators);
  }
}
/**
 * This helper class allows to collect various information about AST nodes composing {@link PyStringLiteralExpression}.
 */
public static final class StringNodeInfo {
  private final ASTNode myNode;
  private final String myPrefix;       // e.g. "UR", "b", "" — everything before the opening quote
  private final String myQuote;        // opening quote run: ', ", ''' or """
  private final TextRange myContentRange;  // range between the quotes, relative to the node

  public StringNodeInfo(@NotNull ASTNode node) {
    if (!PyTokenTypes.STRING_NODES.contains(node.getElementType())) {
      throw new IllegalArgumentException("Node must be valid Python string literal token, but " + node.getElementType() + " was given");
    }
    myNode = node;
    final String nodeText = node.getText();
    final int prefixLength = PyStringLiteralExpressionImpl.getPrefixLength(nodeText);
    myPrefix = nodeText.substring(0, prefixLength);
    myContentRange = PyStringLiteralExpressionImpl.getNodeTextRange(nodeText);
    // Whatever sits between the prefix and the content start is the quote run.
    myQuote = nodeText.substring(prefixLength, myContentRange.getStartOffset());
  }

  public StringNodeInfo(@NotNull PsiElement element) {
    this(element.getNode());
  }

  @NotNull
  public ASTNode getNode() {
    return myNode;
  }

  /**
   * @return string prefix, e.g. "UR", "b" etc.
   */
  @NotNull
  public String getPrefix() {
    return myPrefix;
  }

  /**
   * @return content of the string node between quotes
   */
  @NotNull
  public String getContent() {
    return myContentRange.substring(myNode.getText());
  }

  /**
   * @return <em>relative</em> range of the content (excluding prefix and quotes)
   * @see #getAbsoluteContentRange()
   */
  @NotNull
  public TextRange getContentRange() {
    return myContentRange;
  }

  /**
   * @return <em>absolute</em> content range that accounts offset of the {@link #getNode() node} in the document
   */
  @NotNull
  public TextRange getAbsoluteContentRange() {
    return getContentRange().shiftRight(myNode.getStartOffset());
  }

  /**
   * @return the first character of {@link #getQuote()}
   */
  public char getSingleQuote() {
    return myQuote.charAt(0);
  }

  @NotNull
  public String getQuote() {
    return myQuote;
  }

  public boolean isTripleQuoted() {
    return myQuote.length() == 3;
  }

  /**
   * @return true if string literal ends with starting quote
   */
  public boolean isTerminated() {
    final String text = myNode.getText();
    // There must be room for both quote runs besides the prefix.
    return text.length() - myPrefix.length() >= myQuote.length() * 2 && text.endsWith(myQuote);
  }

  /**
   * @return true if given string node contains "u" or "U" prefix
   */
  public boolean isUnicode() {
    return PyStringLiteralUtil.isUnicodePrefix(myPrefix);
  }

  /**
   * @return true if given string node contains "r" or "R" prefix
   */
  public boolean isRaw() {
    return PyStringLiteralUtil.isRawPrefix(myPrefix);
  }

  /**
   * @return true if given string node contains "b" or "B" prefix
   */
  public boolean isBytes() {
    return PyStringLiteralUtil.isBytesPrefix(myPrefix);
  }

  /**
   * @return true if given string node contains "f" or "F" prefix
   */
  public boolean isFormatted() {
    return PyStringLiteralUtil.isFormattedPrefix(myPrefix);
  }

  /**
   * @return true if other string node has the same decorations, i.e. quotes and prefix
   */
  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    StringNodeInfo info = (StringNodeInfo)o;
    return getQuote().equals(info.getQuote()) &&
           isRaw() == info.isRaw() &&
           isUnicode() == info.isUnicode() &&
           isBytes() == info.isBytes();
  }

  /**
   * BUG FIX: the original class overrode {@link #equals(Object)} without
   * {@link #hashCode()}, breaking the {@link Object} contract for hash-based
   * collections. Combines exactly the properties that {@code equals} compares.
   */
  @Override
  public int hashCode() {
    int result = myQuote.hashCode();
    result = 31 * result + Boolean.hashCode(isRaw());
    result = 31 * result + Boolean.hashCode(isUnicode());
    result = 31 * result + Boolean.hashCode(isBytes());
    return result;
  }
}
public static class IterHelper { // TODO: rename sanely
  private IterHelper() {}

  /**
   * Finds the first non-null, unqualified element whose name equals {@code name},
   * or null when the iterable has no such element.
   */
  @Nullable
  public static PsiNamedElement findName(Iterable<PsiNamedElement> it, String name) {
    for (PsiNamedElement element : it) {
      if (element == null) {
        continue;
      }
      // qualified refs don't match by last name, and we're not checking FQNs here
      if (element instanceof PyQualifiedExpression && ((PyQualifiedExpression)element).isQualified()) {
        continue;
      }
      if (name.equals(element.getName())) { // plain name matches
        return element;
      }
    }
    return null;
  }
}
}
| apache-2.0 |
shahramgdz/hibernate-validator | engine/src/test/java/org/hibernate/validator/test/internal/engine/methodvalidation/returnvaluevalidation/ContactService.java | 482 | /*
* Hibernate Validator, declare and validate application constraints
*
* License: Apache License, Version 2.0
* See the license.txt file in the root directory or <http://www.apache.org/licenses/LICENSE-2.0>.
*/
package org.hibernate.validator.test.internal.engine.methodvalidation.returnvaluevalidation;
import javax.validation.Valid;
/**
 * Service interface used to exercise method-level return-value/parameter
 * validation with a cascaded ({@code @Valid}) bean parameter.
 *
 * @author Hardy Ferentschik
 */
public interface ContactService {
	/**
	 * Takes a cascaded bean parameter: constraints declared on
	 * {@code ContactBean} are validated when this method is invoked.
	 */
	void validateValidBeanParamConstraint(@Valid ContactBean bean);
}
| apache-2.0 |
alefherrera/sisalud | SiSaludSRL/src/main/java/ar/edu/ungs/presentation/profesional/TurnosPorProfesionalPage.java | 2830 | package ar.edu.ungs.presentation.profesional;
import java.awt.BorderLayout;
import javax.swing.JFrame;
import javax.swing.JPanel;
import javax.swing.border.EmptyBorder;
import javax.swing.JButton;
import javax.swing.JComboBox;
import javax.swing.JLabel;
import javax.swing.JScrollPane;
import ar.edu.ungs.business.dto.ComboItemDTO;
public class TurnosPorProfesionalPage extends JFrame {

	private static final long serialVersionUID = 1L;

	private JPanel contentPane;
	private JComboBox<ComboItemDTO> cmbEspecialidad;
	private JComboBox<ComboItemDTO> cmbProfesional;
	private JButton btnBuscar;
	private JButton btnImprimir;

	/**
	 * Create the frame.
	 */
	public TurnosPorProfesionalPage() {
		setTitle("Turnos por Profesional");
		setDefaultCloseOperation(JFrame.DISPOSE_ON_CLOSE);
		setBounds(100, 100, 528, 279);

		// BUG FIX: the previous version invoked the overridden getContentPane()
		// from the constructor before the contentPane field was initialized,
		// which threw a NullPointerException, and the JPanel overload of
		// setContentPane() never installed the panel on the frame. Build the
		// panel first and install it through super.setContentPane().
		contentPane = new JPanel();
		contentPane.setBorder(new EmptyBorder(5, 5, 5, 5));
		// Components are positioned absolutely via setBounds(), so no layout manager.
		contentPane.setLayout(null);
		super.setContentPane(contentPane);

		btnImprimir = new JButton("Imprimir");
		btnImprimir.setBounds(391, 93, 97, 25);
		contentPane.add(btnImprimir);

		cmbEspecialidad = new JComboBox<ComboItemDTO>();
		cmbEspecialidad.setBounds(88, 27, 102, 22);
		contentPane.add(cmbEspecialidad);

		cmbProfesional = new JComboBox<ComboItemDTO>();
		cmbProfesional.setBounds(279, 27, 102, 22);
		contentPane.add(cmbProfesional);

		JLabel lblEspecialidad = new JLabel("Especialidad:");
		lblEspecialidad.setBounds(12, 30, 75, 16);
		contentPane.add(lblEspecialidad);

		JLabel lblProfesional = new JLabel("Profesional:");
		lblProfesional.setBounds(206, 30, 83, 16);
		contentPane.add(lblProfesional);

		btnBuscar = new JButton("Buscar");
		btnBuscar.setBounds(385, 26, 97, 25);
		contentPane.add(btnBuscar);

		JScrollPane scrollPane = new JScrollPane();
		scrollPane.setBounds(12, 62, 374, 104);
		contentPane.add(scrollPane);
	}

	/** @return the frame's content panel (covariant override of {@link JFrame#getContentPane()}). */
	public JPanel getContentPane() {
		return contentPane;
	}

	/** Replaces the content panel, keeping the frame itself in sync. */
	public void setContentPane(JPanel contentPane) {
		this.contentPane = contentPane;
		// Also install on the frame so the change is actually visible.
		super.setContentPane(contentPane);
	}

	public JComboBox<ComboItemDTO> getCmbEspecialidad() {
		return cmbEspecialidad;
	}

	public void setCmbEspecialidad(JComboBox<ComboItemDTO> cmbEspecialidad) {
		this.cmbEspecialidad = cmbEspecialidad;
	}

	public JComboBox<ComboItemDTO> getCmbProfesional() {
		return cmbProfesional;
	}

	public void setCmbProfesional(JComboBox<ComboItemDTO> cmbProfesional) {
		this.cmbProfesional = cmbProfesional;
	}

	public JButton getBtnBuscar() {
		return btnBuscar;
	}

	public void setBtnBuscar(JButton btnBuscar) {
		this.btnBuscar = btnBuscar;
	}

	public JButton getBtnImprimir() {
		return btnImprimir;
	}

	public void setBtnImprimir(JButton btnImprimir) {
		this.btnImprimir = btnImprimir;
	}
}
| apache-2.0 |
fmntf/appinventor-sources | appinventor/buildserver/src/com/google/appinventor/buildserver/Compiler.java | 62612 | // -*- mode: java; c-basic-offset: 2; -*-
// Copyright 2009-2011 Google, All Rights reserved
// Copyright 2011-2016 MIT, All rights reserved
// Released under the Apache License, Version 2.0
// http://www.apache.org/licenses/LICENSE-2.0
package com.google.appinventor.buildserver;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.io.Files;
import com.google.common.io.Resources;
import com.android.sdklib.build.ApkBuilder;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import java.awt.image.BufferedImage;
import java.io.BufferedWriter;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintStream;
import java.io.Reader;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.imageio.ImageIO;
/**
* Main entry point for the YAIL compiler.
*
* <p>Supplies entry points for building Young Android projects.
*
* @author markf@google.com (Mark Friedman)
* @author lizlooney@google.com (Liz Looney)
*/
public final class Compiler {
  /**
   * reading guide:
   * Comp == Component, comp == component, COMP == COMPONENT
   * Ext == External, ext == external, EXT == EXTERNAL
   */

  // Build progress percentage reported to callers (starts at 10).
  public static int currentProgress = 10;

  // Kawa and DX processes can use a lot of memory. We only launch one Kawa or DX process at a time.
  private static final Object SYNC_KAWA_OR_DX = new Object();

  // Platform-specific path separators.
  private static final String SLASH = File.separator;
  private static final String COLON = File.pathSeparator;

  private static final String WEBVIEW_ACTIVITY_CLASS =
      "com.google.appinventor.components.runtime.WebViewActivity";

  // Copied from SdkLevel.java (which isn't in our class path so we duplicate it here)
  private static final String LEVEL_GINGERBREAD_MR1 = "10";

  public static final String RUNTIME_FILES_DIR = "/" + "files" + "/";

  // Build info constants. Used for permissions, libraries and assets.
  // Must match ComponentProcessor.ARMEABI_V7A_SUFFIX
  private static final String ARMEABI_V7A_SUFFIX = "-v7a";
  // Must match Component.ASSET_DIRECTORY
  private static final String ASSET_DIRECTORY = "component";
  // Must match ComponentListGenerator.ASSETS_TARGET
  private static final String ASSETS_TARGET = "assets";
  // Must match ComponentListGenerator.LIBRARIES_TARGET
  public static final String LIBRARIES_TARGET = "libraries";
  // Must match ComponentListGenerator.NATIVE_TARGET
  public static final String NATIVE_TARGET = "native";
  // Must match ComponentListGenerator.PERMISSIONS_TARGET
  private static final String PERMISSIONS_TARGET = "permissions";
  // Must match ComponentListGenerator.BROADCAST_RECEIVER_TARGET
  private static final String BROADCAST_RECEIVER_TARGET = "broadcastReceiver";

  // Native library directory names
  private static final String LIBS_DIR_NAME = "libs";
  private static final String ARMEABI_DIR_NAME = "armeabi";
  private static final String ARMEABI_V7A_DIR_NAME = "armeabi-v7a";

  private static final String EXT_COMPS_DIR_NAME = "external_comps";

  // Defaults applied when the project does not specify these values.
  private static final String DEFAULT_APP_NAME = "";
  private static final String DEFAULT_ICON = RUNTIME_FILES_DIR + "ya.png";
  private static final String DEFAULT_VERSION_CODE = "1";
  private static final String DEFAULT_VERSION_NAME = "1.0";
  private static final String DEFAULT_MIN_SDK = "4";

  /*
   * Resource paths to yail runtime, runtime library files and sdk tools.
   * To get the real file paths, call getResource() with one of these constants.
   */
  private static final String ACRA_RUNTIME =
      RUNTIME_FILES_DIR + "acra-4.4.0.jar";
  private static final String ANDROID_RUNTIME =
      RUNTIME_FILES_DIR + "android.jar";
  private static final String COMP_BUILD_INFO =
      RUNTIME_FILES_DIR + "simple_components_build_info.json";
  private static final String DX_JAR =
      RUNTIME_FILES_DIR + "dx.jar";
  private static final String KAWA_RUNTIME =
      RUNTIME_FILES_DIR + "kawa.jar";
  private static final String SIMPLE_ANDROID_RUNTIME_JAR =
      RUNTIME_FILES_DIR + "AndroidRuntime.jar";

  // Per-platform locations of the aapt and zipalign SDK tools.
  private static final String LINUX_AAPT_TOOL =
      "/tools/linux/aapt";
  private static final String LINUX_ZIPALIGN_TOOL =
      "/tools/linux/zipalign";
  private static final String MAC_AAPT_TOOL =
      "/tools/mac/aapt";
  private static final String MAC_ZIPALIGN_TOOL =
      "/tools/mac/zipalign";
  private static final String WINDOWS_AAPT_TOOL =
      "/tools/windows/aapt";
  private static final String WINDOWS_ZIPALIGN_TOOL =
      "/tools/windows/zipalign";

  @VisibleForTesting
  static final String YAIL_RUNTIME = RUNTIME_FILES_DIR + "runtime.scm";

  // Per-component-type requirements collected from the component build info,
  // keyed by fully qualified component type name (filled by the generate* methods).
  private final ConcurrentMap<String, Set<String>> assetsNeeded =
      new ConcurrentHashMap<String, Set<String>>();
  private final ConcurrentMap<String, Set<String>> libsNeeded =
      new ConcurrentHashMap<String, Set<String>>();
  private final ConcurrentMap<String, Set<String>> nativeLibsNeeded =
      new ConcurrentHashMap<String, Set<String>>();
  private final ConcurrentMap<String, Set<String>> permissionsNeeded =
      new ConcurrentHashMap<String, Set<String>>();
  private final Set<String> uniqueLibsNeeded = Sets.newHashSet();
  private final ConcurrentMap<String, Set<String>> componentBroadcastReceiver =
      new ConcurrentHashMap<String, Set<String>>();

  /**
   * Map used to hold the names and paths of resources that we've written out
   * as temp files.
   * Don't use this map directly. Please call getResource() with one of the
   * constants above to get the (temp file) path to a resource.
   */
  private static final ConcurrentMap<String, File> resources =
      new ConcurrentHashMap<String, File>();

  // TODO(user,lizlooney): i18n here and in lines below that call String.format(...)
  private static final String COMPILATION_ERROR =
      "Error: Your build failed due to an error when compiling %s.\n";
  private static final String ERROR_IN_STAGE =
      "Error: Your build failed due to an error in the %s stage, " +
      "not because of an error in your program.\n";
  private static final String ICON_ERROR =
      "Error: Your build failed because %s cannot be used as the application icon.\n";
  private static final String NO_USER_CODE_ERROR =
      "Error: No user code exists.\n";

  private final int childProcessRamMb;  // Maximum ram that can be used by a child processes, in MB.
  private final boolean isForCompanion;
  private final Project project;
  // Streams for normal build output, internal errors, and user-visible errors.
  private final PrintStream out;
  private final PrintStream err;
  private final PrintStream userErrors;
  private static boolean isUdoo = true;

  private File libsDir; // The directory that will contain any native libraries for packaging
  private String dexCacheDir;
  private boolean hasSecondDex = false; // True if classes2.dex should be added to the APK

  // Build info for built-in and external components, and the component types
  // actually referenced by the project being compiled.
  private JSONArray simpleCompsBuildInfo;
  private JSONArray extCompsBuildInfo;
  private Set<String> simpleCompTypes;  // types needed by the project
  private Set<String> extCompTypes; // types needed by the project

  private static final Logger LOG = Logger.getLogger(Compiler.class.getName());
/*
 * Generate the set of Android permissions needed by this project.
 */
@VisibleForTesting
void generatePermissions() {
  try {
    loadJsonInfo(permissionsNeeded, PERMISSIONS_TARGET);
    if (project != null) {    // Only do this if we have a project (testing doesn't provide one :-( ).
      LOG.log(Level.INFO, "usesLocation = " + project.getUsesLocation());
      if (project.getUsesLocation().equals("True")) { // Add location permissions if any WebViewer requests it
        Set<String> locationPermissions = Sets.newHashSet(); // via a Property.
        // See ProjectEditor.recordLocationSettings()
        locationPermissions.add("android.permission.ACCESS_FINE_LOCATION");
        locationPermissions.add("android.permission.ACCESS_COARSE_LOCATION");
        locationPermissions.add("android.permission.ACCESS_MOCK_LOCATION");
        permissionsNeeded.put("com.google.appinventor.components.runtime.WebViewer", locationPermissions);
      }
    }
  } catch (IOException | JSONException e) {
    // Both are fatal here (JSONException shouldn't actually ever happen).
    e.printStackTrace();
    userErrors.print(String.format(ERROR_IN_STAGE, "Permissions"));
  }
  int permissionCount = 0;
  for (Set<String> compPermissions : permissionsNeeded.values()) {
    permissionCount += compPermissions.size();
  }
  System.out.println("Permissions needed, n = " + permissionCount);
}
// Just used for testing: exposes the component-type -> permissions map
// populated by generatePermissions().
@VisibleForTesting
Map<String,Set<String>> getPermissions() {
  return permissionsNeeded;
}
/*
 * Generate the set of Android libraries needed by this project.
 */
@VisibleForTesting
void generateLibNames() {
  try {
    loadJsonInfo(libsNeeded, LIBRARIES_TARGET);
  } catch (IOException | JSONException e) {
    // Both are fatal here (JSONException shouldn't actually ever happen).
    e.printStackTrace();
    userErrors.print(String.format(ERROR_IN_STAGE, "Libraries"));
  }
  int libCount = 0;
  for (Set<String> compLibs : libsNeeded.values()) {
    libCount += compLibs.size();
  }
  System.out.println("Libraries needed, n = " + libCount);
}
/*
 * Generate the set of conditionally included libraries needed by this project.
 */
@VisibleForTesting
void generateNativeLibNames() {
  try {
    loadJsonInfo(nativeLibsNeeded, NATIVE_TARGET);
  } catch (IOException | JSONException e) {
    // Both are fatal here (JSONException shouldn't actually ever happen).
    e.printStackTrace();
    userErrors.print(String.format(ERROR_IN_STAGE, "Native Libraries"));
  }
  int nativeLibCount = 0;
  for (Set<String> compNativeLibs : nativeLibsNeeded.values()) {
    nativeLibCount += compNativeLibs.size();
  }
  System.out.println("Native Libraries needed, n = " + nativeLibCount);
}
/*
 * Generate the set of conditionally included assets needed by this project.
 */
@VisibleForTesting
void generateAssets() {
  try {
    loadJsonInfo(assetsNeeded, ASSETS_TARGET);
  } catch (IOException | JSONException e) {
    // Both are fatal here (JSONException shouldn't actually ever happen).
    e.printStackTrace();
    userErrors.print(String.format(ERROR_IN_STAGE, "Assets"));
  }
  int assetCount = 0;
  for (Set<String> compAssets : assetsNeeded.values()) {
    assetCount += compAssets.size();
  }
  System.out.println("Component assets needed, n = " + assetCount);
}
/**
 * For each component that declares a Broadcast Receiver, a String will be generated, containing the class
 * name of the broadcast receiver and followed by any actions it needs (all as one String separated by commas).
 * @return Set of Strings, one for each Broadcast Receiver, or null if loading the build info failed
 */
@VisibleForTesting
Set<String> generateBroadcastReceiver() {
  try {
    loadJsonInfo(componentBroadcastReceiver, BROADCAST_RECEIVER_TARGET);
  } catch (IOException | JSONException e) {
    // Both are fatal here (JSONException shouldn't actually ever happen).
    e.printStackTrace();
    userErrors.print(String.format(ERROR_IN_STAGE, "BroadcastReceiver"));
    return null;
  }
  // Flatten the per-component sets into one set of receiver descriptors.
  Set<String> broadcastReceivers = Sets.newHashSet();
  for (Set<String> receivers : componentBroadcastReceiver.values()) {
    broadcastReceivers.addAll(receivers);
  }
  return broadcastReceivers;
}
// This patches around a bug in AAPT (and other places in Android)
// where an ampersand in the name string breaks AAPT.
// Replaces every literal "&" with the word "and" (plain substring replace, no regex).
private String cleanName(String name) {
  return name.replace("&", "and");
}
/*
* Creates an AndroidManifest.xml file needed for the Android application.
*/
private boolean writeAndroidManifest(File manifestFile, Set<String> broadcastReceiversNeeded) {
// Create AndroidManifest.xml
String mainClass = project.getMainClass();
String packageName = Signatures.getPackageName(mainClass);
String className = Signatures.getClassName(mainClass);
String projectName = project.getProjectName();
String vCode = (project.getVCode() == null) ? DEFAULT_VERSION_CODE : project.getVCode();
String vName = (project.getVName() == null) ? DEFAULT_VERSION_NAME : cleanName(project.getVName());
String aName = (project.getAName() == null) ? DEFAULT_APP_NAME : cleanName(project.getAName());
String minSDK = DEFAULT_MIN_SDK;
LOG.log(Level.INFO, "VCode: " + project.getVCode());
LOG.log(Level.INFO, "VName: " + project.getVName());
// TODO(user): Use com.google.common.xml.XmlWriter
try {
BufferedWriter out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(manifestFile), "UTF-8"));
out.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n");
// TODO(markf) Allow users to set versionCode and versionName attributes.
// See http://developer.android.com/guide/publishing/publishing.html for
// more info.
out.write("<manifest " +
"xmlns:android=\"http://schemas.android.com/apk/res/android\" " +
"package=\"" + packageName + "\" " +
// TODO(markf): uncomment the following line when we're ready to enable publishing to the
// Android Market.
"android:versionCode=\"" + vCode +"\" " + "android:versionName=\"" + vName + "\" " +
">\n");
// If we are building the Wireless Debugger (AppInventorDebugger) add the uses-feature tag which
// is used by the Google Play store to determine which devices the app is available for. By adding
// these lines we indicate that we use these features BUT THAT THEY ARE NOT REQUIRED so it is ok
// to make the app available on devices that lack the feature. Without these lines the Play Store
// makes a guess based on permissions and assumes that they are required features.
if (isForCompanion) {
out.write(" <uses-feature android:name=\"android.hardware.bluetooth\" android:required=\"false\" />\n");
out.write(" <uses-feature android:name=\"android.hardware.location\" android:required=\"false\" />\n");
out.write(" <uses-feature android:name=\"android.hardware.telephony\" android:required=\"false\" />\n");
out.write(" <uses-feature android:name=\"android.hardware.location.network\" android:required=\"false\" />\n");
out.write(" <uses-feature android:name=\"android.hardware.location.gps\" android:required=\"false\" />\n");
out.write(" <uses-feature android:name=\"android.hardware.microphone\" android:required=\"false\" />\n");
out.write(" <uses-feature android:name=\"android.hardware.touchscreen\" android:required=\"false\" />\n");
out.write(" <uses-feature android:name=\"android.hardware.camera\" android:required=\"false\" />\n");
out.write(" <uses-feature android:name=\"android.hardware.camera.autofocus\" android:required=\"false\" />\n");
out.write(" <uses-feature android:name=\"android.hardware.wifi\" />\n"); // We actually require wifi
}
// Firebase requires at least API 10 (Gingerbread MR1)
if (simpleCompTypes.contains("com.google.appinventor.components.runtime.FirebaseDB") && !isForCompanion) {
minSDK = LEVEL_GINGERBREAD_MR1;
}
// ADK requires SDK 12+
if (isUdoo) {
minSDK = "21";
}
// make permissions unique by putting them in one set
Set<String> permissions = Sets.newHashSet();
for (Set<String> compPermissions : permissionsNeeded.values()) {
permissions.addAll(compPermissions);
}
for (String permission : permissions) {
out.write(" <uses-permission android:name=\"" + permission + "\" />\n");
}
if (isForCompanion) { // This is so ACRA can do a logcat on phones older then Jelly Bean
out.write(" <uses-permission android:name=\"android.permission.READ_LOGS\" />\n");
}
// TODO(markf): Change the minSdkVersion below if we ever require an SDK beyond 1.5.
// The market will use the following to filter apps shown to devices that don't support
// the specified SDK version. We right now support building for minSDK 4.
// We might also want to allow users to specify minSdk version or targetSDK version.
out.write(" <uses-sdk android:minSdkVersion=\"" + minSDK + "\" android:targetSdkVersion=\"21\" />\n");
if (isUdoo) {
out.write("<uses-feature android:name=\"android.hardware.usb.accessory\" android:required=\"true\" />");
}
out.write(" <application ");
// TODO(markf): The preparing to publish doc at
// http://developer.android.com/guide/publishing/preparing.html suggests removing the
// 'debuggable=true' but I'm not sure that our users would want that while they're still
// testing their packaged apps. Maybe we should make that an option, somehow.
// TODONE(jis): Turned off debuggable. No one really uses it and it represents a security
// risk for App Inventor App end-users.
out.write("android:debuggable=\"false\" ");
if (aName.equals("")) {
out.write("android:label=\"" + projectName + "\" ");
} else {
out.write("android:label=\"" + aName + "\" ");
}
out.write("android:icon=\"@drawable/ya\" ");
if (isForCompanion) { // This is to hook into ACRA
out.write("android:name=\"com.google.appinventor.components.runtime.ReplApplication\" ");
} else {
out.write("android:name=\"com.google.appinventor.components.runtime.multidex.MultiDexApplication\" ");
}
out.write(">\n");
if (isUdoo) {
out.write("<uses-library android:name=\"com.android.future.usb.accessory\" />");
out.write("<meta-data android:name=\"com.google.android.gms.version\" android:value=\"9452000\" />");
out.write(" <meta-data android:name=\"com.google.android.gms.vision.DEPENDENCIES\" android:value=\"face\" />");
}
for (Project.SourceDescriptor source : project.getSources()) {
String formClassName = source.getQualifiedName();
// String screenName = formClassName.substring(formClassName.lastIndexOf('.') + 1);
boolean isMain = formClassName.equals(mainClass);
if (isMain) {
// The main activity of the application.
out.write(" <activity android:name=\"." + className + "\" ");
} else {
// A secondary activity of the application.
out.write(" <activity android:name=\"" + formClassName + "\" ");
}
// This line is here for NearField and NFC. It keeps the activity from
// restarting every time NDEF_DISCOVERED is signaled.
// TODO: Check that this doesn't screw up other components. Also, it might be
// better to do this programmatically when the NearField component is created, rather
// than here in the manifest.
if (simpleCompTypes.contains("com.google.appinventor.components.runtime.NearField") &&
!isForCompanion && isMain) {
out.write("android:launchMode=\"singleTask\" ");
} else if (isMain && isForCompanion) {
out.write("android:launchMode=\"singleTop\" ");
}
out.write("android:windowSoftInputMode=\"stateHidden\" ");
// The keyboard option prevents the app from stopping when a external (bluetooth)
// keyboard is attached.
out.write("android:configChanges=\"orientation|keyboardHidden|keyboard\">\n");
out.write(" <intent-filter>\n");
out.write(" <action android:name=\"android.intent.action.MAIN\" />\n");
if (isMain) {
out.write(" <category android:name=\"android.intent.category.LAUNCHER\" />\n");
}
out.write(" </intent-filter>\n");
if (isMain && isUdoo) {
out.write("<intent-filter>");
out.write(" <action android:name=\"android.hardware.usb.action.USB_ACCESSORY_ATTACHED\" />");
out.write(" <action android:name=\"android.hardware.usb.action.USB_ACCESSORY_DETACHED\" />");
out.write("</intent-filter>");
out.write("<meta-data android:name=\"android.hardware.usb.action.USB_ACCESSORY_ATTACHED\" android:resource=\"@xml/usb_accessory_filter\"/>");
}
if (simpleCompTypes.contains("com.google.appinventor.components.runtime.NearField") &&
!isForCompanion && isMain) {
// make the form respond to NDEF_DISCOVERED
// this will trigger the form's onResume method
// For now, we're handling text/plain only,but we can add more and make the Nearfield
// component check the type.
out.write(" <intent-filter>\n");
out.write(" <action android:name=\"android.nfc.action.NDEF_DISCOVERED\" />\n");
out.write(" <category android:name=\"android.intent.category.DEFAULT\" />\n");
out.write(" <data android:mimeType=\"text/plain\" />\n");
out.write(" </intent-filter>\n");
}
out.write(" </activity>\n");
}
// Add ListPickerActivity to the manifest only if a ListPicker component is used in the app
if (simpleCompTypes.contains("com.google.appinventor.components.runtime.ListPicker")){
// TODO(sharon): temporary until we add support for new activities
String LIST_ACTIVITY_CLASS =
"com.google.appinventor.components.runtime.ListPickerActivity";
out.write(" <activity android:name=\"" + LIST_ACTIVITY_CLASS + "\" " +
"android:configChanges=\"orientation|keyboardHidden\" " +
"android:screenOrientation=\"behind\">\n");
out.write(" </activity>\n");
}
// Add WebViewActivity to the manifest only if a Twitter component is used in the app
if (simpleCompTypes.contains("com.google.appinventor.components.runtime.Twitter")){
String WEBVIEW_ACTIVITY_CLASS =
"com.google.appinventor.components.runtime.WebViewActivity";
out.write(" <activity android:name=\"" + WEBVIEW_ACTIVITY_CLASS + "\" " +
"android:configChanges=\"orientation|keyboardHidden\" " +
"android:screenOrientation=\"behind\">\n");
out.write(" <intent-filter>\n");
out.write(" <action android:name=\"android.intent.action.MAIN\" />\n");
out.write(" </intent-filter>\n");
out.write(" </activity>\n");
}
if (simpleCompTypes.contains("com.google.appinventor.components.runtime.BarcodeScanner")) {
// Barcode Activity
out.write(" <activity android:name=\"com.google.zxing.client.android.AppInvCaptureActivity\"\n");
out.write(" android:screenOrientation=\"landscape\"\n");
out.write(" android:stateNotNeeded=\"true\"\n");
out.write(" android:configChanges=\"orientation|keyboardHidden\"\n");
out.write(" android:theme=\"@android:style/Theme.NoTitleBar.Fullscreen\"\n");
out.write(" android:windowSoftInputMode=\"stateAlwaysHidden\" />\n");
}
// The format for each Broadcast Receiver in broadcastReceiversNeeded is "className,Action1,Action2,..." where
// the class name is mandatory, and actions are optional (and as many as needed).
for (String broadcastReceiver : broadcastReceiversNeeded) {
String[] brNameAndActions = broadcastReceiver.split(",");
if (brNameAndActions.length == 0) continue;
out.write(
"<receiver android:name=\"" + brNameAndActions[0] + "\" >\n");
if (brNameAndActions.length > 1){
out.write(" <intent-filter>\n");
for (int i = 1; i < brNameAndActions.length; i++) {
out.write(" <action android:name=\"" + brNameAndActions[i] + "\" />\n");
}
out.write(" </intent-filter>\n");
}
out.write("</receiver> \n");
}
out.write(" </application>\n");
out.write("</manifest>\n");
out.close();
} catch (IOException e) {
e.printStackTrace();
userErrors.print(String.format(ERROR_IN_STAGE, "manifest"));
return false;
}
return true;
}
/**
 * Builds a YAIL project.
 *
 * <p>Runs the full build pipeline: icon preparation, animation resources,
 * AndroidManifest generation, native libs, assets, Kawa compilation, DX,
 * AAPT packaging, apk sealing, signing, and zipaligning. Each stage returns
 * early with {@code false} on failure after reporting to {@code userErrors}.
 *
 * @param project  project to build
 * @param compTypes component types used in the project
 * @param out  stdout stream for compiler messages
 * @param err  stderr stream for compiler messages
 * @param userErrors stream to write user-visible error messages
 * @param isForCompanion whether this build produces the MIT AI Companion apk
 * @param keystoreFilePath absolute path of the keystore used for signing
 * @param childProcessRam   maximum RAM for child processes, in MBs.
 * @param dexCacheDir directory used to cache pre-dexed libraries (may be null)
 * @return  {@code true} if the compilation succeeds, {@code false} otherwise
 * @throws JSONException
 * @throws IOException
 */
public static boolean compile(Project project, Set<String> compTypes,
                              PrintStream out, PrintStream err, PrintStream userErrors,
                              boolean isForCompanion, String keystoreFilePath,
                              int childProcessRam, String dexCacheDir) throws IOException, JSONException {
  long start = System.currentTimeMillis();

  // Create a new compiler instance for the compilation
  Compiler compiler = new Compiler(project, compTypes, out, err, userErrors, isForCompanion,
                                   childProcessRam, dexCacheDir);

  // Gather per-component build requirements (assets, jars, native libs,
  // permissions) before any stage below consumes them.
  compiler.generateAssets();
  compiler.generateLibNames();
  compiler.generateNativeLibNames();
  compiler.generatePermissions();

  // Create build directory.
  File buildDir = createDir(project.getBuildDirectory());

  // Prepare application icon.
  out.println("________Preparing application icon");
  File resDir = createDir(buildDir, "res");
  File drawableDir = createDir(resDir, "drawable");
  if (!compiler.prepareApplicationIcon(new File(drawableDir, "ya.png"))) {
    return false;
  }
  setProgress(10);

  // UDOO builds additionally ship a USB accessory filter resource that the
  // manifest's meta-data entry points at.
  if (isUdoo) {
    File xmlDir = createDirectory(resDir, "xml");
    File file = new File(xmlDir, "usb_accessory_filter.xml");
    String usbRes = "<?xml version=\"1.0\" encoding=\"utf-8\"?><resources><usb-accessory manufacturer=\"UDOO\" model=\"AppInventor\" version=\"1.0\" /></resources>";
    try {
      BufferedWriter writer = new BufferedWriter(new FileWriter(file));
      writer.write(usbRes);
      writer.close();
    } catch (IOException e) {
      e.printStackTrace();
      return false;
    }
  }

  // Create anim directory and animation xml files
  out.println("________Creating animation xml");
  File animDir = createDir(resDir, "anim");
  if (!compiler.createAnimationXml(animDir)) {
    return false;
  }

  // Determine broadcast receiver names and actions.
  out.println("________Determining BR names and actions");
  Set<String> broadcastReceiversNeeded = compiler.generateBroadcastReceiver();
  if (broadcastReceiversNeeded == null) {
    return false;
  }
  setProgress(15);

  // Generate AndroidManifest.xml
  out.println("________Generating manifest file");
  File manifestFile = new File(buildDir, "AndroidManifest.xml");
  if (!compiler.writeAndroidManifest(manifestFile, broadcastReceiversNeeded)) {
    return false;
  }
  setProgress(20);

  // Insert native libraries
  out.println("________Attaching native libraries");
  if (!compiler.insertNativeLibs(buildDir)) {
    return false;
  }

  // Add raw assets to sub-directory of project assets.
  out.println("________Attaching component assets");
  if (!compiler.attachCompAssets()) {
    return false;
  }

  // Create class files.
  out.println("________Compiling source files");
  File classesDir = createDir(buildDir, "classes");
  if (!compiler.generateClasses(classesDir)) {
    return false;
  }
  setProgress(35);

  // Invoke dx on class files
  out.println("________Invoking DX");
  // TODO(markf): Running DX is now pretty slow (~25 sec overhead the first time and ~15 sec
  // overhead for subsequent runs).  I think it's because of the need to dx the entire
  // kawa runtime every time.  We should probably only do that once and then copy all the
  // kawa runtime dx files into the generated classes.dex (which would only contain the
  // files compiled for this project).
  // Aargh.  It turns out that there's no way to manipulate .dex files to do the above.  An
  // Android guy suggested an alternate approach of shipping the kawa runtime .dex file as
  // data with the application and then creating a new DexClassLoader using that .dex file
  // and with the original app class loader as the parent of the new one.
  // TODONE(zhuowei): Now using the new Android DX tool to merge dex files
  // Needs to specify a writable cache dir on the command line that persists after shutdown
  // Each pre-dexed file is identified via its MD5 hash (since the standard Android SDK's
  // method of identifying via a hash of the path won't work when files
  // are copied into temporary storage) and processed via a hacked up version of
  // Android SDK's Dex Ant task
  File tmpDir = createDirectory(buildDir, "tmp");
  String dexedClassesDir = tmpDir.getAbsolutePath();
  if (!compiler.runDx(classesDir, dexedClassesDir, false)) {
    return false;
  }
  setProgress(85);

  // Invoke aapt to package everything up
  out.println("________Invoking AAPT");
  File deployDir = createDir(buildDir, "deploy");
  String tmpPackageName = deployDir.getAbsolutePath() + SLASH +
      project.getProjectName() + ".ap_";
  if (!compiler.runAaptPackage(manifestFile, resDir, tmpPackageName)) {
    return false;
  }
  setProgress(90);

  // Seal the apk with ApkBuilder
  out.println("________Invoking ApkBuilder");
  String apkAbsolutePath = deployDir.getAbsolutePath() + SLASH +
      project.getProjectName() + ".apk";
  if (!compiler.runApkBuilder(apkAbsolutePath, tmpPackageName, dexedClassesDir)) {
    return false;
  }
  setProgress(95);

  // Sign the apk file
  out.println("________Signing the apk file");
  if (!compiler.runJarSigner(apkAbsolutePath, keystoreFilePath)) {
    return false;
  }

  // ZipAlign the apk file
  out.println("________ZipAligning the apk file");
  if (!compiler.runZipAlign(apkAbsolutePath, tmpDir)) {
    return false;
  }
  setProgress(100);

  out.println("Build finished in " +
      ((System.currentTimeMillis() - start) / 1000.0) + " seconds");

  return true;
}
/*
 * Creates all the animation xml files.
 *
 * @param animDir the res/anim directory to populate
 * @return true if every file was written, false on the first I/O failure
 */
private boolean createAnimationXml(File animDir) {
  // Store the filenames, and their contents into a HashMap
  // so that we can easily add more, and also to iterate
  // through creating the files.
  Map<String, String> files = new HashMap<String, String>();
  files.put("fadein.xml", AnimationXmlConstants.FADE_IN_XML);
  files.put("fadeout.xml", AnimationXmlConstants.FADE_OUT_XML);
  files.put("hold.xml", AnimationXmlConstants.HOLD_XML);
  files.put("zoom_enter.xml", AnimationXmlConstants.ZOOM_ENTER);
  files.put("zoom_exit.xml", AnimationXmlConstants.ZOOM_EXIT);
  files.put("zoom_enter_reverse.xml", AnimationXmlConstants.ZOOM_ENTER_REVERSE);
  files.put("zoom_exit_reverse.xml", AnimationXmlConstants.ZOOM_EXIT_REVERSE);
  files.put("slide_exit.xml", AnimationXmlConstants.SLIDE_EXIT);
  files.put("slide_enter.xml", AnimationXmlConstants.SLIDE_ENTER);
  files.put("slide_exit_reverse.xml", AnimationXmlConstants.SLIDE_EXIT_REVERSE);
  files.put("slide_enter_reverse.xml", AnimationXmlConstants.SLIDE_ENTER_REVERSE);
  files.put("slide_v_exit.xml", AnimationXmlConstants.SLIDE_V_EXIT);
  files.put("slide_v_enter.xml", AnimationXmlConstants.SLIDE_V_ENTER);
  files.put("slide_v_exit_reverse.xml", AnimationXmlConstants.SLIDE_V_EXIT_REVERSE);
  files.put("slide_v_enter_reverse.xml", AnimationXmlConstants.SLIDE_V_ENTER_REVERSE);

  // Iterate over entries directly instead of keySet() + get() lookups.
  for (Map.Entry<String, String> entry : files.entrySet()) {
    File file = new File(animDir, entry.getKey());
    if (!writeXmlFile(file, entry.getValue())) {
      return false;
    }
  }
  return true;
}
/*
 * Writes the given string input to the provided file.
 *
 * @param file destination file (overwritten if it already exists)
 * @param input text to write
 * @return true on success, false if an I/O error occurred
 */
private boolean writeXmlFile(File file, String input) {
  // try-with-resources guarantees the writer is closed (and buffered data
  // flushed) even when write() throws; the original leaked the file handle
  // on failure.
  try (BufferedWriter writer = new BufferedWriter(new FileWriter(file))) {
    writer.write(input);
  } catch (IOException e) {
    e.printStackTrace();
    return false;
  }
  return true;
}
/*
 * Seals the apk by invoking the ApkBuilder API directly rather than its main
 * method, because main can call System.exit(1) and take the server down.
 *
 * @param apkAbsolutePath where the sealed apk is written
 * @param zipArchive the aapt-produced .ap_ resource archive
 * @param dexedClassesDir directory holding classes.dex (and classes2.dex)
 * @return true if the apk was sealed, false on any failure
 */
private boolean runApkBuilder(String apkAbsolutePath, String zipArchive, String dexedClassesDir) {
  try {
    String primaryDex = dexedClassesDir + File.separator + "classes.dex";
    ApkBuilder apkBuilder = new ApkBuilder(apkAbsolutePath, zipArchive, primaryDex, null, System.out);
    if (hasSecondDex) {
      // A second dex file was produced by runDx; include it under its
      // canonical in-apk name.
      File secondDex = new File(dexedClassesDir + File.separator + "classes2.dex");
      apkBuilder.addFile(secondDex, "classes2.dex");
    }
    apkBuilder.sealApk();
    return true;
  } catch (Exception e) {
    // This is fatal.
    e.printStackTrace();
    LOG.warning("YAIL compiler - ApkBuilder failed.");
    err.println("YAIL compiler - ApkBuilder failed.");
    userErrors.print(String.format(ERROR_IN_STAGE, "ApkBuilder"));
    return false;
  }
}
/**
 * Creates a new YAIL compiler.
 *
 * @param project  project to build
 * @param compTypes component types used in the project
 * @param out  stdout stream for compiler messages
 * @param err  stderr stream for compiler messages
 * @param userErrors stream to write user-visible error messages
 * @param isForCompanion whether this build targets the MIT AI Companion
 * @param childProcessMaxRam  maximum RAM for child processes, in MBs.
 * @param dexCacheDir directory for cached pre-dexed libraries (may be null)
 */
@VisibleForTesting
Compiler(Project project, Set<String> compTypes, PrintStream out, PrintStream err,
         PrintStream userErrors, boolean isForCompanion,
         int childProcessMaxRam, String dexCacheDir) {
  this.project = project;

  // Resolve the requested component types and load their build metadata
  // before any build stage runs (see prepareCompTypes/readBuildInfo).
  prepareCompTypes(compTypes);
  readBuildInfo();

  this.out = out;
  this.err = err;
  this.userErrors = userErrors;
  this.isForCompanion = isForCompanion;
  this.childProcessRamMb = childProcessMaxRam;
  this.dexCacheDir = dexCacheDir;
}
/*
 * Runs the Kawa compiler in a separate process to generate classes. Returns false if not able to
 * create a class file for every source file in the project.
 *
 * As a side effect, we generate uniqueLibsNeeded which contains a set of libraries used by
 * runDx. Each library appears in the set only once (which is why it is a set!). This is
 * important because when we Dex the libraries, a given library can only appear once.
 *
 * @param classesDir output directory for the generated .class files
 * @return true if every screen produced a class file, false otherwise
 */
private boolean generateClasses(File classesDir) {
  try {
    List<Project.SourceDescriptor> sources = project.getSources();
    List<String> sourceFileNames = Lists.newArrayListWithCapacity(sources.size());
    List<String> classFileNames = Lists.newArrayListWithCapacity(sources.size());
    boolean userCodeExists = false;
    for (Project.SourceDescriptor source : sources) {
      String sourceFileName = source.getFile().getAbsolutePath();
      LOG.log(Level.INFO, "source file: " + sourceFileName);
      // Derive the path relative to the project's src directory so the
      // .class file mirrors the source layout (8 == "/../src/".length()).
      int srcIndex = sourceFileName.indexOf("/../src/");
      String sourceFileRelativePath = sourceFileName.substring(srcIndex + 8);
      String classFileName = (classesDir.getAbsolutePath() + "/" + sourceFileRelativePath)
          .replace(YoungAndroidConstants.YAIL_EXTENSION, ".class");
      // NOTE(review): this Windows branch drops the source-relative path and
      // applies the extension replace to the classes directory itself, which
      // looks wrong (every screen would map to the same name) -- confirm
      // against a Windows build before relying on classFileNames there.
      if (System.getProperty("os.name").startsWith("Windows")) {
        classFileName = classesDir.getAbsolutePath()
            .replace(YoungAndroidConstants.YAIL_EXTENSION, ".class");
      }

      // Check whether user code exists by seeing if a left parenthesis exists at the beginning of
      // a line in the file
      // TODO(user): Replace with more robust test of empty source file.
      if (!userCodeExists) {
        Reader fileReader = new FileReader(sourceFileName);
        try {
          while (fileReader.ready()) {
            int c = fileReader.read();
            if (c == '(') {
              userCodeExists = true;
              break;
            }
          }
        } finally {
          fileReader.close();
        }
      }
      sourceFileNames.add(sourceFileName);
      classFileNames.add(classFileName);
    }

    if (!userCodeExists) {
      userErrors.print(NO_USER_CODE_ERROR);
      return false;
    }

    // Construct the class path including component libraries (jars)
    String classpath =
        getResource(KAWA_RUNTIME) + COLON +
        getResource(ACRA_RUNTIME) + COLON +
        getResource(SIMPLE_ANDROID_RUNTIME_JAR) + COLON;

    // attach the jars of external comps
    for (String type : extCompTypes) {
      String sourcePath = getExtCompDirPath(type) + SIMPLE_ANDROID_RUNTIME_JAR;
      classpath += sourcePath + COLON;
    }

    // Add component library names to classpath
    for (String type : libsNeeded.keySet()) {
      for (String lib : libsNeeded.get(type)) {
        String sourcePath = "";
        String pathSuffix = RUNTIME_FILES_DIR + lib;

        if (simpleCompTypes.contains(type)) {
          sourcePath = getResource(pathSuffix);
        } else if (extCompTypes.contains(type)) {
          sourcePath = getExtCompDirPath(type) + pathSuffix;
        } else {
          // The type is neither built-in nor an extension: give up.
          userErrors.print(String.format(ERROR_IN_STAGE, "Compile"));
          return false;
        }

        // Side effect consumed later by runDx: each library is recorded once.
        uniqueLibsNeeded.add(sourcePath);

        classpath += sourcePath + COLON;
      }
    }

    classpath +=
        getResource(ANDROID_RUNTIME);

    System.out.println("Libraries Classpath = " + classpath);

    String yailRuntime = getResource(YAIL_RUNTIME);
    List<String> kawaCommandArgs = Lists.newArrayList();
    // Reserve 200 MB of the child-process budget for non-heap overhead.
    int mx = childProcessRamMb - 200;
    Collections.addAll(kawaCommandArgs,
        System.getProperty("java.home") + "/bin/java",
        "-Dfile.encoding=UTF-8",
        "-mx" + mx + "M",
        "-cp", classpath,
        "kawa.repl",
        "-f", yailRuntime,
        "-d", classesDir.getAbsolutePath(),
        "-P", Signatures.getPackageName(project.getMainClass()) + ".",
        "-C");
    // TODO(lizlooney) - we are currently using (and have always used) absolute paths for the
    // source file names. The resulting .class files contain references to the source file names,
    // including the name of the tmp directory that contains them. We may be able to avoid that
    // by using source file names that are relative to the project root and using the project
    // root as the working directory for the Kawa compiler process.
    kawaCommandArgs.addAll(sourceFileNames);
    kawaCommandArgs.add(yailRuntime);
    String[] kawaCommandLine = kawaCommandArgs.toArray(new String[kawaCommandArgs.size()]);

    long start = System.currentTimeMillis();

    // Capture Kawa compiler stderr. The ODE server parses out the warnings and errors and adds
    // them to the protocol buffer for logging purposes. (See
    // buildserver/ProjectBuilder.processCompilerOutout.
    ByteArrayOutputStream kawaOutputStream = new ByteArrayOutputStream();
    boolean kawaSuccess;
    // Kawa and DX are serialized through the same lock to bound memory use.
    synchronized (SYNC_KAWA_OR_DX) {
      kawaSuccess = Execution.execute(null, kawaCommandLine,
          System.out, new PrintStream(kawaOutputStream));
    }
    if (!kawaSuccess) {
      LOG.log(Level.SEVERE, "Kawa compile has failed.");
    }
    String kawaOutput = kawaOutputStream.toString();
    out.print(kawaOutput);
    String kawaCompileTimeMessage = "Kawa compile time: " +
        ((System.currentTimeMillis() - start) / 1000.0) + " seconds";
    out.println(kawaCompileTimeMessage);
    LOG.info(kawaCompileTimeMessage);

    // Check that all of the class files were created.
    // If they weren't, return with an error.
    for (String classFileName : classFileNames) {
      File classFile = new File(classFileName);
      if (!classFile.exists()) {
        LOG.log(Level.INFO, "Can't find class file: " + classFileName);
        String screenName = classFileName.substring(classFileName.lastIndexOf('/') + 1,
            classFileName.lastIndexOf('.'));
        userErrors.print(String.format(COMPILATION_ERROR, screenName));
        return false;
      }
    }
  } catch (IOException e) {
    e.printStackTrace();
    userErrors.print(String.format(ERROR_IN_STAGE, "Compile"));
    return false;
  }

  return true;
}
/**
 * Signs the apk in place by invoking the JDK's jarsigner tool.
 *
 * @param apkAbsolutePath path of the apk to sign
 * @param keystoreAbsolutePath keystore containing the "AndroidKey" alias
 * @return {@code true} if signing succeeded, {@code false} otherwise
 */
private boolean runJarSigner(String apkAbsolutePath, String keystoreAbsolutePath) {
  // TODO(user): maybe make a command line flag for the jarsigner location
  String javaHome = System.getProperty("java.home");
  // This works on Mac OS X.
  File jarsignerFile = new File(javaHome + SLASH + "bin" +
      SLASH + "jarsigner");
  if (!jarsignerFile.exists()) {
    // This works when a JDK is installed with the JRE.
    jarsignerFile = new File(javaHome + SLASH + ".." + SLASH + "bin" +
        SLASH + "jarsigner");
    // On Windows the executable carries an .exe suffix; note this replaces
    // the candidate above unconditionally on Windows hosts.
    if (System.getProperty("os.name").startsWith("Windows")) {
      jarsignerFile = new File(javaHome + SLASH + ".." + SLASH + "bin" +
          SLASH + "jarsigner.exe");
    }
    if (!jarsignerFile.exists()) {
      LOG.warning("YAIL compiler - could not find jarsigner.");
      err.println("YAIL compiler - could not find jarsigner.");
      userErrors.print(String.format(ERROR_IN_STAGE, "JarSigner"));
      return false;
    }
  }

  // NOTE(review): SHA1 digests and MD5withRSA signatures are cryptographically
  // weak; presumably kept for installability on old Android releases -- confirm
  // before changing. The "android" store password matches the conventional
  // Android debug keystore password.
  String[] jarsignerCommandLine = {
      jarsignerFile.getAbsolutePath(),
      "-digestalg", "SHA1",
      "-sigalg", "MD5withRSA",
      "-keystore", keystoreAbsolutePath,
      "-storepass", "android",
      apkAbsolutePath,
      "AndroidKey"
  };
  if (!Execution.execute(null, jarsignerCommandLine, System.out, System.err)) {
    LOG.warning("YAIL compiler - jarsigner execution failed.");
    err.println("YAIL compiler - jarsigner execution failed.");
    userErrors.print(String.format(ERROR_IN_STAGE, "JarSigner"));
    return false;
  }
  return true;
}
/**
 * Aligns the signed apk on 4-byte boundaries with the SDK's zipalign tool
 * and copies the aligned output back over the original apk.
 *
 * @param apkAbsolutePath apk to align in place
 * @param tmpDir scratch directory for the intermediate aligned apk
 * @return true if alignment and the copy-back both succeeded
 */
private boolean runZipAlign(String apkAbsolutePath, File tmpDir) {
  // TODO(user): add zipalign tool appinventor->lib->android->tools->linux and windows
  // zipalign fails if the project assets directory is missing.
  createDir(project.getAssetsDirectory());

  // Select the zipalign binary matching the host operating system.
  String os = System.getProperty("os.name");
  String toolPath = null;
  if (os.equals("Mac OS X")) {
    toolPath = MAC_ZIPALIGN_TOOL;
  } else if (os.equals("Linux")) {
    toolPath = LINUX_ZIPALIGN_TOOL;
  } else if (os.startsWith("Windows")) {
    toolPath = WINDOWS_ZIPALIGN_TOOL;
  }
  if (toolPath == null) {
    LOG.warning("YAIL compiler - cannot run ZIPALIGN on OS " + os);
    err.println("YAIL compiler - cannot run ZIPALIGN on OS " + os);
    userErrors.print(String.format(ERROR_IN_STAGE, "ZIPALIGN"));
    return false;
  }

  // TODO: create tmp file for zipaling result
  String alignedApk = tmpDir.getAbsolutePath() + SLASH + "zipaligned.apk";
  // zipalign -f -v 4 infile.zip outfile.zip
  String[] commandLine = {
      getResource(toolPath),
      "-f",
      "4",
      apkAbsolutePath,
      alignedApk
  };

  long began = System.currentTimeMillis();
  // Using System.err and System.out on purpose. Don't want to pollute build
  // messages with tools output.
  if (!Execution.execute(null, commandLine, System.out, System.err)) {
    LOG.warning("YAIL compiler - ZIPALIGN execution failed.");
    err.println("YAIL compiler - ZIPALIGN execution failed.");
    userErrors.print(String.format(ERROR_IN_STAGE, "ZIPALIGN"));
    return false;
  }
  if (!copyFile(alignedApk, apkAbsolutePath)) {
    LOG.warning("YAIL compiler - ZIPALIGN file copy failed.");
    err.println("YAIL compiler - ZIPALIGN file copy failed.");
    userErrors.print(String.format(ERROR_IN_STAGE, "ZIPALIGN"));
    return false;
  }

  String timing = "ZIPALIGN time: " +
      ((System.currentTimeMillis() - began) / 1000.0) + " seconds";
  out.println(timing);
  LOG.info(timing);
  return true;
}
/*
 * Loads the icon for the application, either a user provided one or the default one,
 * and writes it as a PNG to the given output file.
 *
 * @param outputPngFile destination (res/drawable/ya.png)
 * @return false only when a *user-specified* icon cannot be loaded; failures
 *         with the built-in default icon are tolerated and still return true.
 */
private boolean prepareApplicationIcon(File outputPngFile) {
  String userSpecifiedIcon = Strings.nullToEmpty(project.getIcon());
  try {
    BufferedImage icon;
    if (!userSpecifiedIcon.isEmpty()) {
      File iconFile = new File(project.getAssetsDirectory(), userSpecifiedIcon);
      icon = ImageIO.read(iconFile);
      if (icon == null) {
        // This can happen if the iconFile isn't an image file.
        // For example, icon is null if the file is a .wav file.
        // TODO(lizlooney) - This happens if the user specifies a .ico file. We should fix that.
        userErrors.print(String.format(ICON_ERROR, userSpecifiedIcon));
        return false;
      }
    } else {
      // Load the default image.
      icon = ImageIO.read(Compiler.class.getResource(DEFAULT_ICON));
    }
    ImageIO.write(icon, "png", outputPngFile);
  } catch (Exception e) {
    e.printStackTrace();
    // If the user specified the icon, this is fatal.
    if (!userSpecifiedIcon.isEmpty()) {
      userErrors.print(String.format(ICON_ERROR, userSpecifiedIcon));
      return false;
    }
    // Deliberate best-effort: a failure loading or writing the *default*
    // icon is swallowed and the build proceeds without aborting.
  }
  return true;
}
/**
 * Dexes the compiled classes plus required libraries, splitting into
 * classes.dex / classes2.dex when the library set is too large for one file.
 *
 * Side effect: sets {@code hasSecondDex} when a second dex file is produced,
 * which runApkBuilder later reads.
 *
 * @param classesDir directory of compiled .class files (never pre-dex cached)
 * @param dexedClassesDir output directory for classes.dex / classes2.dex
 * @param secondTry true on the retry pass, where no libraries are merged into
 *        the first dex file and everything spills into classes2.dex
 * @return true if dexing succeeded
 */
private boolean runDx(File classesDir, String dexedClassesDir, boolean secondTry) {
  List<File> libList = new ArrayList<File>();
  List<File> inputList = new ArrayList<File>();
  List<File> class2List = new ArrayList<File>();
  inputList.add(classesDir); //this is a directory, and won't be cached into the dex cache
  inputList.add(new File(getResource(SIMPLE_ANDROID_RUNTIME_JAR)));
  inputList.add(new File(getResource(KAWA_RUNTIME)));
  inputList.add(new File(getResource(ACRA_RUNTIME)));

  // uniqueLibsNeeded was populated by generateClasses; each library must
  // appear exactly once across the dex inputs.
  for (String lib : uniqueLibsNeeded) {
    libList.add(new File(lib));
  }

  // BEGIN DEBUG -- XXX --
  // System.err.println("runDx -- libraries");
  // for (File aFile : inputList) {
  //   System.err.println(" inputList => " + aFile.getAbsolutePath());
  // }
  // for (File aFile : libList) {
  //   System.err.println(" libList => " + aFile.getAbsolutePath());
  // }
  // END DEBUG -- XXX --

  // attach the jars of external comps to the libraries list
  for (String type : extCompTypes) {
    String sourcePath = getExtCompDirPath(type) + SIMPLE_ANDROID_RUNTIME_JAR;
    libList.add(new File(sourcePath));
  }

  int offset = libList.size();
  // Note: The choice of 12 libraries is arbitrary. We note that things
  // worked to put all libraries into the first classes.dex file when we
  // had 16 libraries and broke at 17. So this is a conservative number
  // to try.
  if (!secondTry) {           // First time through, try base + 12 libraries
    if (offset > 12)
      offset = 12;
  } else {
    offset = 0;               // Add NO libraries the second time through!
  }
  for (int i = 0; i < offset; i++) {
    inputList.add(libList.get(i));
  }

  if (libList.size() - offset > 0) { // Any left over for classes2?
    for (int i = offset; i < libList.size(); i++) {
      class2List.add(libList.get(i));
    }
  }

  DexExecTask dexTask = new DexExecTask();
  dexTask.setExecutable(getResource(DX_JAR));
  dexTask.setOutput(dexedClassesDir + File.separator + "classes.dex");
  dexTask.setChildProcessRamMb(childProcessRamMb);
  // Without a cache dir the dex merger is disabled entirely.
  if (dexCacheDir == null) {
    dexTask.setDisableDexMerger(true);
  } else {
    createDir(new File(dexCacheDir));
    dexTask.setDexedLibs(dexCacheDir);
  }

  long startDx = System.currentTimeMillis();
  // Using System.err and System.out on purpose. Don't want to pollute build messages with
  // tools output
  boolean dxSuccess;
  // Kawa and DX are serialized through the same lock to bound memory use.
  synchronized (SYNC_KAWA_OR_DX) {
    setProgress(50);
    dxSuccess = dexTask.execute(inputList);
    if (dxSuccess && (class2List.size() > 0)) {
      setProgress(60);
      dexTask.setOutput(dexedClassesDir + File.separator + "classes2.dex");
      inputList = new ArrayList<File>();
      dxSuccess = dexTask.execute(class2List);
      setProgress(75);
      hasSecondDex = true;
    } else if (!dxSuccess) { // The initial dx blew out, try more conservative
      LOG.info("DX execution failed, trying with fewer libraries.");
      if (secondTry) { // Already tried the more conservative approach!
        LOG.warning("YAIL compiler - DX execution failed (secondTry!).");
        err.println("YAIL compiler - DX execution failed.");
        userErrors.print(String.format(ERROR_IN_STAGE, "DX"));
        return false;
      } else {
        // Single-level recursion: rerun with offset = 0.
        return runDx(classesDir, dexedClassesDir, true);
      }
    }
  }
  if (!dxSuccess) {
    LOG.warning("YAIL compiler - DX execution failed.");
    err.println("YAIL compiler - DX execution failed.");
    userErrors.print(String.format(ERROR_IN_STAGE, "DX"));
    return false;
  }
  String dxTimeMessage = "DX time: " +
      ((System.currentTimeMillis() - startDx) / 1000.0) + " seconds";
  out.println(dxTimeMessage);
  LOG.info(dxTimeMessage);

  return true;
}
/**
 * Packages the manifest, resources and assets into the intermediate .ap_
 * archive by invoking the SDK's aapt tool.
 *
 * @param manifestFile generated AndroidManifest.xml
 * @param resDir the build's res directory
 * @param tmpPackageName output path of the .ap_ archive
 * @return true if aapt succeeded
 */
private boolean runAaptPackage(File manifestFile, File resDir, String tmpPackageName) {
  // aapt fails if the project assets directory is missing.
  createDir(project.getAssetsDirectory());

  // Select the aapt binary matching the host operating system.
  String os = System.getProperty("os.name");
  String aaptTool = null;
  if (os.equals("Mac OS X")) {
    aaptTool = MAC_AAPT_TOOL;
  } else if (os.equals("Linux")) {
    aaptTool = LINUX_AAPT_TOOL;
  } else if (os.startsWith("Windows")) {
    aaptTool = WINDOWS_AAPT_TOOL;
  }
  if (aaptTool == null) {
    LOG.warning("YAIL compiler - cannot run AAPT on OS " + os);
    err.println("YAIL compiler - cannot run AAPT on OS " + os);
    userErrors.print(String.format(ERROR_IN_STAGE, "AAPT"));
    return false;
  }

  String[] commandLine = {
      getResource(aaptTool),
      "package",
      "-v",
      "-f",
      "-M", manifestFile.getAbsolutePath(),
      "-S", resDir.getAbsolutePath(),
      "-A", project.getAssetsDirectory().getAbsolutePath(),
      "-I", getResource(ANDROID_RUNTIME),
      "-F", tmpPackageName,
      libsDir.getAbsolutePath()
  };

  long began = System.currentTimeMillis();
  // Using System.err and System.out on purpose. Don't want to pollute build
  // messages with tools output.
  if (!Execution.execute(null, commandLine, System.out, System.err)) {
    LOG.warning("YAIL compiler - AAPT execution failed.");
    err.println("YAIL compiler - AAPT execution failed.");
    userErrors.print(String.format(ERROR_IN_STAGE, "AAPT"));
    return false;
  }

  String timing = "AAPT time: " +
      ((System.currentTimeMillis() - began) / 1000.0) + " seconds";
  out.println(timing);
  LOG.info(timing);
  return true;
}
/**
 * Copies the native libraries required by the used components into the
 * build's libs directory.
 *
 * Native libraries are targeted for particular processor architectures.
 * Here, non-default architectures (ARMv5TE is default) are identified with
 * suffixes before being placed in the appropriate directory with their
 * suffix removed. Also initializes the {@code libsDir} field used later by
 * runAaptPackage.
 *
 * @param buildDir the project build directory
 * @return true on success, false on I/O failure or an unknown component type
 */
private boolean insertNativeLibs(File buildDir){
  libsDir = createDir(buildDir, LIBS_DIR_NAME);
  File armeabiDir = createDir(libsDir, ARMEABI_DIR_NAME);
  File armeabiV7aDir = createDir(libsDir, ARMEABI_V7A_DIR_NAME);

  try {
    // Iterate over map entries directly instead of keySet() + get() lookups.
    for (Map.Entry<String, Set<String>> entry : nativeLibsNeeded.entrySet()) {
      String type = entry.getKey();
      for (String lib : entry.getValue()) {
        boolean isV7a = lib.endsWith(ARMEABI_V7A_SUFFIX);

        String sourceDirName = isV7a ? ARMEABI_V7A_DIR_NAME : ARMEABI_DIR_NAME;
        File targetDir = isV7a ? armeabiV7aDir : armeabiDir;
        // Strip the architecture suffix from the library's target name.
        lib = isV7a ? lib.substring(0, lib.length() - ARMEABI_V7A_SUFFIX.length()) : lib;

        String sourcePath = "";
        String pathSuffix = RUNTIME_FILES_DIR + sourceDirName + SLASH + lib;

        if (simpleCompTypes.contains(type)) {
          sourcePath = getResource(pathSuffix);
        } else if (extCompTypes.contains(type)) {
          // External components get their own nested target directory.
          sourcePath = getExtCompDirPath(type) + pathSuffix;
          targetDir = createDir(targetDir, EXT_COMPS_DIR_NAME);
          targetDir = createDir(targetDir, type);
        } else {
          userErrors.print(String.format(ERROR_IN_STAGE, "Native Code"));
          return false;
        }

        Files.copy(new File(sourcePath), new File(targetDir, lib));
      }
    }
    return true;
  } catch (IOException e) {
    e.printStackTrace();
    userErrors.print(String.format(ERROR_IN_STAGE, "Native Code"));
    return false;
  }
}
/**
 * Copies the raw assets required by the used components into a sub-directory
 * of the project assets directory so they are packaged into the apk.
 *
 * @return true on success, false on I/O failure or an unknown component type
 */
private boolean attachCompAssets() {
  createDir(project.getAssetsDirectory()); // Needed to insert resources.
  try {
    // Gather non-library assets to be added to apk's Asset directory.
    // The assets directory has been created before this.
    File compAssetDir = createDir(project.getAssetsDirectory(),
        ASSET_DIRECTORY);

    // Iterate over map entries directly instead of keySet() + get() lookups.
    for (Map.Entry<String, Set<String>> entry : assetsNeeded.entrySet()) {
      String type = entry.getKey();
      for (String assetName : entry.getValue()) {
        File targetDir = compAssetDir;
        String sourcePath = "";
        String pathSuffix = RUNTIME_FILES_DIR + assetName;

        if (simpleCompTypes.contains(type)) {
          sourcePath = getResource(pathSuffix);
        } else if (extCompTypes.contains(type)) {
          // External components get their own nested target directory.
          sourcePath = getExtCompDirPath(type) + pathSuffix;
          targetDir = createDir(targetDir, EXT_COMPS_DIR_NAME);
          targetDir = createDir(targetDir, type);
        } else {
          userErrors.print(String.format(ERROR_IN_STAGE, "Assets"));
          return false;
        }

        Files.copy(new File(sourcePath), new File(targetDir, assetName));
      }
    }
    return true;
  } catch (IOException e) {
    e.printStackTrace();
    userErrors.print(String.format(ERROR_IN_STAGE, "Assets"));
    return false;
  }
}
/**
 * Writes out the given resource as a temp file and returns the absolute path.
 * Caches the location of the files, so we can reuse them.
 *
 * @param resourcePath the name of the resource
 */
static synchronized String getResource(String resourcePath) {
  try {
    // Fast path: this resource has already been extracted.
    File cached = resources.get(resourcePath);
    if (cached != null) {
      return cached.getAbsolutePath();
    }

    // Split the basename into temp-file prefix and suffix.
    String basename = PathUtil.basename(resourcePath);
    int dot = basename.lastIndexOf(".");
    String prefix = (dot == -1) ? basename : basename.substring(0, dot);
    String suffix = (dot == -1) ? "" : basename.substring(dot);

    // File.createTempFile requires a prefix of at least three characters.
    while (prefix.length() < 3) {
      prefix = prefix + "_";
    }

    File tempFile = File.createTempFile(prefix, suffix);
    tempFile.setExecutable(true);
    tempFile.deleteOnExit();
    tempFile.getParentFile().mkdirs();
    Files.copy(Resources.newInputStreamSupplier(Compiler.class.getResource(resourcePath)),
        tempFile);
    resources.put(resourcePath, tempFile);
    return tempFile.getAbsolutePath();
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
/*
 * Loads permissions and information on component libraries and assets.
 *
 * Merges the build info of built-in (simple) and external components, then
 * fills infoMap with type -> set of targetInfo entries for every used type.
 * Idempotent: returns immediately if infoMap is already populated.
 *
 * @param infoMap map to fill (e.g. libsNeeded, assetsNeeded, ...)
 * @param targetInfo JSON attribute to extract (e.g. "libraries", "assets")
 */
private void loadJsonInfo(ConcurrentMap<String, Set<String>> infoMap, String targetInfo)
    throws IOException, JSONException {
  synchronized (infoMap) {
    if (!infoMap.isEmpty()) {
      return;
    }

    JSONArray buildInfo = new JSONArray(
        "[" + simpleCompsBuildInfo.join(",") + "," +
        extCompsBuildInfo.join(",") + "]");

    for (int i = 0; i < buildInfo.length(); ++i) {
      JSONObject compJson = buildInfo.getJSONObject(i);
      JSONArray infoArray = null;
      String type = compJson.getString("type");
      try {
        infoArray = compJson.getJSONArray(targetInfo);
      } catch (JSONException e) {
        // Older compiled extensions will not have a broadcastReceiver
        // defined. Rather than require them all to be recompiled, we
        // treat the missing attribute as empty.
        if (e.getMessage().contains("broadcastReceiver")) {
          LOG.log(Level.INFO, "Component \"" + type + "\" does not have a broadcast receiver.");
          continue;
        } else {
          throw e;
        }
      }

      // Skip components that are not actually used by this project.
      if (!simpleCompTypes.contains(type) && !extCompTypes.contains(type)) {
        continue;
      }

      Set<String> infoSet = Sets.newHashSet();
      for (int j = 0; j < infoArray.length(); ++j) {
        String info = infoArray.getString(j);
        infoSet.add(info);
      }

      if (!infoSet.isEmpty()) {
        infoMap.put(type, infoSet);
      }
    }
  }
}
/**
* Copy one file to another. If destination file does not exist, it is created.
*
* @param srcPath absolute path to source file
* @param dstPath absolute path to destination file
* @return {@code true} if the copy succeeds, {@code false} otherwise
*/
private static Boolean copyFile(String srcPath, String dstPath) {
try {
FileInputStream in = new FileInputStream(srcPath);
FileOutputStream out = new FileOutputStream(dstPath);
byte[] buf = new byte[1024];
int len;
while ((len = in.read(buf)) > 0) {
out.write(buf, 0, len);
}
in.close();
out.close();
}
catch (IOException e) {
e.printStackTrace();
return false;
}
return true;
}
  /**
   * Creates a new directory (if it doesn't exist already).
   * Note: only the leaf directory is created; parent directories must
   * already exist ({@code mkdir}, not {@code mkdirs}).
   *
   * @param dir new directory
   * @return new directory
   */
  private static File createDir(File dir) {
    if (!dir.exists()) {
      dir.mkdir();
    }
    return dir;
  }
/**
* Creates a new directory (if it doesn't exist already).
*
* @param parentDir parent directory of new directory
* @param name name of new directory
* @return new directory
*/
private static File createDir(File parentDir, String name) {
File dir = new File(parentDir, name);
if (!dir.exists()) {
dir.mkdir();
}
return dir;
}
/**
* Creates a new directory (if it doesn't exist already).
*
* @param parentDirectory parent directory of new directory
* @param name name of new directory
* @return new directory
*/
private static File createDirectory(File parentDirectory, String name) {
File dir = new File(parentDirectory, name);
if (!dir.exists()) {
dir.mkdir();
}
return dir;
}
  /**
   * Records the overall build progress and logs it.
   *
   * @param increments progress percentage (0-100)
   * @return the stored progress value
   */
  private static int setProgress(int increments) {
    Compiler.currentProgress = increments;
    LOG.info("The current progress is "
        + Compiler.currentProgress + "%");
    return Compiler.currentProgress;
  }
  /**
   * Returns the current build progress percentage. When progress has reached
   * 100, it is reported once and then reset to 10 -- presumably so a
   * subsequent build observed by a polling client starts from a non-zero
   * value. TODO(review): confirm this reset-on-read behavior is intentional.
   */
  public static int getProgress() {
    if (Compiler.currentProgress==100) {
      Compiler.currentProgress = 10;
      return 100;
    } else {
      return Compiler.currentProgress;
    }
  }
  /**
   * Loads component build info for bundled (simple) components and for each
   * used external extension into {@code simpleCompsBuildInfo} and
   * {@code extCompsBuildInfo}. Any failure is printed to stderr and otherwise
   * swallowed, which may leave the arrays partially populated.
   */
  private void readBuildInfo() {
    try {
      simpleCompsBuildInfo = new JSONArray(Resources.toString(
          Compiler.class.getResource(COMP_BUILD_INFO), Charsets.UTF_8));

      extCompsBuildInfo = new JSONArray();
      for (String type : extCompTypes) {
        // .../assets/external_comps/com.package.MyExtComp/files/component_build_info.json
        File extCompRuntimeFileDir = new File(getExtCompDirPath(type) + RUNTIME_FILES_DIR);
        String jsonFileName = "component_build_info.json";
        File jsonFile = new File(extCompRuntimeFileDir, jsonFileName);
        extCompsBuildInfo.put(new JSONObject(Resources.toString(
            jsonFile.toURI().toURL(), Charsets.UTF_8)));
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
private void prepareCompTypes(Set<String> neededTypes) {
try {
JSONArray buildInfo = new JSONArray(Resources.toString(
Compiler.class.getResource(COMP_BUILD_INFO), Charsets.UTF_8));
Set<String> allSimpleTypes = Sets.newHashSet();
for (int i = 0; i < buildInfo.length(); ++i) {
JSONObject comp = buildInfo.getJSONObject(i);
allSimpleTypes.add(comp.getString("type"));
}
simpleCompTypes = Sets.newHashSet(neededTypes);
simpleCompTypes.retainAll(allSimpleTypes);
extCompTypes = Sets.newHashSet(neededTypes);
extCompTypes.removeAll(allSimpleTypes);
} catch (Exception e) {
e.printStackTrace();
}
}
  /**
   * Returns the assets-directory path of the given external component type.
   * Side effect: ensures the project's assets directory exists.
   *
   * @param type fully qualified external component type
   */
  private String getExtCompDirPath(String type) {
    createDir(project.getAssetsDirectory());
    return project.getAssetsDirectory().getAbsolutePath() + SLASH +
        EXT_COMPS_DIR_NAME + SLASH + type;
  }
}
| apache-2.0 |
wolaoda13/foundation | src/main/java/com/sunpeng/foundation/modules/cms/entity/Site.java | 3127 | /**
* Copyright © 2012-2014 <a href="https://github.com/thinkgem/jeesite">JeeSite</a> All rights reserved.
*/
package com.sunpeng.foundation.modules.cms.entity;
import org.apache.commons.lang3.StringUtils;
import org.hibernate.validator.constraints.Length;
import com.sunpeng.foundation.common.persistence.DataEntity;
import com.sunpeng.foundation.modules.sys.utils.UserUtils;
/**
 * Site entity for the CMS module.
 * @author ThinkGem
 * @version 2013-05-15
 */
public class Site extends DataEntity<Site> {

	private static final long serialVersionUID = 1L;
	private String name; // site name
	private String title; // site title
	private String logo; // site logo
	private String description;// description; filling this in helps search engine optimization
	private String keywords;// keywords; filling these in helps search engine optimization
	private String theme; // theme
	private String copyright;// copyright information
	private String customIndexView;// custom home page view file
	private String domain;

	public Site() {
		super();
	}

	public Site(String id){
		this();
		this.id = id;
	}

	@Length(min=1, max=100)
	public String getName() {
		return name;
	}

	public void setName(String name) {
		this.name = name;
	}

	@Length(min=1, max=100)
	public String getTitle() {
		return title;
	}

	public void setTitle(String title) {
		this.title = title;
	}

	public String getLogo() {
		return logo;
	}

	public void setLogo(String logo) {
		this.logo = logo;
	}

	@Length(min=0, max=255)
	public String getDescription() {
		return description;
	}

	public void setDescription(String description) {
		this.description = description;
	}

	@Length(min=0, max=255)
	public String getKeywords() {
		return keywords;
	}

	public void setKeywords(String keywords) {
		this.keywords = keywords;
	}

	@Length(min=1, max=255)
	public String getTheme() {
		return theme;
	}

	public void setTheme(String theme) {
		this.theme = theme;
	}

	public String getCopyright() {
		return copyright;
	}

	public void setCopyright(String copyright) {
		this.copyright = copyright;
	}

	public String getCustomIndexView() {
		return customIndexView;
	}

	public void setCustomIndexView(String customIndexView) {
		this.customIndexView = customIndexView;
	}

	/**
	 * Returns the id of the default site.
	 */
	public static String defaultSiteId(){
		return "1";
	}

	/**
	 * Returns true if the given id denotes the default (main) site.
	 */
	public static boolean isDefault(String id){
		return id != null && id.equals(defaultSiteId());
	}

	/**
	 * Returns the id of the site currently being edited, falling back to the
	 * default site when none is cached for the current user.
	 */
	public static String getCurrentSiteId(){
		String siteId = (String)UserUtils.getCache("siteId");
		return StringUtils.isNotBlank(siteId)?siteId:defaultSiteId();
	}

	/**
	 * Base path of the front-end templates.
	 */
	public static final String TPL_BASE = "/WEB-INF/views/modules/cms/front/themes";

	/**
	 * Returns the theme (template scheme) path,
	 * e.g. /WEB-INF/views/modules/cms/front/themes/jeesite
	 *
	 * @return
	 */
	public String getSolutionPath() {
		return TPL_BASE + "/" + getTheme();
	}

	public String getDomain() {
		return domain;
	}

	public void setDomain(String domain) {
		this.domain = domain;
	}

}
dobermai/mqtt-irc-bot | src/main/java/de/dobermai/mqttbot/config/MQTTProperties.java | 2499 | /*
* Copyright 2013 Dominik Obermaier <dominik.obermaier@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.dobermai.mqttbot.config;
import com.netflix.governator.annotations.Configuration;
import javax.inject.Singleton;
import javax.validation.constraints.Max;
import javax.validation.constraints.Min;
/**
* @author Dominik Obermaier
*/
@Singleton
public class MQTTProperties {

    MQTTProperties() {
        // Do not instantiate this on your own. Inject it!
    }

    /** Hostname of the MQTT broker to connect to. */
    @Configuration("broker.host")
    private String brokerHost = "broker.mqttdashboard.com";

    /** Broker TCP port; validated to be a legal port number. */
    @Configuration("broker.port")
    @Min(1)
    @Max(65535)
    private int brokerPort = 1883;

    /** MQTT keep-alive interval in seconds. */
    @Configuration("mqtt.keepAlive")
    @Min(3)
    private int mqttKeepAlive = 60;

    /** Client identifier presented to the broker. */
    @Configuration("mqtt.clientId")
    private String mqttClientId = "mqtt-bot";

    /** Whether each session starts with a clean (non-persistent) state. */
    @Configuration(value = "mqtt.cleanSession", ignoreTypeMismatch = true)
    private boolean mqttcleanSession = true;

    /** Broker username; empty string means anonymous access. */
    @Configuration("mqtt.username")
    private String mqttUsername = "";

    /** Broker password; empty string means anonymous access. */
    @Configuration("mqtt.password")
    private String mqttPassword = "";

    /** Prefix prepended to MQTT topics that mirror IRC channels. */
    @Configuration("mqtt.topicPrefix")
    private String mqttTopicPrefix = "irc";

    // Stand-in character for the IRC channel marker when building topic names.
    // NOTE(review): presumably '%' replaces '#' because '#' is an MQTT
    // wildcard -- confirm against the code that builds topic names.
    @Configuration("mqtt.ircChannelPrefix")
    private String mqttIrcChannelPrefix = "%";

    public String getBrokerHost() {
        return brokerHost;
    }

    public int getBrokerPort() {
        return brokerPort;
    }

    public int getMqttKeepAlive() {
        return mqttKeepAlive;
    }

    public String getMqttClientId() {
        return mqttClientId;
    }

    public boolean isMqttcleanSession() {
        return mqttcleanSession;
    }

    public String getMqttUsername() {
        return mqttUsername;
    }

    public String getMqttPassword() {
        return mqttPassword;
    }

    public String getMqttTopicPrefix() {
        return mqttTopicPrefix;
    }

    public String getMqttIrcChannelPrefix() {
        return mqttIrcChannelPrefix;
    }
}
| apache-2.0 |
wavelets/smile | SmileMath/src/smile/sort/PriorityQueue.java | 3408 | /******************************************************************************
* Confidential Proprietary *
* (c) Copyright Haifeng Li 2011, All Rights Reserved *
******************************************************************************/
package smile.sort;
/**
* Priority Queue for index items.
*
* @author Haifeng Li
*/
public class PriorityQueue {
/**
* The number of items in the queue.
*/
private int n;
/**
* The d-ary heap or d-heap is a generalization of the binary heap data
* structure whose non-leaf nodes have d children, instead of 2. Thus,
* a binary heap is a 2-heap.
*/
private int d;
/**
* External array of priority.
*/
private double[] a;
/**
* The array of item indices.
*/
private int[] pq;
/**
* The inverse array qp allows the priority-queue to treat the array indices
* as handles.
*/
private int[] qp;
/**
* Priority comparison of item i and j.
* @param i item index
* @param j item index
*/
private boolean less(int i, int j) {
return a[pq[i]] < a[pq[j]];
}
/**
* Swap i and j items of pq and qp.
* @param i item index
* @param j item index
*/
private void swap(int i, int j) {
int t = pq[i];
pq[i] = pq[j];
pq[j] = t;
qp[pq[i]] = i;
qp[pq[j]] = j;
}
/**
* fix up.
*/
private void swim(int k) {
while (k > 1 && less(k, (k + d - 2) / d)) {
swap(k, (k + d - 2) / d);
k = (k + d - 2) / d;
}
}
/**
* fix down.
*/
private void sink(int k, int N) {
int j;
while ((j = d * (k - 1) + 2) <= N) {
for (int i = j + 1; i < j + d && i <= N; i++) {
if (less(i, j)) {
j = i;
}
}
if (!(less(j, k))) {
break;
}
swap(k, j);
k = j;
}
}
/**
* Constructor. Default use a 3-heap.
* @param a external array of priority. Lower value means higher priority.
*/
public PriorityQueue(double[] a) {
this(3, a);
}
/**
* Constructor.
* @param d d-heap.
* @param a external array of priority. Lower value means higher priority.
*/
public PriorityQueue(int d, double[] a) {
this.d = d;
this.a = a;
this.n = 0;
pq = new int[a.length + 1];
qp = new int[a.length + 1];
}
/**
* Returns true if the queue is empty.
*/
public boolean empty() {
return n == 0;
}
/**
* Insert a new item into queue.
* @param v the index of item.
*/
public void insert(int v) {
pq[++n] = v;
qp[v] = n;
swim(n);
}
/**
* Removes and returns the index of item with minimum value (highest priority).
*/
public int poll() {
swap(1, n);
sink(1, n - 1);
return pq[n--];
}
/**
* The value of item k is lower (higher priority) now.
*/
public void lower(int k) {
swim(qp[k]);
}
/**
* The priority of item k has changed.
*/
public void change(int k) {
swim(qp[k]);
sink(qp[k], n);
}
}
| apache-2.0 |
osanchezUM/guizmo | src/lasser/sketch/impl/BorderImpl.java | 3394 | /**
*/
package lasser.sketch.impl;
import lasser.sketch.Border;
import lasser.sketch.BorderType;
import lasser.sketch.SketchPackage;
import org.eclipse.emf.common.notify.Notification;
import org.eclipse.emf.ecore.EClass;
import org.eclipse.emf.ecore.impl.ENotificationImpl;
import org.eclipse.emf.ecore.impl.EObjectImpl;
/**
 * <!-- begin-user-doc -->
 * An implementation of the model object '<em><b>Border</b></em>'.
 * Holds a single {@link BorderType} attribute and exposes the EMF reflective
 * accessors (eGet/eSet/eUnset/eIsSet) generated for it. Code is
 * EMF-generated; put manual notes only between the user-doc markers.
 * <!-- end-user-doc -->
 * <p>
 * The following features are implemented:
 * <ul>
 *   <li>{@link lasser.sketch.impl.BorderImpl#getType <em>Type</em>}</li>
 * </ul>
 * </p>
 *
 * @generated
 */
public class BorderImpl extends EObjectImpl implements Border {
	/**
	 * The default value of the '{@link #getType() <em>Type</em>}' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see #getType()
	 * @generated
	 * @ordered
	 */
	protected static final BorderType TYPE_EDEFAULT = BorderType.DEFAULT;

	/**
	 * The cached value of the '{@link #getType() <em>Type</em>}' attribute.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see #getType()
	 * @generated
	 * @ordered
	 */
	protected BorderType type = TYPE_EDEFAULT;

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	protected BorderImpl() {
		super();
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	protected EClass eStaticClass() {
		return SketchPackage.Literals.BORDER;
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public BorderType getType() {
		return type;
	}

	/**
	 * <!-- begin-user-doc -->
	 * Sets the border type, replacing {@code null} with {@link #TYPE_EDEFAULT},
	 * and fires an EMF SET notification when adapters are attached.
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public void setType(BorderType newType) {
		BorderType oldType = type;
		type = newType == null ? TYPE_EDEFAULT : newType;
		if (eNotificationRequired())
			eNotify(new ENotificationImpl(this, Notification.SET, SketchPackage.BORDER__TYPE, oldType, type));
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public Object eGet(int featureID, boolean resolve, boolean coreType) {
		switch (featureID) {
			case SketchPackage.BORDER__TYPE:
				return getType();
		}
		return super.eGet(featureID, resolve, coreType);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public void eSet(int featureID, Object newValue) {
		switch (featureID) {
			case SketchPackage.BORDER__TYPE:
				setType((BorderType)newValue);
				return;
		}
		super.eSet(featureID, newValue);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public void eUnset(int featureID) {
		switch (featureID) {
			case SketchPackage.BORDER__TYPE:
				setType(TYPE_EDEFAULT);
				return;
		}
		super.eUnset(featureID);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public boolean eIsSet(int featureID) {
		switch (featureID) {
			case SketchPackage.BORDER__TYPE:
				return type != TYPE_EDEFAULT;
		}
		return super.eIsSet(featureID);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public String toString() {
		if (eIsProxy()) return super.toString();

		StringBuffer result = new StringBuffer(super.toString());
		result.append(" (type: ");
		result.append(type);
		result.append(')');
		return result.toString();
	}

} //BorderImpl
| apache-2.0 |
yaye729125/gles | src/com/orange/entity/modifier/SkewYModifier.java | 2693 | package com.orange.entity.modifier;
import com.orange.entity.IEntity;
import com.orange.util.modifier.ease.EaseLinear;
import com.orange.util.modifier.ease.IEaseFunction;
/**
* (c) OrangeGame 2012
*
* @author OrangeGame <OGEngine@orangegame.cn>
*/
public class SkewYModifier extends SingleValueSpanEntityModifier {
	// ===========================================================
	// Constants
	// ===========================================================

	// ===========================================================
	// Fields
	// ===========================================================

	// ===========================================================
	// Constructors
	// ===========================================================

	/**
	 * Animates an entity's Y skew with a linear ease and no listener.
	 *
	 * @param pDuration duration of the modifier in seconds
	 * @param pFromSkewY Y skew applied when the modifier starts
	 * @param pToSkewY Y skew reached when the modifier finishes
	 */
	public SkewYModifier(final float pDuration, final float pFromSkewY, final float pToSkewY) {
		this(pDuration, pFromSkewY, pToSkewY, null, EaseLinear.getInstance());
	}

	/**
	 * Animates an entity's Y skew with a custom ease function.
	 */
	public SkewYModifier(final float pDuration, final float pFromSkewY, final float pToSkewY, final IEaseFunction pEaseFunction) {
		this(pDuration, pFromSkewY, pToSkewY, null, pEaseFunction);
	}

	/**
	 * Animates an entity's Y skew with a linear ease, notifying the listener.
	 */
	public SkewYModifier(final float pDuration, final float pFromSkewY, final float pToSkewY, final IEntityModifierListener pEntityModifierListener) {
		super(pDuration, pFromSkewY, pToSkewY, pEntityModifierListener, EaseLinear.getInstance());
	}

	/**
	 * Animates an entity's Y skew with a custom ease function and listener.
	 */
	public SkewYModifier(final float pDuration, final float pFromSkewY, final float pToSkewY, final IEntityModifierListener pEntityModifierListener, final IEaseFunction pEaseFunction) {
		super(pDuration, pFromSkewY, pToSkewY, pEntityModifierListener, pEaseFunction);
	}

	/**
	 * Copy constructor used by {@link #deepCopy()}.
	 */
	protected SkewYModifier(final SkewYModifier pSkewYModifier) {
		super(pSkewYModifier);
	}

	@Override
	public SkewYModifier deepCopy(){
		return new SkewYModifier(this);
	}

	// ===========================================================
	// Getter & Setter
	// ===========================================================

	// ===========================================================
	// Methods for/from SuperClass/Interfaces
	// ===========================================================

	// Applies the starting skew value when the modifier is (re)started.
	@Override
	protected void onSetInitialValue(final IEntity pEntity, final float pSkewY) {
		pEntity.setSkewY(pSkewY);
	}

	// Applies the interpolated skew value on every update.
	@Override
	protected void onSetValue(final IEntity pEntity, final float pPercentageDone, final float pSkewY) {
		pEntity.setSkewY(pSkewY);
	}

	// ===========================================================
	// Methods
	// ===========================================================

	// ===========================================================
	// Inner and Anonymous Classes
	// ===========================================================
}
| apache-2.0 |
tallycheck/data-support | datasolution-base/src/main/java/com/taoswork/tallycheck/datasolution/core/entityprotect/field/validate/TypedFieldValidator.java | 514 | package com.taoswork.tallycheck.datasolution.core.entityprotect.field.validate;
import com.taoswork.tallycheck.datadomain.base.entity.validation.error.ValidationError;
import com.taoswork.tallycheck.datasolution.core.entityprotect.field.handler.ITypedFieldHandler;
import com.taoswork.tallycheck.descriptor.metadata.IFieldMeta;
/**
 * Validates a single entity field value against its metadata.
 * Created by Gao Yuan on 2015/9/28.
 */
public interface TypedFieldValidator extends ITypedFieldHandler {

    /**
     * Validates {@code fieldValue} against the rules described by
     * {@code fieldMeta}.
     *
     * @param fieldMeta metadata describing the field being validated
     * @param fieldValue the value to check; may be any runtime type
     * @return a ValidationError describing the failure, or presumably
     *         {@code null} when the value is valid -- TODO(review): confirm
     *         the null-on-success convention against implementations
     */
    ValidationError validate(IFieldMeta fieldMeta, Object fieldValue);
}
| apache-2.0 |
ci-cd/gradle-swagger-plugin | src/main/java/com/github/cicd/swagger/docgen/remote/RemoteDocumentSource.java | 3734 | package com.github.cicd.swagger.docgen.remote;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.github.cicd.swagger.docgen.AbstractDocumentSource;
import com.wordnik.swagger.model.ApiDescription;
import com.wordnik.swagger.model.ApiListing;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.impl.client.DefaultHttpClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.collection.Iterator;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
/**
 * Document source that fetches a Swagger resource listing from a remote
 * service and then downloads and accepts the {@link ApiListing} of every
 * endpoint the root listing declares.
 *
 * @author: chekong
 * 05/13/2013
 */
public class RemoteDocumentSource extends AbstractDocumentSource {
    private static final Logger logger = LoggerFactory.getLogger(RemoteDocumentSource.class);
    private final URI requestURI;
    ObjectMapper mapper = new ObjectMapper();
    private boolean withFormatSuffix = true;

    public RemoteDocumentSource(URI requestURI, String outputTpl, String outputPath, String swaggerOutput, String mustacheFileRoot, boolean useOutputFlatStructure) {
        super(outputPath, outputTpl, swaggerOutput, mustacheFileRoot, useOutputFlatStructure);
        this.requestURI = requestURI;
        // Lenient Jackson configuration: omit nulls/defaults when writing and
        // ignore unknown properties when reading, so minor Swagger schema
        // drift does not break parsing.
        mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL);
        mapper.setSerializationInclusion(JsonInclude.Include.NON_DEFAULT);
        mapper.configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false);
        mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    }

    /**
     * Downloads the root resource listing from {@code requestURI}, then
     * fetches and accepts every endpoint listing it declares. Endpoints whose
     * URL cannot be built are logged and skipped.
     *
     * @throws IOException if an HTTP call returns a non-200 status or a
     *         response body cannot be parsed
     */
    public void loadDocuments() throws IOException {
        HttpClient client = new DefaultHttpClient();
        HttpResponse response = client.execute(new HttpGet(requestURI));
        if (response.getStatusLine().getStatusCode() != 200) {
            throw new IOException(requestURI + " got " + response.getStatusLine().getReasonPhrase());
        }
        ApiListing doc = mapper.readValue(response.getEntity().getContent(), ApiListing.class);
        setApiVersion(doc.apiVersion());
        setBasePath(doc.basePath());
        URIBuilder uriBuilder = new URIBuilder(requestURI);
        String path = uriBuilder.getPath();
        for (Iterator<ApiDescription> iterator = doc.apis().iterator(); iterator.hasNext(); ) {
            ApiDescription endPoint = iterator.next();
            // Endpoint paths come back as ".../api-docs.{format}/..."; strip
            // the api-docs segment to obtain the concrete sub-listing path.
            String _endpoint = endPoint.path().replaceAll("/api-docs\\.\\{format\\}", "");
            uriBuilder.setPath((path + "/" + _endpoint).replaceAll("\\/\\/", "/"));
            String newURL;
            try {
                newURL = uriBuilder.build().toString();
            } catch (URISyntaxException e) {
                logger.error("URL is not valid." + e);
                continue;
            }
            logger.info("calling " + newURL);
            response = client.execute(new HttpGet(newURL));
            if (response.getStatusLine().getStatusCode() != 200) {
                // Fail fast, consistent with the root-listing handling above.
                throw new IOException(newURL + " got " + response.getStatusLine().getReasonPhrase());
            }
            ApiListing _doc = mapper.readValue(response.getEntity().getContent(), ApiListing.class);
            // NOTE(review): the previous code looped over _doc.apis() here when
            // withFormatSuffix was false, but the loop body was commented out
            // and had no effect (the scala model objects are immutable). The
            // dead loop was removed; stripping ".{format}" suffixes from the
            // endpoint paths is still NOT implemented.
            acceptDocument(_doc);
        }
    }

    public void withFormatSuffix(boolean with) {
        this.withFormatSuffix = with;
    }
}
| apache-2.0 |
gfuil/bmap | app/src/main/java/me/gfuil/bmap/lite/adapter/SearchTipsAdatper.java | 2603 | package me.gfuil.bmap.lite.adapter;
import android.content.Context;
import android.view.View;
import android.view.ViewGroup;
import android.widget.Filter;
import android.widget.Filterable;
import android.widget.TextView;
import java.util.ArrayList;
import java.util.List;
import me.gfuil.bmap.lite.R;
import me.gfuil.bmap.lite.base.BaseListAdapter;
/**
 * List adapter for search keyword suggestions.
 * NOTE(review): the class name contains a typo ("Adatper"); it is left
 * unchanged because renaming would break external references.
 *
 * @author gfuil
 */
public class SearchTipsAdatper extends BaseListAdapter<String> implements Filterable {

    public SearchTipsAdatper(Context context, List<String> list) {
        super(context, list);
    }

    /**
     * Renders one suggestion row, recycling {@code convertView} when possible.
     */
    @Override
    public View getView(int position, View convertView, ViewGroup parent) {
        if (null == convertView){
            convertView = getInflater().inflate(R.layout.item_search_tips, parent, false);
        }
        TextView textTips = ViewHolder.get(convertView, R.id.text_keyword);

        textTips.setText(getList().get(position));

        return convertView;
    }

    // Lazily creates the (single) filter instance.
    @Override
    public Filter getFilter() {
        if (mFilter == null) {
            mFilter = new ArrayFilter();
        }
        return mFilter;
    }

    private ArrayFilter mFilter;
    // Snapshot of the full suggestion list, taken on first filter call.
    private List<String> mUnfilteredData;

    /**
     * Filter implementation. NOTE(review): performFiltering ignores the
     * content of a non-empty prefix -- it merely copies the non-null entries
     * of the unfiltered list. Presumably the actual narrowing happens
     * upstream (e.g. via a remote query); confirm this is intentional.
     */
    private class ArrayFilter extends Filter {
        @Override
        protected FilterResults performFiltering(CharSequence prefix) {
            FilterResults results = new FilterResults();

            if (mUnfilteredData == null) {
                mUnfilteredData = getList();
            }

            if (prefix == null || prefix.length() == 0) {
                results.values = getList();
                results.count = getList().size();
            } else {
                List<String> unfilteredValues = mUnfilteredData;
                int count = unfilteredValues.size();

                List<String> newValues = new ArrayList<>(count);

                for (int i = 0; i < count; i++) {
                    String pc = unfilteredValues.get(i);
                    if (pc != null) {
                        newValues.add(pc);
                    }
                }

                results.values = newValues;
                results.count = newValues.size();
            }

            return results;
        }

        @Override
        protected void publishResults(CharSequence constraint, FilterResults results) {
            //noinspection unchecked
            setList((List<String>) results.values, true);
            if (results.count > 0) {
                notifyDataSetChanged();
            } else {
                notifyDataSetInvalidated();
            }
        }
    }
}
| apache-2.0 |
Busy-Brain/Portal | src/main/java/com/mk/portal/framework/page/html/attributes/UsemapAttribute.java | 367 | package com.mk.portal.framework.page.html.attributes;
import com.mk.portal.framework.html.objects.Attribute;
/**
 * HTML "usemap" attribute: associates an element with a named image map.
 */
public class UsemapAttribute implements Attribute {

	/** Attribute value supplied at construction time. */
	private final String attributeValue;

	public UsemapAttribute(String val) {
		this.attributeValue = val;
	}

	@Override
	public String getName() {
		return "usemap";
	}

	@Override
	public String getValue() {
		return attributeValue;
	}
}
| apache-2.0 |
Top-Q/difido-reports | server/difido-server/src/main/java/il/co/topq/report/business/archive/Archiver.java | 457 | package il.co.topq.report.business.archive;
/**
 * Interface for services that are responsible for pulling executions from a
 * remote Difido server in order to archive them.
 *
 * @author Itai Agmon
 *
 */
public interface Archiver {

	/**
	 * Starts the archive process:<br>
	 * 1. Initializes the local reports folder if it does not exist.<br>
	 * 2. Fetches all the remote executions.<br>
	 * 3. Archives executions that are old and finished.<br>
	 *
	 */
	void archive();
}
| apache-2.0 |
consulo/consulo-android | tools-base/build-system/gradle-experimental/src/main/groovy/com/android/build/gradle/model/AndroidComponentModelPlugin.java | 11649 | /*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.build.gradle.model;
import static com.android.builder.core.VariantType.ANDROID_TEST;
import static com.android.builder.core.VariantType.UNIT_TEST;
import com.android.build.gradle.internal.ProductFlavorCombo;
import com.android.build.gradle.managed.AndroidConfig;
import com.android.build.gradle.managed.BuildType;
import com.android.build.gradle.managed.ProductFlavor;
import com.android.builder.core.BuilderConstants;
import com.android.sdklib.repository.FullRevision;
import com.android.utils.StringHelper;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.primitives.Ints;
import org.gradle.api.Action;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.internal.reflect.Instantiator;
import org.gradle.internal.service.ServiceRegistry;
import org.gradle.language.base.ProjectSourceSet;
import org.gradle.language.base.internal.registry.LanguageRegistration;
import org.gradle.language.base.internal.registry.LanguageRegistry;
import org.gradle.language.base.plugins.ComponentModelBasePlugin;
import org.gradle.model.Defaults;
import org.gradle.model.Finalize;
import org.gradle.model.Model;
import org.gradle.model.ModelMap;
import org.gradle.model.Mutate;
import org.gradle.model.Path;
import org.gradle.model.RuleSource;
import org.gradle.platform.base.BinaryType;
import org.gradle.platform.base.BinaryTypeBuilder;
import org.gradle.platform.base.ComponentBinaries;
import org.gradle.platform.base.ComponentType;
import org.gradle.platform.base.ComponentTypeBuilder;
import org.gradle.platform.base.LanguageType;
import org.gradle.platform.base.LanguageTypeBuilder;
import org.gradle.tooling.BuildException;
import java.io.File;
import java.util.List;
import java.util.Set;
/**
* Plugin to set up infrastructure for other android plugins.
*/
public class AndroidComponentModelPlugin implements Plugin<Project> {
    /**
     * The name of ComponentSpec created with android component model plugin.
     */
    public static final String COMPONENT_NAME = "android";
    //public static final Pattern GRADLE_ACCEPTABLE_VERSIONS = Pattern.compile("2\\.5.*");
    /** Gradle version prefix this plugin is known to work with. */
    public static final String GRADLE_ACCEPTABLE_VERSION = "2.5";
    /** System property that downgrades a failed Gradle version check to a warning. */
    private static final String GRADLE_VERSION_CHECK_OVERRIDE_PROPERTY =
            "com.android.build.gradle.overrideVersionCheck";
    /** Verifies the Gradle version, then applies the base component model plugin. */
    @Override
    public void apply(Project project) {
        checkGradleVersion(project);
        project.getPlugins().apply(ComponentModelBasePlugin.class);
    }
    /**
     * Fails the build unless the running Gradle version starts with
     * {@code GRADLE_ACCEPTABLE_VERSION}; when the override system property is
     * set, logs a warning and continues instead.
     * NOTE(review): the startsWith check also accepts versions such as
     * "2.50" -- confirm whether an exact "2.5.x" match is intended.
     */
    private static void checkGradleVersion(Project project) {
        String gradleVersion = project.getGradle().getGradleVersion();
        if (!gradleVersion.startsWith(GRADLE_ACCEPTABLE_VERSION)) {
            boolean allowNonMatching = Boolean.getBoolean(GRADLE_VERSION_CHECK_OVERRIDE_PROPERTY);
            File file = new File("gradle" + File.separator + "wrapper" + File.separator +
                    "gradle-wrapper.properties");
            String errorMessage = String.format(
                    "Gradle version %s is required. Current version is %s. " +
                            "If using the gradle wrapper, try editing the distributionUrl in %s " +
                            "to gradle-%s-all.zip",
                    GRADLE_ACCEPTABLE_VERSION, gradleVersion, file.getAbsolutePath(),
                    GRADLE_ACCEPTABLE_VERSION);
            if (allowNonMatching) {
                project.getLogger().warn(errorMessage);
                project.getLogger().warn("As %s is set, continuing anyways.",
                        GRADLE_VERSION_CHECK_OVERRIDE_PROPERTY);
            } else {
                throw new BuildException(errorMessage, null);
            }
        }
    }
@SuppressWarnings("MethodMayBeStatic")
public static class Rules extends RuleSource {
        /** Registers the "android" language type and its default source set implementation. */
        @LanguageType
        public void registerLanguage(LanguageTypeBuilder<AndroidLanguageSourceSet> builder) {
            builder.setLanguageName("android");
            builder.defaultImplementation(AndroidLanguageSourceSet.class);
        }
        /**
         * Create "android" model block. The empty body is intentional:
         * registering the managed type is all that is required here.
         */
        @Model("android")
        public void android(AndroidConfig androidModel) {
        }
        /** Wires the project's android source sets into the android model. */
        @Defaults
        public void androidModelSources(AndroidConfig androidModel,
                @Path("androidSources") AndroidComponentModelSourceSet sources) {
            androidModel.setSources(sources);
        }
        /**
         * Normalizes user-supplied settings once the model is finalized:
         * parses buildToolsVersion into a FullRevision (if not set directly),
         * and prefixes a bare numeric compileSdkVersion with "android-".
         */
        @Finalize
        public void finalizeAndroidModel(AndroidConfig androidModel) {
            if (androidModel.getBuildToolsRevision() == null
                    && androidModel.getBuildToolsVersion() != null) {
                androidModel.setBuildToolsRevision(
                        FullRevision.parseRevision(androidModel.getBuildToolsVersion()));
            }

            if (androidModel.getCompileSdkVersion() != null
                    && !androidModel.getCompileSdkVersion().startsWith("android-")
                    && Ints.tryParse(androidModel.getCompileSdkVersion()) != null) {
                androidModel.setCompileSdkVersion("android-" + androidModel.getCompileSdkVersion());
            }
        }
        /** Creates the standard "debug" and "release" build types. */
        @Defaults
        public void createDefaultBuildTypes(
                @Path("android.buildTypes") ModelMap<BuildType> buildTypes) {
            buildTypes.create(BuilderConstants.DEBUG, new Action<BuildType>() {
                @Override
                public void execute(BuildType buildType) {
                    // Debug builds are debuggable and do not embed the micro app.
                    buildType.setDebuggable(true);
                    buildType.setEmbedMicroApp(false);
                }
            });

            buildTypes.create(BuilderConstants.RELEASE);
        }
@Model
public List<ProductFlavorCombo<ProductFlavor>> createProductFlavorCombo(
@Path("android.productFlavors") ModelMap<ProductFlavor> productFlavors) {
// TODO: Create custom product flavor container to manually configure flavor dimensions.
Set<String> flavorDimensionList = Sets.newHashSet();
for (ProductFlavor flavor : productFlavors.values()) {
if (flavor.getDimension() != null) {
flavorDimensionList.add(flavor.getDimension());
}
}
return ProductFlavorCombo.createCombinations(
Lists.newArrayList(flavorDimensionList),
productFlavors.values());
}
        /** Registers the AndroidComponentSpec component type and its default implementation. */
        @ComponentType
        public void defineComponentType(ComponentTypeBuilder<AndroidComponentSpec> builder) {
            builder.defaultImplementation(DefaultAndroidComponentSpec.class);
        }
        /** Creates the single "android" component that binaries are derived from. */
        @Mutate
        public void createAndroidComponents(ModelMap<AndroidComponentSpec> androidComponents) {
            androidComponents.create(COMPONENT_NAME);
        }
        /** Exposes the android source set container as a model element. */
        @Model
        public AndroidComponentModelSourceSet androidSources(ServiceRegistry serviceRegistry) {
            Instantiator instantiator = serviceRegistry.get(Instantiator.class);
            return new AndroidComponentModelSourceSet(instantiator);
        }
        /**
         * Create all source sets for each AndroidBinary: "main", the test
         * prefixes, one per build type, one per flavor combination, and one
         * per (flavor combination, build type) pair.
         */
        @Mutate
        public void createVariantSourceSet(
                @Path("android.sources") final AndroidComponentModelSourceSet sources,
                @Path("android.buildTypes") final ModelMap<BuildType> buildTypes,
                @Path("android.productFlavors") ModelMap<ProductFlavor> flavors,
                List<ProductFlavorCombo<ProductFlavor>> flavorGroups, ProjectSourceSet projectSourceSet,
                LanguageRegistry languageRegistry) {
            sources.setProjectSourceSet(projectSourceSet);
            for (LanguageRegistration languageRegistration : languageRegistry) {
                sources.registerLanguage(languageRegistration);
            }

            // Create main source set.
            sources.create("main");
            sources.create(ANDROID_TEST.getPrefix());
            sources.create(UNIT_TEST.getPrefix());

            for (BuildType buildType : buildTypes.values()) {
                sources.maybeCreate(buildType.getName());

                for (ProductFlavorCombo group: flavorGroups) {
                    sources.maybeCreate(group.getName());

                    if (!group.getFlavorList().isEmpty()) {
                        sources.maybeCreate(
                                group.getName() + StringHelper.capitalize(buildType.getName()));
                    }
                }

            }
            if (flavorGroups.size() != flavors.size()) {
                // When flavorGroups and flavors have the same size there is at
                // most one flavor dimension, so the per-flavor source sets were
                // already created as flavor groups above. Otherwise, create one
                // source set per individual flavor as well.
                for (ProductFlavor flavor: flavors.values()) {
                    sources.maybeCreate(flavor.getName());
                }
            }
        }
    // After all source sets exist, point each at its conventional default directory.
    @Finalize
    public void setDefaultSrcDir(
            @Path("android.sources") AndroidComponentModelSourceSet sourceSet) {
        sourceSet.setDefaultSrcDir();
    }
    // Registers the AndroidBinary type with its default implementation.
    @BinaryType
    public void defineBinaryType(BinaryTypeBuilder<AndroidBinary> builder) {
        builder.defaultImplementation(DefaultAndroidBinary.class);
    }
@ComponentBinaries
public void createBinaries(
final ModelMap<AndroidBinary> binaries,
@Path("android") final AndroidConfig androidConfig,
@Path("android.buildTypes") final ModelMap<BuildType> buildTypes,
final List<ProductFlavorCombo<ProductFlavor>> flavorCombos,
final AndroidComponentSpec spec) {
if (flavorCombos.isEmpty()) {
flavorCombos.add(new ProductFlavorCombo<ProductFlavor>());
}
for (final BuildType buildType : buildTypes.values()) {
for (final ProductFlavorCombo<ProductFlavor> flavorCombo : flavorCombos) {
binaries.create(getBinaryName(buildType, flavorCombo),
new Action<AndroidBinary>() {
@Override
public void execute(AndroidBinary androidBinary) {
DefaultAndroidBinary binary = (DefaultAndroidBinary) androidBinary;
binary.setBuildType(buildType);
binary.setProductFlavors(flavorCombo.getFlavorList());
}
});
}
}
}
private static String getBinaryName(BuildType buildType, ProductFlavorCombo flavorCombo) {
if (flavorCombo.getFlavorList().isEmpty()) {
return buildType.getName();
} else {
return flavorCombo.getName() + StringHelper.capitalize(buildType.getName());
}
}
}
}
| apache-2.0 |
contentful/contentful-management.java | src/main/java/com/contentful/java/cma/model/CMAType.java | 622 | package com.contentful.java.cma.model;
import com.google.gson.annotations.SerializedName;
/**
* A Contentful resource will be of one of those types. If a new type gets added, this enum will be
* set to null.
*/
public enum CMAType {
  ApiKey,
  Array,
  Asset,
  ContentType,
  EditorInterface,
  Entry,
  Environment,
  Link,
  Locale,
  Organization,
  OrganizationPeriodicUsage,
  SpacePeriodicUsage,
  PersonalAccessToken,
  PreviewApiKey,
  Role,
  Snapshot,
  Space,
  SpaceMembership,
  Tag,
  Upload,
  User,
  // Serialized on the wire as "Extension"; the Java name differs from the API name.
  @SerializedName("Extension") UiExtension,
  Webhook,
  WebhookCallOverview,
  WebhookDefinition
}
| apache-2.0 |
MayerTh/RVRPSimulator | vrpsim-vrprep-util-impl/src/main/java/vrpsim/vrprep/util/impl/VRPREPInstanceProviderAPI.java | 5723 | /**
* Copyright © 2016 Thomas Mayer (thomas.mayer@unibw.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package vrpsim.vrprep.util.impl;
import java.io.File;
import java.io.IOException;
import java.net.URISyntaxException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import javax.xml.bind.JAXBException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.vrprep.model.instance.Instance;
import org.vrprep.model.instance.Instance.Fleet.VehicleProfile;
import org.vrprep.model.instance.Instance.Network.Nodes.Node;
import org.vrprep.model.instance.Instance.Requests.Request;
import org.vrprep.model.util.Instances;
import vrpsim.vrprep.util.api.IVRPREPInstanceProviderAPI;
import vrpsim.vrprep.util.impl.util.VRPREPInstanceProviderUtil;
public class VRPREPInstanceProviderAPI implements IVRPREPInstanceProviderAPI {

    private static Logger logger = LoggerFactory.getLogger(VRPREPInstanceProviderAPI.class);

    /** Relative root folder under which all VRPREP instance files are stored. */
    private final String INSTANCE_ROOT = "vrprepformat";
    private final VRPREPInstanceProviderUtil util = new VRPREPInstanceProviderUtil();

    /** Lists the instance kinds (first-level folders below the instance root). */
    @Override
    public List<String> getAvailableInstanceKinds() throws IOException {
        Path path = Paths.get(INSTANCE_ROOT);
        List<Path> paths = util.getAvailablePathsToInstances(path);
        return convert(paths);
    }

    /** Lists the providers (second-level folders) available for the given kind. */
    @Override
    public List<String> getAvailableInstanceProvidersForKind(String kind) throws IOException {
        Path path = Paths.get(INSTANCE_ROOT, kind);
        List<Path> paths = util.getAvailablePathsToInstances(path);
        return convert(paths);
    }

    /** Lists the instance files available for the given kind and provider. */
    @Override
    public List<String> getAvailableInstancesForKindAndProvider(String kind, String provider) throws IOException {
        Path path = Paths.get(INSTANCE_ROOT, kind, provider);
        List<Path> paths = util.getAvailablePathsToInstances(path);
        return convert(paths);
    }

    /**
     * Loads an instance, optionally normalizing capacities (vehicle capacity to
     * Double.MAX_VALUE, request quantity to 1.0) and/or removing duplicate
     * network nodes and the requests that point at them.
     *
     * @param instance instance file name; ".xml" is appended if missing
     */
    @Override
    public Instance getAvailableInstance(String kind, String provider, String instance, boolean correctInstances, boolean setMaximumVehicleAndMinimumCustomerCapcity)
            throws JAXBException, IOException, URISyntaxException {

        if (!instance.endsWith(".xml")) {
            instance += ".xml";
        }

        Path path = Paths.get(INSTANCE_ROOT, kind, provider, instance);
        File fileInstance = util.loadInstance(path);
        Instance inst = Instances.read(Paths.get(fileInstance.getAbsolutePath()));

        if (setMaximumVehicleAndMinimumCustomerCapcity) {
            this.adaptInstanceTosetMaximumVehicleAndMinimumCustomerCapcity(inst);
        }

        if (correctInstances) {
            this.correctInstance(inst);
        }

        return inst;
    }

    // Sets every vehicle capacity to "unlimited" and every request quantity to
    // one unit, recording the change in the instance's dataset description.
    private void adaptInstanceTosetMaximumVehicleAndMinimumCustomerCapcity(Instance i) {
        for (VehicleProfile vp : i.getFleet().getVehicleProfile()) {
            vp.setCapacity(Double.MAX_VALUE);
        }
        for (Request r : i.getRequests().getRequest()) {
            r.setQuantity(1.0);
        }
        String newDataset = i.getInfo().getDataset()
                + " (vrpsim.vrprep.util.impl.VRPREPInstanceProviderAPI :: capacity vehicle set to Double.MAX_VALUE :: request quantity set to 1.0)";
        i.getInfo().setDataset(newDataset);
    }

    // Converts paths to their final file-name component, skipping .txt/.pdf files.
    private List<String> convert(List<Path> paths) {
        List<String> result = new ArrayList<>();
        for (Path path : paths) {
            String str = path.getName(path.getNameCount() - 1).toString();
            if (!str.endsWith(".txt") && !str.endsWith(".pdf")) {
                result.add(path.getName(path.getNameCount() - 1).toString());
            }
        }
        return result;
    }

    // Removes network nodes whose (cx, cy) location duplicates an earlier node,
    // together with every request that references a removed node.
    private void correctInstance(Instance inst) {
        logger.info("Correcting instance is active.");
        List<Node> nodesToDelete = new ArrayList<>();
        List<Request> requestsToDelete = new ArrayList<>();
        Set<Point> testSet = new HashSet<>();
        for (Node node : inst.getNetwork().getNodes().getNode()) {
            Point testPoint = new Point(node.getCx(), node.getCy());
            if (testDuplicates(testSet, testPoint)) {
                logger.warn("Network node with existing location {} found: {}, will be marked for deletion", testPoint, node);
                nodesToDelete.add(node);
                for (Request request : inst.getRequests().getRequest()) {
                    if (request.getNode().equals(node.getId())) {
                        logger.warn("Request pointing to marked network node found: : {}, will be marked for deletion", request);
                        requestsToDelete.add(request);
                    }
                }
            } else {
                testSet.add(testPoint);
            }
        }

        for (Node node : nodesToDelete) {
            logger.warn("Deleting: {}", node);  // fixed "Delet" typo
            inst.getNetwork().getNodes().getNode().remove(node);
        }
        for (Request request : requestsToDelete) {
            logger.warn("Deleting: {}", request);  // fixed "Delet" typo
            inst.getRequests().getRequest().remove(request);
        }
    }

    // Now that Point implements hashCode consistently with equals, the
    // hand-rolled linear scan can delegate to Set.contains.
    private boolean testDuplicates(Set<Point> testSet, Point testPoint) {
        return testSet.contains(testPoint);
    }

    /** Immutable 2D coordinate used for duplicate-location detection. */
    private class Point {

        public final double x;
        public final double y;

        public Point(double x, double y) {
            super();
            this.x = x;
            this.y = y;
        }

        @Override
        public boolean equals(Object obj) {
            if (obj instanceof Point) {
                return ((Double.compare(((Point) obj).x, x) == 0) && (Double.compare(((Point) obj).y, y) == 0));
            }
            return false;
        }

        @Override
        public int hashCode() {
            // Added: equals was overridden without hashCode, which breaks the
            // equals/hashCode contract and hash-based collections (HashSet).
            // Bit-pattern hashing matches the Double.compare-based equals.
            long bx = Double.doubleToLongBits(x);
            long by = Double.doubleToLongBits(y);
            return 31 * (int) (bx ^ (bx >>> 32)) + (int) (by ^ (by >>> 32));
        }

        @Override
        public String toString() {
            return "[" + x + "," + y + "]";
        }
    }
}
| apache-2.0 |
guoguo10/gewuschool | src/main/java/com/lqg/service/ParentService.java | 244 | package com.lqg.service;
import com.lqg.base.DaoSupport;
import com.lqg.model.Parent;
public interface ParentService extends DaoSupport<Parent> {

    /**
     * Authenticates a parent by email and password.
     * Presumably returns the matching Parent or null when the credentials do
     * not match — TODO confirm against the implementation.
     */
    public Parent login(String email, String password);

    /**
     * Checks whether the given email is not yet registered.
     * NOTE(review): the exact meaning of the boolean (true = unique?) should be
     * verified against the implementation.
     */
    public boolean isUnique(String email);
}
| apache-2.0 |
ridhishguhan/asianet-autologin | src/riddimon/android/asianetautologin/ServiceStarter.java | 3553 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package riddimon.android.asianetautologin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.net.NetworkInfo;
import android.net.wifi.WifiInfo;
import android.net.wifi.WifiManager;
public class ServiceStarter extends BroadcastReceiver {
private static final Logger logger = LoggerFactory.getLogger(ServiceStarter.class);
public static final String ACTION_STATE_CHANGE = "android.net.wifi.STATE_CHANGE";
private static final String ACTION_SUPPLICANT_CONN_CHANGE
= "android.net.wifi.supplicant.CONNECTION_CHANGE";
@Override
public void onReceive(Context context, Intent intent) {
boolean login = SettingsManager.getBoolean(context, SettingsManager
.LOG_IN, true);
boolean stopService = false;
boolean startService = false;
logger.info("Service Starter : Login enabled : {}", login);
if (intent.getAction().equals(ACTION_STATE_CHANGE)) {
NetworkInfo networkInfo = (NetworkInfo) intent.getParcelableExtra(WifiManager
.EXTRA_NETWORK_INFO);
logger.info("WiFi state change : connected = {}", networkInfo.isConnected());
logger.info("WiFi detailed state : {}", networkInfo.getDetailedState());
//String bssid = intent.getStringExtra(WifiManager.EXTRA_BSSID);
//WifiInfo wi = (WifiInfo) intent.getParcelableExtra(WifiManager.EXTRA_WIFI_INFO);
if (!networkInfo.isConnected() && (networkInfo.getDetailedState()
.equals(NetworkInfo.DetailedState.DISCONNECTED)
|| networkInfo.getDetailedState().equals(NetworkInfo
.DetailedState.FAILED))) {
stopService = true;
} else if (networkInfo.isConnected()){
if (login && NetworkUtil.isConnectedToProperNetwork(context)) {
logger.info("WiFi connected to proper network");
startService = true;
} else {
if (login) logger.info("WiFi NOT connected to proper network");
else logger.info("Logged out");
stopService = true;
}
}
} else if (intent.getAction().equals(ACTION_SUPPLICANT_CONN_CHANGE)) {
/*
boolean connected = intent.getBooleanExtra(WifiManager.EXTRA_SUPPLICANT_CONNECTED
, false);
if (connected && NetworkUtil.isConnectedToProperNetwork(context)) {
startService = true;
} else {
stopService = true;
}
*/
}
if (startService) {
logger.info("Starting service");
// initiate login, as it would schedule keepAlive reqs
Intent service = new Intent(context, LoginService.class);
service.setAction(LoginService.ACTION_LOGIN);
context.startService(service);
} else if (stopService) {
logger.info("Stopping service");
Intent service = new Intent(context, LoginService.class);
context.stopService(service);
}
}
} | apache-2.0 |
OculusVR/shanghai-liquibase | liquibase-core/src/main/java/liquibase/snapshot/jvm/ColumnSnapshotGenerator.java | 43822 | package liquibase.snapshot.jvm;
import liquibase.database.AbstractJdbcDatabase;
import liquibase.database.Database;
import liquibase.database.core.*;
import liquibase.database.jvm.JdbcConnection;
import liquibase.datatype.DataTypeFactory;
import liquibase.datatype.LiquibaseDataType;
import liquibase.datatype.core.*;
import liquibase.exception.DatabaseException;
import liquibase.exception.UnexpectedLiquibaseException;
import liquibase.executor.ExecutorService;
import liquibase.logging.LogFactory;
import liquibase.snapshot.*;
import liquibase.statement.DatabaseFunction;
import liquibase.statement.core.RawSqlStatement;
import liquibase.structure.DatabaseObject;
import liquibase.structure.core.*;
import liquibase.util.StringUtils;
import java.math.BigDecimal;
import java.sql.*;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.List;
import java.util.Scanner;
public class ColumnSnapshotGenerator extends JdbcSnapshotGenerator {
    // Snapshots Column objects; columns are discovered via their containing
    // Table or View (hence the "addTo" types passed to the superclass).
    public ColumnSnapshotGenerator() {
        super(Column.class, new Class[]{Table.class, View.class});
    }
@Override
protected DatabaseObject snapshotObject(DatabaseObject example, DatabaseSnapshot snapshot) throws DatabaseException, InvalidExampleException {
Database database = snapshot.getDatabase();
Relation relation = ((Column) example).getRelation();
Schema schema = relation.getSchema();
List<CachedRow> columnMetadataRs = null;
try {
JdbcDatabaseSnapshot.CachingDatabaseMetaData databaseMetaData = ((JdbcDatabaseSnapshot) snapshot).getMetaData();
columnMetadataRs = databaseMetaData.getColumns(((AbstractJdbcDatabase) database).getJdbcCatalogName(schema), ((AbstractJdbcDatabase) database).getJdbcSchemaName(schema), relation.getName(), example.getName());
if (columnMetadataRs.size() > 0) {
CachedRow data = columnMetadataRs.get(0);
return readColumn(data, relation, database);
} else {
return null;
}
} catch (Exception e) {
throw new DatabaseException(e);
}
}
@Override
protected void addTo(DatabaseObject foundObject, DatabaseSnapshot snapshot) throws DatabaseException, InvalidExampleException {
if (!snapshot.getSnapshotControl().shouldInclude(Column.class)) {
return;
}
if (foundObject instanceof Relation) {
Database database = snapshot.getDatabase();
Relation relation = (Relation) foundObject;
List<CachedRow> allColumnsMetadataRs = null;
try {
JdbcDatabaseSnapshot.CachingDatabaseMetaData databaseMetaData = ((JdbcDatabaseSnapshot) snapshot).getMetaData();
Schema schema;
schema = relation.getSchema();
allColumnsMetadataRs = databaseMetaData.getColumns(((AbstractJdbcDatabase) database).getJdbcCatalogName(schema), ((AbstractJdbcDatabase) database).getJdbcSchemaName(schema), relation.getName(), null);
for (CachedRow row : allColumnsMetadataRs) {
Column exampleColumn = new Column().setRelation(relation).setName(row.getString("COLUMN_NAME"));
relation.getColumns().add(exampleColumn);
}
} catch (Exception e) {
throw new DatabaseException(e);
}
}
}
    /**
     * Builds a Column object from one JDBC column-metadata row: name, remarks,
     * nullability, auto-increment flag, data type and default value.
     *
     * @param columnMetadataResultSet one cached row from DatabaseMetaData.getColumns
     * @param table                   the relation (table or view) owning the column
     */
    protected Column readColumn(CachedRow columnMetadataResultSet, Relation table, Database database) throws SQLException, DatabaseException {
        String rawTableName = (String) columnMetadataResultSet.get("TABLE_NAME");
        String rawColumnName = (String) columnMetadataResultSet.get("COLUMN_NAME");
        String rawSchemaName = StringUtils.trimToNull((String) columnMetadataResultSet.get("TABLE_SCHEM"));
        String rawCatalogName = StringUtils.trimToNull((String) columnMetadataResultSet.get("TABLE_CAT"));

        String remarks = StringUtils.trimToNull((String) columnMetadataResultSet.get("REMARKS"));
        if (remarks != null) {
            remarks = remarks.replace("''", "'"); //come back escaped sometimes
        }

        Column column = new Column();
        column.setName(rawColumnName);
        column.setRelation(table);
        column.setRemarks(remarks);

        // Oracle reports nullability as "Y"/"N"; everyone else uses the
        // standard DatabaseMetaData integer constants.
        if (database instanceof OracleDatabase) {
            String nullable = columnMetadataResultSet.getString("NULLABLE");
            if (nullable.equals("Y")) {
                column.setNullable(true);
            } else {
                column.setNullable(false);
            }
        } else {
            int nullable = columnMetadataResultSet.getInt("NULLABLE");
            if (nullable == DatabaseMetaData.columnNoNulls) {
                column.setNullable(false);
            } else if (nullable == DatabaseMetaData.columnNullable) {
                column.setNullable(true);
            } else if (nullable == DatabaseMetaData.columnNullableUnknown) {
                LogFactory.getLogger().info("Unknown nullable state for column " + column.toString() + ". Assuming nullable");
                column.setNullable(true);
            }
        }

        // Auto-increment detection: prefer the IS_AUTOINCREMENT metadata
        // column; when absent, fall back to an empty SELECT and inspect the
        // ResultSetMetaData of the single selected column.
        if (database.supportsAutoIncrement()) {
            if (table instanceof Table) {
                if (columnMetadataResultSet.containsColumn("IS_AUTOINCREMENT")) {
                    String isAutoincrement = (String) columnMetadataResultSet.get("IS_AUTOINCREMENT");
                    isAutoincrement = StringUtils.trimToNull(isAutoincrement);
                    if (isAutoincrement == null) {
                        column.setAutoIncrementInformation(null);
                    } else if (isAutoincrement.equals("YES")) {
                        column.setAutoIncrementInformation(new Column.AutoIncrementInformation());
                    } else if (isAutoincrement.equals("NO")) {
                        column.setAutoIncrementInformation(null);
                    } else if (isAutoincrement.equals("")) {
                        LogFactory.getLogger().info("Unknown auto increment state for column " + column.toString() + ". Assuming not auto increment");
                        column.setAutoIncrementInformation(null);
                    } else {
                        throw new UnexpectedLiquibaseException("Unknown is_autoincrement value: '" + isAutoincrement+"'");
                    }
                } else {
                    //probably older version of java, need to select from the column to find out if it is auto-increment
                    // "where 0=1" returns no rows; only the metadata is needed.
                    String selectStatement = "select " + database.escapeColumnName(rawCatalogName, rawSchemaName, rawTableName, rawColumnName) + " from " + database.escapeTableName(rawCatalogName, rawSchemaName, rawTableName) + " where 0=1";
                    LogFactory.getLogger().debug("Checking "+rawTableName+"."+rawCatalogName+" for auto-increment with SQL: '"+selectStatement+"'");
                    Connection underlyingConnection = ((JdbcConnection) database.getConnection()).getUnderlyingConnection();
                    Statement statement = null;
                    ResultSet columnSelectRS = null;

                    try {
                        statement = underlyingConnection.createStatement();
                        columnSelectRS = statement.executeQuery(selectStatement);
                        if (columnSelectRS.getMetaData().isAutoIncrement(1)) {
                            column.setAutoIncrementInformation(new Column.AutoIncrementInformation());
                        } else {
                            column.setAutoIncrementInformation(null);
                        }
                    } finally {
                        // NOTE(review): the ResultSet is closed after its Statement;
                        // most drivers close the ResultSet with the Statement, so
                        // the second close may be redundant — verify per driver.
                        try {
                            if (statement != null) {
                                statement.close();
                            }
                        } catch (SQLException ignore) {
                        }
                        if (columnSelectRS != null) {
                            columnSelectRS.close();
                        }
                    }
                }
            }
        }

        DataType type = readDataType(columnMetadataResultSet, column, database);
        column.setType(type);

        column.setDefaultValue(readDefaultValue(columnMetadataResultSet, column, database));

        return column;
    }
    /**
     * Derives the column's DataType from the metadata row, applying
     * database-specific handling: Oracle (NUMBER precision/scale, char-vs-byte
     * length units), Firebird BLOB subtypes, and MySQL ENUM/SET value lists.
     */
    protected DataType readDataType(CachedRow columnMetadataResultSet, Column column, Database database) throws SQLException {

        if (database instanceof OracleDatabase) {
            String dataType = columnMetadataResultSet.getString("DATA_TYPE");
            // Normalize Oracle's VARCHAR2/NVARCHAR2 names to the generic forms.
            dataType = dataType.replace("VARCHAR2", "VARCHAR");
            dataType = dataType.replace("NVARCHAR2", "NVARCHAR");

            DataType type = new DataType(dataType);
//            type.setDataTypeId(dataType);
            if (dataType.equalsIgnoreCase("NUMBER")) {
                // NUMBER without explicit precision defaults to 38, scale to 0.
                type.setColumnSize(columnMetadataResultSet.getInt("DATA_PRECISION"));
                if (type.getColumnSize() == null) {
                    type.setColumnSize(38);
                }
                type.setDecimalDigits(columnMetadataResultSet.getInt("DATA_SCALE"));
                if (type.getDecimalDigits() == null) {
                    type.setDecimalDigits(0);
                }
//            type.setRadix(10);
            } else {
                type.setColumnSize(columnMetadataResultSet.getInt("DATA_LENGTH"));

                if (dataType.equalsIgnoreCase("NCLOB")) {
                    //no attributes
                } else if (dataType.equalsIgnoreCase("NVARCHAR") || dataType.equalsIgnoreCase("NCHAR")) {
                    //data length is in bytes but specified in chars
                    type.setColumnSize(type.getColumnSize() / 2);
                    type.setColumnSizeUnit(DataType.ColumnSizeUnit.CHAR);
                } else {
                    // CHAR_USED = "C" means the length is declared in characters.
                    String charUsed = columnMetadataResultSet.getString("CHAR_USED");
                    DataType.ColumnSizeUnit unit = null;
                    if ("C".equals(charUsed)) {
                        unit = DataType.ColumnSizeUnit.CHAR;
                    }
                    type.setColumnSizeUnit(unit);
                }
            }

            return type;
        }

        String columnTypeName = (String) columnMetadataResultSet.get("TYPE_NAME");

        if (database instanceof FirebirdDatabase) {
            if (columnTypeName.equals("BLOB SUB_TYPE 0")) {
                columnTypeName = "BLOB";
            }
            if (columnTypeName.equals("BLOB SUB_TYPE 1")) {
                columnTypeName = "CLOB";
            }
        }

        // MySQL ENUM/SET: metadata does not expose the allowed values, so
        // query INFORMATION_SCHEMA and rebuild e.g. ENUM('a','b') textually.
        // The units/tens cross join splits up to 100 comma-separated values.
        if (database instanceof MySQLDatabase && (columnTypeName.equalsIgnoreCase("ENUM") || columnTypeName.equalsIgnoreCase("SET"))) {
            try {
                String boilerLength;
                if (columnTypeName.equalsIgnoreCase("ENUM"))
                    boilerLength = "7";
                else // SET
                    boilerLength = "6";

                List<String> enumValues = ExecutorService.getInstance().getExecutor(database).queryForList(new RawSqlStatement("SELECT DISTINCT SUBSTRING_INDEX(SUBSTRING_INDEX(SUBSTRING(COLUMN_TYPE, " + boilerLength + ", LENGTH(COLUMN_TYPE) - " + boilerLength + " - 1 ), \"','\", 1 + units.i + tens.i * 10) , \"','\", -1)\n" +
                        "FROM INFORMATION_SCHEMA.COLUMNS\n" +
                        "CROSS JOIN (SELECT 0 AS i UNION SELECT 1 UNION SELECT 2 UNION SELECT 3 UNION SELECT 4 UNION SELECT 5 UNION SELECT 6 UNION SELECT 7 UNION SELECT 8 UNION SELECT 9) units\n" +
                        "CROSS JOIN (SELECT 0 AS i UNION SELECT 1 UNION SELECT 2 UNION SELECT 3 UNION SELECT 4 UNION SELECT 5 UNION SELECT 6 UNION SELECT 7 UNION SELECT 8 UNION SELECT 9) tens\n" +
                        "WHERE TABLE_NAME = '"+column.getRelation().getName()+"' \n" +
                        "AND COLUMN_NAME = '"+column.getName()+"'"), String.class);
                String enumClause = "";
                for (String enumValue : enumValues) {
                    enumClause += "'"+enumValue+"', ";
                }
                enumClause = enumClause.replaceFirst(", $", "");
                return new DataType(columnTypeName + "("+enumClause+")");
            } catch (DatabaseException e) {
                // Fall through to the generic path; the enum values are lost.
                LogFactory.getLogger().warning("Error fetching enum values", e);
            }
        }
        DataType.ColumnSizeUnit columnSizeUnit = DataType.ColumnSizeUnit.BYTE;

        int dataType = columnMetadataResultSet.getInt("DATA_TYPE");
        Integer columnSize = columnMetadataResultSet.getInt("COLUMN_SIZE");
        // don't set size for types like int4, int8 etc
        if (database.dataTypeIsNotModifiable(columnTypeName)) {
            columnSize = null;
        }

        Integer decimalDigits = columnMetadataResultSet.getInt("DECIMAL_DIGITS");
        if (decimalDigits != null && decimalDigits.equals(0)) {
            decimalDigits = null;
        }

        Integer radix = columnMetadataResultSet.getInt("NUM_PREC_RADIX");

        Integer characterOctetLength = columnMetadataResultSet.getInt("CHAR_OCTET_LENGTH");

        if (database instanceof DB2Database) {
            String typeName = columnMetadataResultSet.getString("TYPE_NAME");
            if (typeName.equalsIgnoreCase("DBCLOB") || typeName.equalsIgnoreCase("GRAPHIC") || typeName.equalsIgnoreCase("VARGRAPHIC")) {
                if (columnSize != null) {
                    columnSize = columnSize / 2; //Stored as double length chars
                }
            }
        }

        DataType type = new DataType(columnTypeName);
        type.setDataTypeId(dataType);
        type.setColumnSize(columnSize);
        type.setDecimalDigits(decimalDigits);
        type.setRadix(radix);
        type.setCharacterOctetLength(characterOctetLength);
        type.setColumnSizeUnit(columnSizeUnit);

        return type;
    }
    /**
     * Converts the raw COLUMN_DEF metadata value into a typed Java object
     * (number, boolean, string, date, ...) based on the column's declared type,
     * or into a DatabaseFunction when the default is an expression or cannot
     * be parsed as a literal of that type.
     */
    protected Object readDefaultValue(CachedRow columnMetadataResultSet, Column columnInfo, Database database) throws SQLException, DatabaseException {
        // MSSQL reports an explicit NULL default as the literal "(NULL)".
        if (database instanceof MSSQLDatabase) {
            Object defaultValue = columnMetadataResultSet.get("COLUMN_DEF");

            if (defaultValue != null && defaultValue instanceof String) {
                if (defaultValue.equals("(NULL)")) {
                    columnMetadataResultSet.set("COLUMN_DEF", null);
                }
            }
        }

        // Oracle exposes the default under DATA_DEFAULT instead of COLUMN_DEF.
        if (database instanceof OracleDatabase) {
            if (columnMetadataResultSet.get("COLUMN_DEF") == null) {
                columnMetadataResultSet.set("COLUMN_DEF", columnMetadataResultSet.get("DATA_DEFAULT"));
            }
        }

        Object val = columnMetadataResultSet.get("COLUMN_DEF");

        if (!(val instanceof String)) {
            return val;
        }

        String stringVal = (String) val;
        if (stringVal.isEmpty()) {
            return null;
        }

        // Strip the wrapping quote/paren styles various databases emit;
        // a bare (...) wrapper is treated as an expression, not a literal.
        if (stringVal.startsWith("'") && stringVal.endsWith("'")) {
            stringVal = stringVal.substring(1, stringVal.length() - 1);
        } else if (stringVal.startsWith("((") && stringVal.endsWith("))")) {
            stringVal = stringVal.substring(2, stringVal.length() - 2);
        } else if (stringVal.startsWith("('") && stringVal.endsWith("')")) {
            stringVal = stringVal.substring(2, stringVal.length() - 2);
        } else if (stringVal.startsWith("(") && stringVal.endsWith(")")) {
            return new DatabaseFunction(stringVal.substring(1, stringVal.length() - 1));
        }

        int type = Integer.MIN_VALUE;
        if (columnInfo.getType().getDataTypeId() != null) {
            type = columnInfo.getType().getDataTypeId();
        }
        String typeName = columnInfo.getType().getTypeName();
        // NOTE(review): the Scanner is created from stringVal as it is here;
        // the later BIT branch rewrites stringVal (stripping MySQL's b'...')
        // without rebuilding the Scanner — confirm this ordering is intended.
        Scanner scanner = new Scanner(stringVal.trim());
        LiquibaseDataType liquibaseDataType = DataTypeFactory.getInstance().from(columnInfo.getType());
        // Dispatch on the liquibase data type / JDBC type id. For each numeric
        // kind: return the parsed literal when it scans cleanly, otherwise fall
        // back to wrapping the raw text as a DatabaseFunction (an expression).
        if (type == Types.ARRAY) {
            return new DatabaseFunction(stringVal);
        } else if ((liquibaseDataType instanceof BigIntType || type == Types.BIGINT)) {
            if (scanner.hasNextBigInteger()) {
                return scanner.nextBigInteger();
            } else {
                return new DatabaseFunction(stringVal);
            }
        } else if (type == Types.BINARY) {
            return new DatabaseFunction(stringVal.trim());
        } else if (type == Types.BIT) {
            if (stringVal.startsWith("b'")) { //mysql returns boolean values as b'0' and b'1'
                stringVal = stringVal.replaceFirst("b'", "").replaceFirst("'$", "");
            }
            stringVal = stringVal.trim();
            if (scanner.hasNextBoolean()) {
                return scanner.nextBoolean();
            } else {
                return new Integer(stringVal);
            }
        } else if (liquibaseDataType instanceof BlobType|| type == Types.BLOB) {
            return new DatabaseFunction(stringVal);
        } else if ((liquibaseDataType instanceof BooleanType || type == Types.BOOLEAN )) {
            if (scanner.hasNextBoolean()) {
                return scanner.nextBoolean();
            } else {
                return new DatabaseFunction(stringVal);
            }
        } else if (liquibaseDataType instanceof CharType || type == Types.CHAR) {
            return stringVal;
        } else if (liquibaseDataType instanceof ClobType || type == Types.CLOB) {
            return stringVal;
        } else if (type == Types.DATALINK) {
            return new DatabaseFunction(stringVal);
        } else if (liquibaseDataType instanceof DateType || type == Types.DATE) {
            if (typeName.equalsIgnoreCase("year")) {
                return stringVal.trim();
            }
            return DataTypeFactory.getInstance().fromDescription("date").sqlToObject(stringVal, database);
        } else if ((liquibaseDataType instanceof DecimalType || type == Types.DECIMAL)) {
            if (scanner.hasNextBigDecimal()) {
                return scanner.nextBigDecimal();
            } else {
                return new DatabaseFunction(stringVal);
            }
        } else if (type == Types.DISTINCT) {
            return new DatabaseFunction(stringVal);
        } else if ((liquibaseDataType instanceof DoubleType || type == Types.DOUBLE)) {
            if (scanner.hasNextDouble()) {
                return scanner.nextDouble();
            } else {
                return new DatabaseFunction(stringVal);
            }
        } else if ((liquibaseDataType instanceof FloatType || type == Types.FLOAT)) {
            if (scanner.hasNextFloat()) {
                return scanner.nextFloat();
            } else {
                return new DatabaseFunction(stringVal);
            }
        } else if ((liquibaseDataType instanceof IntType || type == Types.INTEGER)) {
            if (scanner.hasNextInt()) {
                return scanner.nextInt();
            } else {
                return new DatabaseFunction(stringVal);
            }
        } else if (type == Types.JAVA_OBJECT) {
            return new DatabaseFunction(stringVal);
        } else if (type == Types.LONGNVARCHAR) {
            return stringVal;
        } else if (type == Types.LONGVARBINARY) {
            return new DatabaseFunction(stringVal);
        } else if (type == Types.LONGVARCHAR) {
            return stringVal;
        } else if (liquibaseDataType instanceof NCharType || type == Types.NCHAR) {
            return stringVal;
        } else if (type == Types.NCLOB) {
            return stringVal;
        } else if (type == Types.NULL) {
            return null;
        } else if ((liquibaseDataType instanceof NumberType || type == Types.NUMERIC)) {
            if (scanner.hasNextBigDecimal()) {
                return scanner.nextBigDecimal();
            } else {
                return new DatabaseFunction(stringVal);
            }
        } else if (liquibaseDataType instanceof NVarcharType || type == Types.NVARCHAR) {
            return stringVal;
        } else if (type == Types.OTHER) {
            // DB2's DECFLOAT is surfaced as OTHER but is numeric.
            if (database instanceof DB2Database && typeName.equalsIgnoreCase("DECFLOAT")) {
                return new BigDecimal(stringVal);
            }
            return new DatabaseFunction(stringVal);
        } else if (type == Types.REAL) {
            return new BigDecimal(stringVal.trim());
        } else if (type == Types.REF) {
            return new DatabaseFunction(stringVal);
        } else if (type == Types.ROWID) {
            return new DatabaseFunction(stringVal);
        } else if ((liquibaseDataType instanceof SmallIntType || type == Types.SMALLINT)) {
            if (scanner.hasNextInt()) {
                return scanner.nextInt();
            } else {
                return new DatabaseFunction(stringVal);
            }
        } else if (type == Types.SQLXML) {
            return new DatabaseFunction(stringVal);
        } else if (type == Types.STRUCT) {
            return new DatabaseFunction(stringVal);
        } else if (liquibaseDataType instanceof TimeType || type == Types.TIME) {
            return DataTypeFactory.getInstance().fromDescription("time").sqlToObject(stringVal, database);
        } else if (liquibaseDataType instanceof DateTimeType || liquibaseDataType instanceof TimestampType || type == Types.TIMESTAMP) {
            return DataTypeFactory.getInstance().fromDescription("datetime").sqlToObject(stringVal, database);
        } else if ((liquibaseDataType instanceof TinyIntType || type == Types.TINYINT)) {
            if (scanner.hasNextInt()) {
                return scanner.nextInt();
            } else {
                return new DatabaseFunction(stringVal);
            }
        } else if (type == Types.VARBINARY) {
            return new DatabaseFunction(stringVal);
        } else if (liquibaseDataType instanceof VarcharType || type == Types.VARCHAR) {
            return stringVal;
        } else if (database instanceof MySQLDatabase && typeName.toLowerCase().startsWith("enum")) {
            return stringVal;
        } else {
            // Unknown type: assume the default is an expression/function call.
            LogFactory.getLogger().info("Unknown default value: value '" + stringVal + "' type " + typeName + " (" + type + "), assuming it is a function");
            return new DatabaseFunction(stringVal);
        }
    }
//START CODE FROM SQLITEDatabaseSnapshotGenerator
//// @Override
//// protected void readColumns(DatabaseSnapshot snapshot, String schema, DatabaseMetaData databaseMetaData) throws SQLException, DatabaseException {
//// Database database = snapshot.getDatabase();
//// updateListeners("Reading columns for " + database.toString() + " ...");
////
//// if (database instanceof SQLiteDatabase) {
//// // ...work around for SQLite
//// for (Table cur_table : snapshot.getTables()) {
//// Statement selectStatement = null;
//// ResultSet rs = null;
//// try {
//// selectStatement = ((JdbcConnection) database.getConnection()).getUnderlyingConnection().createStatement();
//// rs = databaseMetaData.getColumns(database.convertRequestedSchemaToCatalog(schema), database.convertRequestedSchemaToSchema(schema), cur_table.getName(), null);
//// if (rs == null) {
//// rs = databaseMetaData.getColumns(database.convertRequestedSchemaToCatalog(schema), database.convertRequestedSchemaToSchema(schema), cur_table.getName(), null);
//// }
//// while ((rs != null) && rs.next()) {
//// readColumnInfo(snapshot, schema, rs);
//// }
//// } finally {
//// if (rs != null) {
//// try {
//// rs.close();
//// } catch (SQLException ignored) {
//// }
//// }
//// if (selectStatement != null) {
//// selectStatement.close();
//// }
//// }
//// }
//// } else {
//// // ...if it is no SQLite database
//// Statement selectStatement = null;
//// ResultSet rs = null;
//// try {
//// selectStatement = ((JdbcConnection) database.getConnection()).getUnderlyingConnection().createStatement();
//// rs = databaseMetaData.getColumns(database.convertRequestedSchemaToCatalog(schema), database.convertRequestedSchemaToSchema(schema), null, null);
//// while (rs.next()) {
//// readColumnInfo(snapshot, schema, rs);
//// }
//// } finally {
//// if (rs != null) {
//// try {
//// rs.close();
//// } catch (SQLException ignored) {
//// }
//// }
//// if (selectStatement != null) {
//// selectStatement.close();
//// }
//// }
//// }
//// }
//
//// private Column readColumnInfo(DatabaseSnapshot snapshot, String schema, ResultSet rs) throws SQLException, DatabaseException {
//// Database database = snapshot.getDatabase();
//// Column columnInfo = new Column();
////
//// String tableName = rs.getString("TABLE_NAME");
//// String columnName = rs.getString("COLUMN_NAME");
//// String schemaName = rs.getString("TABLE_SCHEM");
//// String catalogName = rs.getString("TABLE_CAT");
////
//// String upperCaseTableName = tableName.toUpperCase(Locale.ENGLISH);
////
//// if (database.isSystemTable(catalogName, schemaName, upperCaseTableName) ||
//// database.isLiquibaseTable(upperCaseTableName)) {
//// return null;
//// }
////
//// Table table = snapshot.getTable(tableName);
//// if (table == null) {
//// View view = snapshot.getView(tableName);
//// if (view == null) {
//// LogFactory.getLogger().debug("Could not find table or view " + tableName + " for column " + columnName);
//// return null;
//// } else {
//// columnInfo.setView(view);
//// view.getColumns().add(columnInfo);
//// }
//// } else {
//// columnInfo.setTable(table);
//// table.getColumns().add(columnInfo);
//// }
////
//// columnInfo.setName(columnName);
//// columnInfo.setDataType(rs.getInt("DATA_TYPE"));
//// columnInfo.setColumnSize(rs.getInt("COLUMN_SIZE"));
//// columnInfo.setDecimalDigits(rs.getInt("DECIMAL_POINTS"));
//// Object defaultValue = rs.getObject("COLUMN_DEF");
////// try {
//// //todo columnInfo.setDefaultValue(TypeConverterFactory.getInstance().findTypeConverter(database).convertDatabaseValueToObject(defaultValue, columnInfo.getDataType(), columnInfo.getColumnSize(), columnInfo.getDecimalDigits(), database));
////// } catch (ParseException e) {
////// throw new DatabaseException(e);
////// }
////
//// int nullable = rs.getInt("NULLABLE");
//// if (nullable == DatabaseMetaData.columnNoNulls) {
//// columnInfo.setNullable(false);
//// } else if (nullable == DatabaseMetaData.columnNullable) {
//// columnInfo.setNullable(true);
//// }
////
//// columnInfo.setPrimaryKey(snapshot.isPrimaryKey(columnInfo));
//// columnInfo.setAutoIncrement(isColumnAutoIncrement(database, schema, tableName, columnName));
//// String typeName = rs.getString("TYPE_NAME");
//// if (columnInfo.isAutoIncrement()) {
//// typeName += "{autoIncrement:true}";
//// }
//// columnInfo.setType(DataTypeFactory.getInstance().parse(typeName));
////
//// return columnInfo;
//// }
//END CODE FROM SQLiteDatabaseSnapshotGenerator
//method was from DerbyDatabaseSnapshotGenerator
// @Override
// protected Object readDefaultValue(Map<String, Object> columnMetadataResultSet, Column columnInfo, Database database) throws SQLException, DatabaseException {
// Object val = columnMetadataResultSet.get("COLUMN_DEF");
//
// if (val instanceof String && "GENERATED_BY_DEFAULT".equals(val)) {
// return null;
// }
// return super.readDefaultValue(columnMetadataResultSet, columnInfo, database);
// }
//START CODE FROM MysqlDatabaseSnapshotGenerator
// @Override
// protected Object readDefaultValue(Column columnInfo, ResultSet rs, Database database) throws SQLException, DatabaseException {
// try {
// Object tmpDefaultValue = columnInfo.getType().toLiquibaseType().sqlToObject(tableSchema.get(columnName).get(1), database);
// // this just makes explicit the following implicit behavior defined in the mysql docs:
// // "If an ENUM column is declared to permit NULL, the NULL value is a legal value for
// // the column, and the default value is NULL. If an ENUM column is declared NOT NULL,
// // its default value is the first element of the list of permitted values."
// if (tmpDefaultValue == null && columnInfo.isNullable()) {
// columnInfo.setDefaultValue("NULL");
// }
// // column is NOT NULL, and this causes no "DEFAULT VALUE XXX" to be generated at all. per
// // the above from MySQL docs, this will cause the first value in the enumeration to be the
// // default.
// else if (tmpDefaultValue == null) {
// columnInfo.setDefaultValue(null);
// } else {
// columnInfo.setDefaultValue("'" + database.escapeStringForDatabase(tmpDefaultValue) + "'");
// }
// } catch (ParseException e) {
// throw new DatabaseException(e);
// }
//
// // TEXT and BLOB column types always have null as default value
// } else if (columnTypeName.toLowerCase().equals("text") || columnTypeName.toLowerCase().equals("blob")) {
// columnInfo.setType(new DatabaseDataType(columnTypeName));
// columnInfo.setDefaultValue(null);
//
// // Parsing TIMESTAMP database.convertDatabaseValueToObject() produces incorrect results
// // eg. for default value 0000-00-00 00:00:00 we have 0002-11-30T00:00:00.0 as parsing result
// } else if (columnTypeName.toLowerCase().equals("timestamp") && !"CURRENT_TIMESTAMP".equals(tableSchema.get(columnName).get(1))) {
// columnInfo.setType(new DatabaseDataType(columnTypeName));
// columnInfo.setDefaultValue(tableSchema.get(columnName).get(1));
// } else {
// super.readDefaultValue(columnInfo, rs, database);
// }
//
// }
// @Override
// protected DatabaseDataType readDataType(ResultSet rs, Database database) throws SQLException {
// String columnTypeName = rs.getString("TYPE_NAME");
// String columnName = rs.getString("COLUMN_NAME");
// String tableName = rs.getString("TABLE_NAME");
// String schemaName = rs.getString("TABLE_CAT");
//
// Map<String, List<String>> tableSchema = new HashMap<String, List<String>>();
//
// if (!schemaCache.containsKey(tableName)) {
//
// Statement selectStatement = null;
// ResultSet rsColumnType = null;
// try {
// selectStatement = ((JdbcConnection) database.getConnection()).getUnderlyingConnection().createStatement();
// rsColumnType = selectStatement.executeQuery("DESC "+database.escapeTableName(schemaName, tableName));
//
// while(rsColumnType.next()) {
// List<String> colSchema = new ArrayList<String>();
// colSchema.add(rsColumnType.getString("Type"));
// colSchema.add(rsColumnType.getString("Default"));
// tableSchema.put(rsColumnType.getString("Field"), colSchema);
// }
// } finally {
// if (rsColumnType != null) {
// try {
// rsColumnType.close();
// } catch (SQLException ignore) { }
// }
// if (selectStatement != null) {
// try {
// selectStatement.close();
// } catch (SQLException ignore) { }
// }
// }
//
//
// schemaCache.put(tableName, tableSchema);
//
// }
//
// tableSchema = schemaCache.get(tableName);
//
// // Parse ENUM and SET column types correctly
// if (columnTypeName.toLowerCase().startsWith("enum") || columnTypeName.toLowerCase().startsWith("set")) {
//
// DatabaseDataType dataType = new DatabaseDataType(tableSchema.get(columnName).get(0));
// try {
// Object tmpDefaultValue = dataType.toLiquibaseType().sqlToObject(tableSchema.get(columnName).get(1), database);
// // this just makes explicit the following implicit behavior defined in the mysql docs:
// // "If an ENUM column is declared to permit NULL, the NULL value is a legal value for
// // the column, and the default value is NULL. If an ENUM column is declared NOT NULL,
// // its default value is the first element of the list of permitted values."
// if (tmpDefaultValue == null && columnInfo.isNullable()) {
// columnInfo.setDefaultValue("NULL");
// }
// // column is NOT NULL, and this causes no "DEFAULT VALUE XXX" to be generated at all. per
// // the above from MySQL docs, this will cause the first value in the enumeration to be the
// // default.
// else if (tmpDefaultValue == null) {
// columnInfo.setDefaultValue(null);
// } else {
// columnInfo.setDefaultValue("'" + database.escapeStringForDatabase(tmpDefaultValue) + "'");
// }
// } catch (ParseException e) {
// throw new DatabaseException(e);
// }
//
// // TEXT and BLOB column types always have null as default value
// } else if (columnTypeName.toLowerCase().equals("text") || columnTypeName.toLowerCase().equals("blob")) {
// columnInfo.setType(new DatabaseDataType(columnTypeName));
// columnInfo.setDefaultValue(null);
//
// // Parsing TIMESTAMP database.convertDatabaseValueToObject() produces incorrect results
// // eg. for default value 0000-00-00 00:00:00 we have 0002-11-30T00:00:00.0 as parsing result
// } else if (columnTypeName.toLowerCase().equals("timestamp") && !"CURRENT_TIMESTAMP".equals(tableSchema.get(columnName).get(1))) {
// columnInfo.setType(new DatabaseDataType(columnTypeName));
// columnInfo.setDefaultValue(tableSchema.get(columnName).get(1));
// } else {
// super.readDefaultValue(columnInfo, rs, database);
// }
// }
// @Override
// protected ForeignKeyInfo readForeignKey(ResultSet importedKeyMetadataResultSet) throws DatabaseException, SQLException {
// ForeignKeyInfo fkinfo= super.readForeignKey(importedKeyMetadataResultSet);
// //MySQL in reality doesn't has schemas. It has databases that can have relations like schemas.
// fkinfo.setPkTableSchema(cleanObjectNameFromDatabase(importedKeyMetadataResultSet.getString("PKTABLE_CAT")));
// fkinfo.setFkSchema(cleanObjectNameFromDatabase(importedKeyMetadataResultSet.getString("FKTABLE_CAT")));
// return fkinfo;
// }
//END CODE FROM MySQLDatabaseSNapshotGenerator
//START CODE from InformixSnapshotGenerator
// private static final Map<Integer, String> qualifiers = new HashMap<Integer, String>();
//
// static {
// qualifiers.put(0, "YEAR");
// qualifiers.put(2, "MONTH");
// qualifiers.put(4, "DAY");
// qualifiers.put(6, "HOUR");
// qualifiers.put(8, "MINUTE");
// qualifiers.put(10, "SECOND");
// qualifiers.put(11, "FRACTION(1)");
// qualifiers.put(12, "FRACTION(2)");
// qualifiers.put(13, "FRACTION(3)");
// qualifiers.put(14, "FRACTION(4)");
// qualifiers.put(15, "FRACTION(5)");
// }
// protected DataType readDataType(Map<String, Object> rs, Column column, Database database) throws SQLException {
// // See http://publib.boulder.ibm.com/infocenter/idshelp/v115/topic/com.ibm.sqlr.doc/sqlr07.htm
// String typeName = ((String) rs.get("TYPE_NAME")).toUpperCase();
// if ("DATETIME".equals(typeName) || "INTERVAL".equals(typeName)) {
// int collength = (Integer) rs.get("COLUMN_SIZE");
// //int positions = collength / 256;
// int firstQualifierType = (collength % 256) / 16;
// int lastQualifierType = (collength % 256) % 16;
// String type = "DATETIME".equals(typeName) ? "DATETIME" : "INTERVAL";
// String firstQualifier = qualifiers.get(firstQualifierType);
// String lastQualifier = qualifiers.get(lastQualifierType);
// DataType dataTypeMetaData = new DataType(type + " " + firstQualifier + " TO " + lastQualifier);
// dataTypeMetaData.setColumnSizeUnit(DataType.ColumnSizeUnit.BYTE);
//
// return dataTypeMetaData;
// } else {
// return super.readDataType(rs, column, database);
// }
// }
//END CODE FROM InformaixSnapshotGenerator
//Code below was from OracleDatabaseSnapshotGenerator
// @Override
// protected void readColumns(DatabaseSnapshot snapshot, String schema, DatabaseMetaData databaseMetaData) throws SQLException, DatabaseException {
// findIntegerColumns(snapshot, schema);
// super.readColumns(snapshot, schema, databaseMetaData);
//
// /*
// * Code Description:
// * Finding all 'tablespace' attributes of column's PKs
// * */
// Database database = snapshot.getDatabase();
// Statement statement = null;
// ResultSet rs = null;
// try {
// statement = ((JdbcConnection) database.getConnection()).getUnderlyingConnection().createStatement();
//
// // Setting default schema name. Needed for correct statement generation
// if (schema == null)
// schema = database.convertRequestedSchemaToSchema(schema);
//
// String query = "select ui.tablespace_name TABLESPACE, ucc.table_name TABLE_NAME, ucc.column_name COLUMN_NAME FROM all_indexes ui , all_constraints uc , all_cons_columns ucc where uc.constraint_type = 'P' and ucc.constraint_name = uc.constraint_name and uc.index_name = ui.index_name and uc.owner = '" + schema + "' and ui.table_owner = '" + schema + "' and ucc.owner = '" + schema + "'";
// rs = statement.executeQuery(query);
//
// while (rs.next()) {
// Column column = snapshot.getColumn(rs.getString("TABLE_NAME"), rs.getString("COLUMN_NAME"));
// // setting up tablespace property to column, to configure it's PK-index
// if (column == null) {
// continue; //probably a different schema
// }
// column.setTablespace(rs.getString("TABLESPACE"));
// }
// } finally {
// if (rs != null) {
// try {
// rs.close();
// } catch (SQLException ignore) {
// }
// }
// if (statement != null) {
// try {
// statement.close();
// } catch (SQLException ignore) {
// }
// }
// }
//
// }
//
// /**
// * Method finds all INTEGER columns in snapshot's database
// *
// * @param snapshot current database snapshot
// * @return String list with names of all INTEGER columns
// * @throws java.sql.SQLException execute statement error
// */
// private List<String> findIntegerColumns(DatabaseSnapshot snapshot, String schema) throws SQLException, DatabaseException {
//
// Database database = snapshot.getDatabase();
// // Setting default schema name. Needed for correct statement generation
// if (schema == null) {
// schema = database.convertRequestedSchemaToSchema(schema);
// }
// Statement statement = ((JdbcConnection) database.getConnection()).getUnderlyingConnection().createStatement();
// ResultSet integerListRS = null;
// // Finding all columns created as 'INTEGER'
// try {
// integerListRS = statement.executeQuery("select TABLE_NAME, COLUMN_NAME from all_tab_columns where data_precision is null and data_scale = 0 and data_type = 'NUMBER' and owner = '" + schema + "'");
// while (integerListRS.next()) {
// integerList.add(integerListRS.getString("TABLE_NAME") + "." + integerListRS.getString("COLUMN_NAME"));
// }
// } finally {
// if (integerListRS != null) {
// try {
// integerListRS.close();
// } catch (SQLException ignore) {
// }
// }
//
// if (statement != null) {
// try {
// statement.close();
// } catch (SQLException ignore) {
// }
// }
// }
//
//
// return integerList;
// }
//
//// @Override
//// protected DatabaseDataType readDataType(ResultSet rs, Database database) throws SQLException {
//// if (integerList.contains(column.getTable().getName() + "." + column.getName())) {
//// column.setDataType(Types.INTEGER);
//// } else {
//// column.setDataType(rs.getInt("DATA_TYPE"));
//// }
//// column.setColumnSize(rs.getInt("COLUMN_SIZE"));
//// column.setDecimalDigits(rs.getInt("DECIMAL_DIGITS"));
////
//// // Set true, if precision should be initialize
//// column.setInitPrecision(
//// !((column.getDataType() == Types.DECIMAL ||
//// column.getDataType() == Types.NUMERIC ||
//// column.getDataType() == Types.REAL) && rs.getString("DECIMAL_DIGITS") == null)
//// );
//// }
//
//
//// @Override
//// protected Object readDefaultValue(Column columnInfo, ResultSet rs, Database database) throws SQLException, DatabaseException {
//// super.readDefaultValue(columnInfo, rs, database);
////
//// // Exclusive setting for oracle INTEGER type
//// // Details:
//// // INTEGER means NUMBER type with 'data_precision IS NULL and scale = 0'
//// if (columnInfo.getDataType() == Types.INTEGER) {
//// columnInfo.setType(DataTypeFactory.getInstance().parse("INTEGER"));
//// }
////
//// String columnTypeName = rs.getString("TYPE_NAME");
//// if ("VARCHAR2".equals(columnTypeName)) {
//// int charOctetLength = rs.getInt("CHAR_OCTET_LENGTH");
//// int columnSize = rs.getInt("COLUMN_SIZE");
//// if (columnSize == charOctetLength) {
//// columnInfo.setLengthSemantics(Column.ColumnSizeUnit.BYTE);
//// } else {
//// columnInfo.setLengthSemantics(Column.ColumnSizeUnit.CHAR);
//// }
//// }
//// }
}
| apache-2.0 |
sibay/vertx-web | vertx-template-engines/vertx-web-templ-pebble/src/main/java/io/vertx/ext/web/templ/impl/PebbleTemplateEngineImpl.java | 2482 | /*
* Copyright 2014 Red Hat, Inc.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* and Apache License v2.0 which accompanies this distribution.
*
* The Eclipse Public License is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* The Apache License v2.0 is available at
* http://www.opensource.org/licenses/apache2.0.php
*
* You may elect to redistribute this code under either of these licenses.
*/
package io.vertx.ext.web.templ.impl;
import java.io.StringWriter;
import java.util.HashMap;
import java.util.Map;
import com.mitchellbosecke.pebble.PebbleEngine;
import com.mitchellbosecke.pebble.template.PebbleTemplate;
import io.vertx.core.AsyncResult;
import io.vertx.core.Future;
import io.vertx.core.Handler;
import io.vertx.core.Vertx;
import io.vertx.core.buffer.Buffer;
import io.vertx.ext.web.RoutingContext;
import io.vertx.ext.web.templ.PebbleTemplateEngine;
/**
* @author Dan Kristensen
*/
/**
 * Pebble-backed implementation of {@link PebbleTemplateEngine}. Compiled
 * templates are held in the cache inherited from {@link CachingTemplateEngine},
 * keyed by the template file name exactly as the caller supplied it.
 */
public class PebbleTemplateEngineImpl extends CachingTemplateEngine<PebbleTemplate> implements PebbleTemplateEngine {

    private final PebbleEngine engine;

    public PebbleTemplateEngineImpl(Vertx vertx) {
        super(DEFAULT_TEMPLATE_EXTENSION, DEFAULT_MAX_CACHE_SIZE);
        // Template sources are resolved through Vert.x's file resolution.
        this.engine = new PebbleEngine.Builder().loader(new PebbleVertxLoader(vertx)).build();
    }

    @Override
    public PebbleTemplateEngine setExtension(String extension) {
        doSetExtension(extension);
        return this;
    }

    @Override
    public PebbleTemplateEngine setMaxCacheSize(int maxCacheSize) {
        cache.setMaxSize(maxCacheSize);
        return this;
    }

    @Override
    public void render(RoutingContext context, String templateFileName, Handler<AsyncResult<Buffer>> handler) {
        try {
            PebbleTemplate compiled = cache.get(templateFileName);
            if (compiled == null) {
                // Cache miss: resolve the effective location and compile once.
                compiled = engine.getTemplate(adjustLocation(templateFileName));
                cache.put(templateFileName, compiled);
            }
            // The routing context is exposed to the template under "context".
            Map<String, Object> model = new HashMap<>(1);
            model.put("context", context);
            StringWriter out = new StringWriter();
            compiled.evaluate(out, model);
            handler.handle(Future.succeededFuture(Buffer.buffer(out.toString())));
        } catch (Exception e) {
            // Any compilation or evaluation failure is reported asynchronously.
            handler.handle(Future.failedFuture(e));
        }
    }
}
| apache-2.0 |
punkhorn/camel-upstream | core/camel-core/src/test/java/org/apache/camel/component/file/FileConsumeNoopIdempotentAutoTest.java | 1407 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.file;
import org.apache.camel.builder.RouteBuilder;
/**
* Auto works just as enabled so we can reuse this time
*/
public class FileConsumeNoopIdempotentAutoTest extends FileConsumeNoopIdempotentEnabledTest {
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
from("file://target/data/noop?initialDelay=0&delay=10&noop=true").convertBodyTo(String.class).to("mock:result");
}
};
}
} | apache-2.0 |
WangGanxin/DoingDaily | app/src/main/java/com/ganxin/doingdaily/framework/BasePresenter.java | 1034 | package com.ganxin.doingdaily.framework;
import java.lang.ref.Reference;
import java.lang.ref.WeakReference;
/**
* Description : BasePresenter <br/>
* author : WangGanxin <br/>
* date : 2016/10/28 <br/>
* email : ganxinvip@163.com <br/>
*/
/**
 * Base class for MVP presenters. The view is held through a
 * {@link WeakReference} so the presenter never prevents the view
 * from being garbage collected.
 *
 * @param <T> the MVP view interface this presenter drives
 */
public abstract class BasePresenter<T> {

    // Weak reference to the attached view; null (or cleared) once detached.
    protected Reference<T> mViewRef;
    protected RxManager mRxManager;

    /**
     * Binds the view to this presenter, creates a fresh RxManager and starts
     * the presenter. (The method name keeps its historical misspelling so
     * existing callers keep compiling.)
     */
    public void attatchView(T view) {
        mViewRef = new WeakReference<>(view);
        mRxManager = new RxManager();
        onStart();
    }

    /** @return the attached view, or null if it was detached or collected. */
    public T getView() {
        return mViewRef.get();
    }

    public RxManager getRxManager() {
        return mRxManager;
    }

    /** @return true while a live view is attached. */
    public boolean isViewAttachted() {
        return mViewRef != null && getView() != null;
    }

    /** Releases the view reference and clears any pending Rx subscriptions. */
    public void detachView() {
        if (mViewRef != null) {
            mViewRef.clear();
            mViewRef = null;
        }
        if (mRxManager != null) {
            mRxManager.clear();
        }
    }

    /** Called once the view has been attached. */
    public abstract void onStart();
}
| apache-2.0 |
BriData/DBus | dbus-commons/src/main/java/com/creditease/dbus/commons/meta/Db2MetaComparator.java | 1307 | /*-
* <<
* DBus
* ==
* Copyright (C) 2016 - 2019 Bridata
* ==
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* >>
*/
package com.creditease.dbus.commons.meta;
import com.creditease.dbus.commons.DataType;
import com.creditease.dbus.commons.MetaWrapper;
import com.creditease.dbus.commons.SupportedDb2DataType;
/**
 * Compares two versions of DB2 meta information for compatibility.
 * Created by zhenlinzhong on 18/6/26.
 */
public class Db2MetaComparator extends AbstractMetaComparator {

    /**
     * A cell takes part in the comparison only when its DB2 data type is
     * supported and the column is neither hidden nor virtual.
     */
    protected boolean isCellSupported(MetaWrapper.MetaCell cell) {
        return SupportedDb2DataType.isSupported(cell.getDataType()) && !cell.isHidden() && !cell.isVirtual();
    }

    /** Maps the cell's DB2 type name onto the generic {@link DataType}. */
    protected DataType convert(MetaWrapper.MetaCell cell) {
        return DataType.convertDb2DataType(cell.getDataType());
    }
}
| apache-2.0 |
Hillkorn/CodinGame | src/main/java/Thor1.java | 3510 |
import java.util.List;
import java.util.Optional;
import java.util.Scanner;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
/**
 * CodinGame "Thor vs Giants" bot. Reads the game state from stdin each turn
 * and prints exactly one action (STRIKE, WAIT or a move direction) to stdout.
 * Debug output goes to stderr.
 */
class Thor1 {

    /** Simple mutable 2D grid coordinate. */
    public static class Position {
        public int x;
        public int y;

        public Position(int x, int y) {
            this.x = x;
            this.y = y;
        }

        @Override
        public String toString() {
            return "Position " + x + " " + y;
        }
    }

    /** Thor's current coordinates; updated by {@link #move(int, int)}. */
    public static int TX, TY;

    public static void main(String[] args) {
        Scanner in = new Scanner(System.in);
        TX = in.nextInt();
        TY = in.nextInt();

        // game loop
        while (true) {
            System.err.println("Thor " + TX + " " + TY);
            int H = in.nextInt(); // the remaining number of hammer strikes.
            int N = in.nextInt(); // the number of giants which are still present on the map.
            // Lazily reads the N giant positions from stdin. The stream is fully
            // consumed below (collect/min), so every coordinate pair is read.
            Stream<Thor1.Position> enemiesStream = IntStream.range(0, N).mapToObj((int i) -> {
                int X = in.nextInt();
                int Y = in.nextInt();
                return new Thor1.Position(X, Y);
            });
            if (N > H) {
                // More giants than strikes left: only strike when it pays off.
                List<Position> surroundingEnemies =
                        enemiesStream.filter(p -> calcDistance(p) <= 3).collect(Collectors.toList());
                surroundingEnemies.forEach((Position p) -> System.err.println(p));
                long count = surroundingEnemies.size();
                // NOTE: N / H is integer division — presumably the average number of
                // giants each remaining strike must take out; confirm against puzzle rules.
                if (count >= N / H || N == count) {
                    strike();
                } else if (count > 0) {
                    // Otherwise step away: flags record on which sides of Thor
                    // (relative coordinates) nearby giants were seen.
                    int u = 0, d = 0, l = 0, r = 0;
                    List<Position> deltas = surroundingEnemies.stream()
                            .map(Thor1::calcDistancePosition)
                            .collect(Collectors.toList());
                    for (Position delta : deltas) {
                        System.err.println("Enenmy " + delta);
                        if (delta.x > 0) {
                            r = 1;
                        } else if (delta.x < 0) {
                            l = 1;
                        }
                        if (delta.y > 0) {
                            u = 1;
                        } else if (delta.y < 0) {
                            d = 1;
                        }
                    }
                    System.err.println("r" + r + " l" + l + " d" + d + " u" + u);
                    if (d + u + l + r == 4) {
                        // Surrounded on all four sides — nowhere to flee, so strike.
                        strike();
                    } else {
                        move(r - l, u - d);
                    }
                } else {
                    stay();
                }
            } else {
                // Enough strikes for every giant: strike whenever one is adjacent.
                // Integer.compare avoids the subtraction-overflow comparator anti-pattern.
                Optional<Position> min =
                        enemiesStream.min((o1, o2) -> Integer.compare(calcDistance(o1), calcDistance(o2)));
                if (calcDistance(min.get()) <= 1) {
                    strike();
                } else {
                    stay();
                }
            }
        }
    }

    /** Swings the hammer at the current position. */
    public static void strike() {
        System.out.println("STRIKE");
    }

    /** Skips the turn. */
    public static void stay() {
        System.out.println("WAIT");
    }

    /**
     * Moves one cell in the direction given by the signs of (dx, dy) and keeps
     * the tracked position (TX, TY) in sync; dy &lt; 0 is "N", dx &gt; 0 is "E".
     * Prints an empty line when both components are zero (unchanged behavior).
     */
    public static void move(int dx, int dy) {
        String dir = "";
        if (dy < 0) {
            TY -= 1;
            dir += "N";
        } else if (dy > 0) {
            TY += 1;
            dir += "S";
        }
        if (dx < 0) {
            TX -= 1;
            dir += "W";
        } else if (dx > 0) {
            TX += 1;
            dir += "E";
        }
        System.out.println(dir); // The movement or action to be carried out: WAIT STRIKE N NE E SE S SW W or N
    }

    /**
     * Euclidean distance from Thor (TX, TY) to {@code p}, truncated to an int,
     * so a return of 3 means "closer than 4 cells".
     */
    public static int calcDistance(Position p) {
        int dx1 = Math.abs(TX - p.x);
        int dy1 = Math.abs(TY - p.y);
        return (int) Math.sqrt((dx1 * dx1) + (dy1 * dy1));
    }

    /** Vector from {@code p} to Thor: (TX - p.x, TY - p.y). */
    public static Position calcDistancePosition(Position p) {
        return new Position(TX - p.x, TY - p.y);
    }
}
| apache-2.0 |
savoirtech/hecate | pojo/src/test/java/com/savoirtech/hecate/pojo/binding/facet/ArrayFacetBindingTest.java | 5109 | /*
* Copyright (c) 2012-2016 Savoir Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.savoirtech.hecate.pojo.binding.facet;
import com.savoirtech.hecate.pojo.dao.PojoDao;
import com.savoirtech.hecate.pojo.entities.UuidEntity;
import com.savoirtech.hecate.pojo.test.AbstractDaoTestCase;
import com.savoirtech.hecate.test.Cassandra;
import org.junit.Test;
public class ArrayFacetBindingTest extends AbstractDaoTestCase {
//----------------------------------------------------------------------------------------------------------------------
// Other Methods
//----------------------------------------------------------------------------------------------------------------------
@Test
@Cassandra
public void testWithNullPojoArray() {
PojoDao<PojoArrayEntity> dao = createPojoDao(PojoArrayEntity.class);
PojoArrayEntity entity = new PojoArrayEntity();
dao.save(entity);
entity = dao.findByKey(entity.getId());
assertNull(entity.getPojos());
}
@Test
@Cassandra
public void testWithNullPrimitiveArray() {
PojoDao<PrimitiveArrayEntity> dao = createPojoDao(PrimitiveArrayEntity.class);
PrimitiveArrayEntity entity = new PrimitiveArrayEntity();
dao.save(entity);
entity = dao.findByKey(entity.getId());
assertNull(entity.getInts());
}
@Test
@Cassandra
public void testWithPojoArray() {
PojoDao<PojoArrayEntity> dao = createPojoDao(PojoArrayEntity.class);
PojoArrayEntity entity = new PojoArrayEntity();
ElementEntity[] expected = new ElementEntity[] {new ElementEntity(), new ElementEntity()};
entity.setPojos(expected);
dao.save(entity);
entity = dao.findByKey(entity.getId());
assertArrayEquals(expected, entity.getPojos());
}
@Test
@Cassandra
public void testWithPrimitiveArray() {
PojoDao<PrimitiveArrayEntity> dao = createPojoDao(PrimitiveArrayEntity.class);
PrimitiveArrayEntity entity = new PrimitiveArrayEntity();
int[] expected = {3, 1, 4, 1, 5, 9};
entity.setInts(expected);
dao.save(entity);
entity = dao.findByKey(entity.getId());
assertArrayEquals(expected, entity.getInts());
}
@Test
@Cassandra
public void testWithNullElement() {
assertHecateException("Cassandra driver does not support null values inside list<varchar> collections.", () -> {
PojoDao<PojoArrayEntity> dao = createPojoDao(PojoArrayEntity.class);
PojoArrayEntity entity = new PojoArrayEntity();
entity.setPojos(new ElementEntity[] {new ElementEntity(), null, new ElementEntity()});
dao.save(entity);
});
}
//----------------------------------------------------------------------------------------------------------------------
// Inner Classes
//----------------------------------------------------------------------------------------------------------------------
public static class PojoArrayEntity extends UuidEntity {
//----------------------------------------------------------------------------------------------------------------------
// Fields
//----------------------------------------------------------------------------------------------------------------------
private ElementEntity[] pojos;
//----------------------------------------------------------------------------------------------------------------------
// Getter/Setter Methods
//----------------------------------------------------------------------------------------------------------------------
public ElementEntity[] getPojos() {
return pojos;
}
public void setPojos(ElementEntity[] pojos) {
this.pojos = pojos;
}
}
public static class PrimitiveArrayEntity extends UuidEntity {
//----------------------------------------------------------------------------------------------------------------------
// Fields
//----------------------------------------------------------------------------------------------------------------------
private int[] ints;
//----------------------------------------------------------------------------------------------------------------------
// Getter/Setter Methods
//----------------------------------------------------------------------------------------------------------------------
public int[] getInts() {
return ints;
}
public void setInts(int[] ints) {
this.ints = ints;
}
}
} | apache-2.0 |
edward-yakop/flexmojos | flexmojos-testing/flexmojos-tester/src/main/java/org/sonatype/flexmojos/test/report/ErrorReport.java | 1575 | /**
* Copyright 2008 Marvin Herman Froeder
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*
*/
package org.sonatype.flexmojos.test.report;
import org.codehaus.plexus.util.xml.Xpp3Dom;
/**
 * Read-only view over one error entry of a flexmojos test report: all data is
 * read on demand from the attributes of the backing XML node.
 */
@SuppressWarnings( "unused" )
public class ErrorReport
{

    // NOTE(review): these three fields are never assigned or read — every accessor
    // delegates to `dom` below. They look like dead code, but may be retained for a
    // reflection-based mapper; confirm before removing.
    private String message;

    private String stackTrace;

    private String type;

    // Backing XML element; all getters read attributes straight from it.
    private Xpp3Dom dom;

    public ErrorReport( Xpp3Dom dom )
    {
        this.dom = dom;
    }

    /** Error message, read from the "message" attribute of the XML node. */
    public String getMessage()
    {
        return dom.getAttribute( "message" );
    }

    /** Stack trace text, read from the "stackTrace" attribute. */
    public String getStackTrace()
    {
        return dom.getAttribute( "stackTrace" );
    }

    /** Error type, read from the "type" attribute. */
    public String getType()
    {
        return dom.getAttribute( "type" );
    }

    /** This report is read-only; all setters deliberately reject mutation. */
    public void setMessage( String message )
    {
        throw new UnsupportedOperationException();
    }

    public void setStackTrace( String stackTrace )
    {
        throw new UnsupportedOperationException();
    }

    public void setType( String type )
    {
        throw new UnsupportedOperationException();
    }
}
| apache-2.0 |
googleinterns/play-movies-2020-intern | server/src/main/java/com/google/moviestvsentiments/assetSentiment/UserSentimentRepository.java | 347 | package com.google.moviestvsentiments.assetSentiment;
import org.springframework.data.repository.CrudRepository;
/**
* A Repository that provides functions for accessing and modifying UserSentiment database records.
*/
public interface UserSentimentRepository extends CrudRepository<UserSentiment, UserSentiment.UserSentimentCompositeKey> {
    // No custom queries yet — all CRUD operations are inherited from CrudRepository,
    // keyed by UserSentiment's composite key type.
}
| apache-2.0 |
thingtrack/konekti | core/konekti.service.impl/src/main/java/com/thingtrack/konekti/service/impl/internal/InvoiceLineStatusServiceImpl.java | 1239 | package com.thingtrack.konekti.service.impl.internal;
import java.util.List;
import org.springframework.beans.factory.annotation.Autowired;
import com.thingtrack.konekti.dao.api.InvoiceLineStatusDao;
import com.thingtrack.konekti.domain.InvoiceLineStatus;
import com.thingtrack.konekti.service.api.InvoiceLineStatusService;
/**
* @author Thingtrack S.L.
*
*/
/**
 * Default {@link InvoiceLineStatusService} implementation: every operation is
 * a straight delegation to the underlying {@link InvoiceLineStatusDao}.
 */
public class InvoiceLineStatusServiceImpl implements InvoiceLineStatusService {

    @Autowired
    private InvoiceLineStatusDao invoiceLineStatusDao;

    /** Returns every persisted invoice line status. */
    @Override
    public List<InvoiceLineStatus> getAll() throws Exception {
        return invoiceLineStatusDao.getAll();
    }

    /** Looks up a status by its primary key. */
    @Override
    public InvoiceLineStatus get(Integer feedbackStatusId) throws Exception {
        return invoiceLineStatusDao.get(feedbackStatusId);
    }

    /** Looks up a status by its business code. */
    @Override
    public InvoiceLineStatus getByCode(String code) throws Exception {
        return invoiceLineStatusDao.getByCode(code);
    }

    /** Persists (creates or updates) the given status and returns the managed instance. */
    @Override
    public InvoiceLineStatus save(InvoiceLineStatus invoiceLineStatus) throws Exception {
        return invoiceLineStatusDao.save(invoiceLineStatus);
    }

    /** Removes the given status. */
    @Override
    public void delete(InvoiceLineStatus invoiceLineStatus) throws Exception {
        invoiceLineStatusDao.delete(invoiceLineStatus);
    }
}
| apache-2.0 |
weld/core | tests-arquillian/src/test/java/org/jboss/weld/tests/contexts/request/custom/Result.java | 1127 | /*
* JBoss, Home of Professional Open Source
* Copyright 2015, Red Hat, Inc., and individual contributors
* by the @authors tag. See the copyright.txt in the distribution for a
* full listing of individual contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.weld.tests.contexts.request.custom;
import jakarta.enterprise.inject.Vetoed;
// Simple immutable holder used by the custom-request-context tests.
// @Vetoed excludes the class from CDI bean discovery, so instances are
// constructed manually by the test code.
@Vetoed
public class Result {

    // Wrapped value; null when the no-arg constructor is used.
    private final String value;

    public Result() {
        this.value = null;
    }

    public Result(String value) {
        this.value = value;
    }

    protected String getValue() {
        return value;
    }
}
| apache-2.0 |
CrispOSS/abs-api | src/main/java/abs/api/ComparableRunnableFuture.java | 283 | package abs.api;
import java.util.concurrent.RunnableFuture;
/**
 * A {@link RunnableFuture} producing {@code Object} results that is also
 * {@link Comparable} to its own kind, so instances can be mutually ordered
 * (e.g. by an ordered task queue — exact usage depends on the caller).
 *
 * @author Behrooz Nobakht
 * @since 1.0
 */
public interface ComparableRunnableFuture extends RunnableFuture<Object>,
		Comparable<ComparableRunnableFuture> {
}
| apache-2.0 |
jexp/idea2 | plugins/maven/src/test/java/org/jetbrains/idea/maven/tasks/MavenShortcutsManagerTest.java | 8112 | /*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.idea.maven.tasks;
import com.intellij.openapi.actionSystem.ActionManager;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.openapi.vfs.VirtualFile;
import org.jetbrains.idea.maven.MavenImportingTestCase;
import java.util.Arrays;
import java.util.List;
/**
 * Tests that {@link MavenShortcutsManager} keeps keymap actions in sync with
 * the set of (non-ignored) Maven projects: actions appear on import/addition,
 * disappear on removal/ignore, and survive re-reads and plugin resolution.
 */
public class MavenShortcutsManagerTest extends MavenImportingTestCase {
  private MavenShortcutsManager myShortcutsManager;

  @Override
  protected void setUp() throws Exception {
    super.setUp();
    myShortcutsManager = MavenShortcutsManager.getInstance(myProject);
    myShortcutsManager.doInit();
    initProjectsManager(true);
  }

  // Importing two modules must register goal actions (e.g. "clean") for both.
  public void testRefreshingActionsOnImport() throws Exception {
    assertTrue(getProjectActions().isEmpty());

    VirtualFile p1 = createModulePom("p1", "<groupId>test</groupId>" +
                                           "<artifactId>p1</artifactId>" +
                                           "<version>1</version>");
    VirtualFile p2 = createModulePom("p2", "<groupId>test</groupId>" +
                                           "<artifactId>p2</artifactId>" +
                                           "<version>1</version>");
    importProjects(p1, p2);

    assertKeymapContains(p1, "clean");
    assertKeymapContains(p2, "clean");
  }

  // Re-importing a changed pom must not lose previously registered actions.
  public void testRefreshingOnProjectRead() throws Exception {
    importProject("<groupId>test</groupId>" +
                  "<artifactId>project</artifactId>" +
                  "<version>1</version>");
    assertKeymapContains(myProjectPom, "clean");

    importProject("<groupId>test</groupId>" +
                  "<artifactId>project</artifactId>" +
                  "<version>1</version>" +

                  "<build>" +
                  "  <plugins>" +
                  "    <plugin>" +
                  "      <groupId>org.apache.maven.plugins</groupId>" +
                  "      <artifactId>maven-surefire-plugin</artifactId>" +
                  "    </plugin>" +
                  "  </plugins>" +
                  "</build>");
    assertKeymapContains(myProjectPom, "clean");
  }

  // Plugin-specific goal actions appear only after the plugin is resolved.
  public void testRefreshingOnPluginResolve() throws Exception {
    importProject("<groupId>test</groupId>" +
                  "<artifactId>project</artifactId>" +
                  "<version>1</version>");
    assertKeymapDoesNotContain(myProjectPom, "org.apache.maven.plugins:maven-surefire-plugin:2.4.3:test");

    importProject("<groupId>test</groupId>" +
                  "<artifactId>project</artifactId>" +
                  "<version>1</version>" +

                  "<build>" +
                  "  <plugins>" +
                  "    <plugin>" +
                  "      <groupId>org.apache.maven.plugins</groupId>" +
                  "      <artifactId>maven-surefire-plugin</artifactId>" +
                  "      <version>2.4.3</version>" +
                  "    </plugin>" +
                  "  </plugins>" +
                  "</build>");
    resolvePlugins();

    assertKeymapContains(myProjectPom, "org.apache.maven.plugins:maven-surefire-plugin:2.4.3:test");
  }

  // Duplicate plugin declarations must not produce duplicate/broken actions.
  public void testActionWhenSeveralSimilarPlugins() throws Exception {
    importProject("<groupId>test</groupId>" +
                  "<artifactId>project</artifactId>" +
                  "<version>1</version>");

    importProject("<groupId>test</groupId>" +
                  "<artifactId>project</artifactId>" +
                  "<version>1</version>" +

                  "<build>" +
                  "  <plugins>" +
                  "    <plugin>" +
                  "      <groupId>org.apache.maven.plugins</groupId>" +
                  "      <artifactId>maven-surefire-plugin</artifactId>" +
                  "      <version>2.4.3</version>" +
                  "    </plugin>" +
                  "    <plugin>" +
                  "      <groupId>org.apache.maven.plugins</groupId>" +
                  "      <artifactId>maven-surefire-plugin</artifactId>" +
                  "      <version>2.4.3</version>" +
                  "    </plugin>" +
                  "  </plugins>" +
                  "</build>");
    resolvePlugins();

    assertKeymapContains(myProjectPom, "org.apache.maven.plugins:maven-surefire-plugin:2.4.3:test");
  }

  // Adding a module to an aggregator registers actions for the new module.
  public void testRefreshingOnProjectAddition() throws Exception {
    importProject("<groupId>test</groupId>" +
                  "<artifactId>project</artifactId>" +
                  "<version>1</version>");

    VirtualFile m = createModulePom("module", "<groupId>test</groupId>" +
                                              "<artifactId>module</artifactId>" +
                                              "<version>1</version>");

    assertKeymapDoesNotContain(m, "clean");

    createProjectPom("<groupId>test</groupId>" +
                     "<artifactId>project</artifactId>" +
                     "<version>1</version>" +

                     "<modules>" +
                     "  <module>module</module>" +
                     "</modules>");
    waitForReadingCompletion();

    assertKeymapContains(m, "clean");
  }

  // Deleting a pom removes only that project's actions.
  public void testDeletingActionOnProjectRemoval() throws Exception {
    VirtualFile p1 = createModulePom("p1", "<groupId>test</groupId>" +
                                           "<artifactId>p1</artifactId>" +
                                           "<version>1</version>");
    VirtualFile p2 = createModulePom("p2", "<groupId>test</groupId>" +
                                           "<artifactId>p2</artifactId>" +
                                           "<version>1</version>");

    importProjects(p1, p2);
    assertKeymapContains(p1, "clean");
    assertKeymapContains(p2, "clean");

    p1.delete(this);
    waitForReadingCompletion();

    assertKeymapDoesNotContain(p1, "clean");
    assertKeymapContains(p2, "clean");
  }

  // Ignoring a project hides its actions; un-ignoring restores them.
  public void testRefreshingActionsOnChangingIgnoreFlag() throws Exception {
    VirtualFile p1 = createModulePom("p1", "<groupId>test</groupId>" +
                                           "<artifactId>p1</artifactId>" +
                                           "<version>1</version>");
    VirtualFile p2 = createModulePom("p2", "<groupId>test</groupId>" +
                                           "<artifactId>p2</artifactId>" +
                                           "<version>1</version>");
    importProjects(p1, p2);

    assertKeymapContains(p1, "clean");
    assertKeymapContains(p2, "clean");

    myProjectsManager.setIgnoredState(Arrays.asList(myProjectsManager.findProject(p1)), true);

    assertKeymapDoesNotContain(p1, "clean");
    assertKeymapContains(p2, "clean");

    myProjectsManager.setIgnoredState(Arrays.asList(myProjectsManager.findProject(p1)), false);

    assertKeymapContains(p1, "clean");
    assertKeymapContains(p2, "clean");
  }

  // Asserts that the action id for (pom, goal) is currently registered.
  private void assertKeymapContains(VirtualFile pomFile, String goal) {
    String id = myShortcutsManager.getActionId(pomFile.getPath(), goal);
    assertTrue("Action " + id + " not found among: \n" + StringUtil.join(getProjectActions(), "\n"), getProjectActions().contains(id));
  }

  // Asserts that the action id for (pom, goal) is NOT registered.
  private void assertKeymapDoesNotContain(VirtualFile pomFile, String goal) {
    String id = myShortcutsManager.getActionId(pomFile.getPath(), goal);
    assertFalse(getProjectActions().contains(id));
  }

  // All registered action ids carrying this project's Maven prefix.
  private List<String> getProjectActions() {
    String prefix = MavenKeymapExtension.getActionPrefix(myProject, null);
    return Arrays.asList(ActionManager.getInstance().getActionIds(prefix));
  }
}
| apache-2.0 |
Dennis-Koch/ambeth | jambeth/jambeth-merge/src/main/java/com/koch/ambeth/merge/security/IDefaultSecurityScopeProvider.java | 1498 | package com.koch.ambeth.merge.security;
/*-
* #%L
* jambeth-merge
* %%
* Copyright (C) 2017 Koch Softwaredevelopment
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
* #L%
*/
import com.koch.ambeth.service.model.ISecurityScope;
/**
 * Allows complete customization of how — and which — security scopes are
 * resolved whenever none have been explicitly set for the current thread.
 */
public interface IDefaultSecurityScopeProvider {
	/**
	 * Returns the resolved default security scopes. This method is called by the framework
	 * (implemented by {@link SecurityScopeProvider#getSecurityScopes()}) whenever it is requested to
	 * resolve security scopes and they are not explicitly set for the current calling thread. You can
	 * explicitly set thread-local security scopes by calling
	 * {@link ISecurityScopeProvider#pushSecurityScopes(ISecurityScope, com.koch.ambeth.util.state.IStateRollback...)}.
	 *
	 * @return The resolved default security scopes
	 */
	ISecurityScope[] getDefaultSecurityScopes();
}
| apache-2.0 |
lessthanoptimal/BoofCV | integration/boofcv-swing/src/main/java/boofcv/gui/image/ScaleOptions.java | 1059 | /*
* Copyright (c) 2011-2018, Peter Abeles. All Rights Reserved.
*
* This file is part of BoofCV (http://boofcv.org).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package boofcv.gui.image;
/**
 * Specifies the different behaviors available for automatically scaling an
 * image inside a GUI component.
 *
 * @author Peter Abeles
 */
public enum ScaleOptions {
	/**
	 * No scaling at all; the image is shown at its native size.
	 */
	NONE,
	/**
	 * Scale down to fit the available space, but never scale up.
	 */
	DOWN,
	/**
	 * Freely scale up and down to fill the available space.
	 */
	ALL,
	/**
	 * Use whatever scale has been set manually by the caller.
	 */
	MANUAL
}
| apache-2.0 |
escardin/camunda-spin | dataformat-json-jackson/src/test/java/org/camunda/spin/javascript/json/tree/JsonTreeMapJsonToJavaJavascriptTest.java | 881 | /* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.spin.javascript.json.tree;
import org.camunda.spin.impl.test.ScriptEngine;
import org.camunda.spin.json.tree.JsonTreeMapJsonToJavaScriptTest;
/**
 * Runs the inherited {@link JsonTreeMapJsonToJavaScriptTest} suite against the
 * "javascript" script engine (selected via the {@code @ScriptEngine}
 * annotation); no additional test methods are needed.
 *
 * @author Sebastian Menski
 */
@ScriptEngine("javascript")
public class JsonTreeMapJsonToJavaJavascriptTest extends JsonTreeMapJsonToJavaScriptTest {
}
| apache-2.0 |
johnpr01/Aeron | aeron-driver/src/main/java/uk/co/real_logic/aeron/driver/cmd/RemovePendingSetupCmd.java | 1330 | /*
* Copyright 2014 - 2015 Real Logic Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.co.real_logic.aeron.driver.cmd;
import uk.co.real_logic.aeron.driver.ReceiveChannelEndpoint;
import uk.co.real_logic.aeron.driver.Receiver;
/**
 * Receiver command that removes a pending setup message for a given
 * session/stream pair on a channel endpoint.
 */
public class RemovePendingSetupCmd implements ReceiverCmd
{
    private final ReceiveChannelEndpoint channelEndpoint;
    private final int sessionId;
    private final int streamId;

    /**
     * @param channelEndpoint endpoint on which the pending setup exists
     * @param sessionId       session id of the pending setup
     * @param streamId        stream id of the pending setup
     */
    public RemovePendingSetupCmd(final ReceiveChannelEndpoint channelEndpoint, final int sessionId, final int streamId)
    {
        this.channelEndpoint = channelEndpoint;
        this.sessionId = sessionId;
        this.streamId = streamId;
    }

    /**
     * Executes by delegating to {@link Receiver#onRemovePendingSetup}.
     *
     * @param receiver the receiver to act upon
     */
    public void execute(final Receiver receiver)
    {
        receiver.onRemovePendingSetup(channelEndpoint, sessionId, streamId);
    }
}
| apache-2.0 |
xuwoool/JStudy | src/com/jstudy/sys/workflow/CityProjectTestCase.java | 1293 | package com.jstudy.sys.workflow;
import org.jbpm.api.Configuration;
import org.jbpm.api.ProcessEngine;
import org.jbpm.api.RepositoryService;
import junit.framework.TestCase;
import com.jstudy.sys.vo.CityProjectForm;
/**
 * jBPM workflow test for the "CityProject" process: deploys the process
 * definition and prepares a sample approval form.
 */
public class CityProjectTestCase extends TestCase {
	private ProcessEngine processEngine;
	private RepositoryService repositoryService;
	private CityProjectForm form = null;

	@Override
	protected void setUp() throws Exception {
		// Build the process engine from jbpm.cfg.xml on the classpath.
		processEngine = new Configuration().setResource("jbpm.cfg.xml").buildProcessEngine();
		repositoryService = processEngine.getRepositoryService();
		super.setUp();
	}

	@Override
	protected void tearDown() throws Exception {
		// No engine cleanup is performed here; only the superclass teardown runs.
		super.tearDown();
	}

	/**
	 * Deploys the process definition (translated from: 发布流程).
	 */
	public void test01deployWorkFlow() {
		repositoryService.createDeployment().addResourceFromClasspath("com/jstudy/sys/workflow/CityProject.jpdl.xml").deploy();
	}

	/**
	 * Fills in the sample form data (translated from: 填写表单).
	 */
	private void writeForm() {
		form = new CityProjectForm();
		form.setProjectCd("B1423601");
		form.setApprovalName("关于太湖路-山西路的立项");
		form.setCityCode("21");
		form.setCreateUser("tianhw");
		form.setProjectInvAt(200.00);
		form.setTaskFlag(1000L);
	}

	// NOTE(review): only populates the form — no process submission or
	// assertions are performed yet; looks like an unfinished test.
	public void test03submit() {
		writeForm();
	}
}
| apache-2.0 |
Madeyedexter/ud851-exercises-student | Lesson04a-Starting-New-Activities/T04a.03-Exercise-PassingDataBetweenActivities/app/src/main/java/com/example/android/explicitintent/MainActivity.java | 3742 | /*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.android.explicitintent;
import android.content.Context;
import android.content.Intent;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.Button;
import android.widget.EditText;
/**
 * Launcher screen: reads text from an EditText and, when the button is
 * pressed, starts {@link ChildActivity} via an explicit Intent, passing the
 * entered text as an Intent extra.
 */
public class MainActivity extends AppCompatActivity {

    /** Field the user types into. */
    private EditText mNameEntry;

    /** Button that triggers the explicit Intent. */
    private Button mDoSomethingCoolButton;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);

        // Look up the views declared in activity_main.xml.
        mNameEntry = (EditText) findViewById(R.id.et_text_entry);
        mDoSomethingCoolButton = (Button) findViewById(R.id.b_do_something_cool);

        mDoSomethingCoolButton.setOnClickListener(new OnClickListener() {
            /**
             * Fired when the button is clicked: packages the typed text into
             * an explicit Intent and launches ChildActivity.
             *
             * @param v the clicked view (the button itself)
             */
            @Override
            public void onClick(View v) {
                // Capture whatever the user has typed so far.
                final String enteredText = mNameEntry.getText().toString();

                // Explicit Intent: target Activity class named directly.
                final Intent startChildActivityIntent =
                        new Intent(MainActivity.this, ChildActivity.class);

                // Attach the text under the key the child activity reads.
                startChildActivityIntent.putExtra("text", enteredText);

                startActivity(startChildActivityIntent);
            }
        });
    }
}
| apache-2.0 |
jmacglashan/burlap | src/main/java/burlap/shell/command/world/GenerateStateCommand.java | 1256 | package burlap.shell.command.world;
import burlap.mdp.stochasticgames.world.World;
import burlap.shell.BurlapShell;
import burlap.shell.SGWorldShell;
import burlap.shell.command.ShellCommand;
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import java.io.PrintStream;
import java.util.Scanner;
/**
* A {@link burlap.shell.command.ShellCommand} for generating a new state in a {@link World}
* according to the {@link World}'s assigned {@link burlap.mdp.auxiliary.StateGenerator}.
* Use the -h option for help information.
* @author James MacGlashan.
*/
/**
 * A {@link burlap.shell.command.ShellCommand} that asks the shell's
 * {@link World} to generate a new initial state according to the
 * {@link World}'s assigned {@link burlap.mdp.auxiliary.StateGenerator}.
 * Use the -h option for help information.
 * @author James MacGlashan.
 */
public class GenerateStateCommand implements ShellCommand {

	protected OptionParser parser = new OptionParser("vh*");

	@Override
	public String commandName() {
		return "gs";
	}

	@Override
	public int call(BurlapShell shell, String argString, Scanner is, PrintStream os) {

		final OptionSet options = this.parser.parse(argString.split(" "));

		// -h: print usage and stop without touching the world.
		if (options.has("h")) {
			os.println("[-v]\nCauses the world to generate a new initial state.\n\n-v: print the new state after generating it.");
			return 0;
		}

		final World world = ((SGWorldShell) shell).getWorld();
		world.generateNewCurrentState();

		// -v: echo the freshly generated state.
		if (options.has("v")) {
			os.println(world.getCurrentWorldState().toString());
		}

		return 1;
	}
}
| apache-2.0 |
hortonworks/cloudbreak | mock-infrastructure/src/generated/com/sequenceiq/mock/swagger/model/ApiParcelUsageHost.java | 3207 | package com.sequenceiq.mock.swagger.model;
import java.util.Objects;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.sequenceiq.mock.swagger.model.ApiHostRef;
import com.sequenceiq.mock.swagger.model.ApiParcelUsageRole;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.util.ArrayList;
import java.util.List;
import org.springframework.validation.annotation.Validated;
import javax.validation.Valid;
import javax.validation.constraints.*;
/**
* This object is used to represent a host within an ApiParcelUsage.
*/
@ApiModel(description = "This object is used to represent a host within an ApiParcelUsage.")
@Validated
@javax.annotation.Generated(value = "io.swagger.codegen.languages.SpringCodegen", date = "2021-12-10T21:24:30.629+01:00")
public class ApiParcelUsageHost {
  // NOTE(review): this class is Swagger-codegen generated (see the @Generated
  // annotation above) — prefer regenerating from the spec over hand edits.

  // Reference to the host this usage entry describes.
  @JsonProperty("hostRef")
  private ApiHostRef hostRef = null;

  // Roles present on the host; lazily created by addRolesItem.
  @JsonProperty("roles")
  @Valid
  private List<ApiParcelUsageRole> roles = null;

  // Fluent setter: assigns hostRef and returns this for chaining.
  public ApiParcelUsageHost hostRef(ApiHostRef hostRef) {
    this.hostRef = hostRef;
    return this;
  }

  /**
   * A reference to the corresponding Host object.
   * @return hostRef
   **/
  @ApiModelProperty(value = "A reference to the corresponding Host object.")
  @Valid
  public ApiHostRef getHostRef() {
    return hostRef;
  }

  public void setHostRef(ApiHostRef hostRef) {
    this.hostRef = hostRef;
  }

  // Fluent setter: assigns roles and returns this for chaining.
  public ApiParcelUsageHost roles(List<ApiParcelUsageRole> roles) {
    this.roles = roles;
    return this;
  }

  // Appends one role, creating the backing list on first use.
  public ApiParcelUsageHost addRolesItem(ApiParcelUsageRole rolesItem) {
    if (this.roles == null) {
      this.roles = new ArrayList<>();
    }
    this.roles.add(rolesItem);
    return this;
  }

  /**
   * A collection of the roles present on the host.
   * @return roles
   **/
  @ApiModelProperty(value = "A collection of the roles present on the host.")
  @Valid
  public List<ApiParcelUsageRole> getRoles() {
    return roles;
  }

  public void setRoles(List<ApiParcelUsageRole> roles) {
    this.roles = roles;
  }

  // Value equality over both fields.
  @Override
  public boolean equals(java.lang.Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    ApiParcelUsageHost apiParcelUsageHost = (ApiParcelUsageHost) o;
    return Objects.equals(this.hostRef, apiParcelUsageHost.hostRef) &&
        Objects.equals(this.roles, apiParcelUsageHost.roles);
  }

  @Override
  public int hashCode() {
    return Objects.hash(hostRef, roles);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("class ApiParcelUsageHost {\n");
    sb.append(" hostRef: ").append(toIndentedString(hostRef)).append("\n");
    sb.append(" roles: ").append(toIndentedString(roles)).append("\n");
    sb.append("}");
    return sb.toString();
  }

  /**
   * Convert the given object to string with each line indented by 4 spaces
   * (except the first line).
   */
  private String toIndentedString(java.lang.Object o) {
    if (o == null) {
      return "null";
    }
    return o.toString().replace("\n", "\n ");
  }
}
| apache-2.0 |
phanichaitanya/openbrain | src/main/java/openbrain/peoplesearch/wiki/index/merge/SecondaryIndexer.java | 1472 | package openbrain.peoplesearch.wiki.index.merge;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.RandomAccessFile;
/**
 * Builds a sparse (secondary) index over a primary index file: every
 * {@code blockSize}-th primary-index line is recorded as
 * {@code term#byteOffset}, so a later search can seek directly into the
 * primary index.
 */
public class SecondaryIndexer {

	/**
	 * Scans the primary index and writes one secondary-index entry per
	 * {@code blockSize} lines.
	 *
	 * @param inFilename  primary index file; each line has the form "term => postings"
	 * @param outFilename destination file; receives one "term#offset" line per sampled entry
	 * @param blockSize   sampling interval; every blockSize-th line is indexed
	 * @throws IOException if either file cannot be read or written
	 */
	public static void buildSecIndex(String inFilename, String outFilename,
			int blockSize) throws IOException {
		// try-with-resources guarantees both files are closed even when an
		// I/O error occurs mid-scan (the original leaked them on exception).
		try (RandomAccessFile priIndexFile = new RandomAccessFile(new File(inFilename), "r");
				BufferedWriter secIndex = new BufferedWriter(new FileWriter(outFilename))) {
			long offset = priIndexFile.getFilePointer();
			String curLine = priIndexFile.readLine();
			long linenum = 0;
			while (curLine != null) {
				if (linenum % blockSize == 0) {
					// Record the term plus the byte offset of its line's first byte.
					secIndex.write((curLine.split(" => "))[0] + "#" + offset + "\n");
				}
				// Capture the file pointer BEFORE reading the next line, so the
				// recorded offset points at that line's start.
				offset = priIndexFile.getFilePointer();
				curLine = priIndexFile.readLine();
				linenum++;
			}
		}
	}

	/**
	 * Command-line entry point: args are the primary index file, the output
	 * file, and the block size.
	 */
	public static void main(String[] args) throws IOException {
		if (args.length != 3) {
			System.out.println("\nUsage: java -cp people.jar:lib/* merge.SecondaryIndexer <FinalIndexFile> <OutputFile> <BlockSize>");
			System.exit(1);
		}
		// parseInt avoids the needless Integer boxing of Integer.valueOf.
		buildSecIndex(args[0], args[1], Integer.parseInt(args[2]));
	}
}
| apache-2.0 |
xhoong/incubator-calcite | elasticsearch/src/main/java/org/apache/calcite/adapter/elasticsearch/ElasticsearchRules.java | 10603 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.calcite.adapter.elasticsearch;
import org.apache.calcite.adapter.enumerable.RexImpTable;
import org.apache.calcite.adapter.enumerable.RexToLixTranslator;
import org.apache.calcite.adapter.java.JavaTypeFactory;
import org.apache.calcite.plan.Convention;
import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.RelTrait;
import org.apache.calcite.plan.RelTraitSet;
import org.apache.calcite.rel.InvalidRelException;
import org.apache.calcite.rel.RelCollations;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.convert.ConverterRule;
import org.apache.calcite.rel.core.Sort;
import org.apache.calcite.rel.logical.LogicalAggregate;
import org.apache.calcite.rel.logical.LogicalFilter;
import org.apache.calcite.rel.logical.LogicalProject;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rex.RexCall;
import org.apache.calcite.rex.RexInputRef;
import org.apache.calcite.rex.RexLiteral;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.rex.RexVisitorImpl;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.calcite.sql.validate.SqlValidatorUtil;
import java.util.AbstractList;
import java.util.ArrayList;
import java.util.List;
/**
* Rules and relational operators for
* {@link ElasticsearchRel#CONVENTION ELASTICSEARCH}
* calling convention.
*/
class ElasticsearchRules {
static final RelOptRule[] RULES = {
ElasticsearchSortRule.INSTANCE,
ElasticsearchFilterRule.INSTANCE,
ElasticsearchProjectRule.INSTANCE,
ElasticsearchAggregateRule.INSTANCE
};
private ElasticsearchRules() {}
/**
* Returns 'string' if it is a call to item['string'], null otherwise.
* @param call current relational expression
* @return literal value
*/
private static String isItemCall(RexCall call) {
if (call.getOperator() != SqlStdOperatorTable.ITEM) {
return null;
}
final RexNode op0 = call.getOperands().get(0);
final RexNode op1 = call.getOperands().get(1);
if (op0 instanceof RexInputRef
&& ((RexInputRef) op0).getIndex() == 0
&& op1 instanceof RexLiteral
&& ((RexLiteral) op1).getValue2() instanceof String) {
return (String) ((RexLiteral) op1).getValue2();
}
return null;
}
/**
* Checks if current node represents item access as in {@code _MAP['foo']} or
* {@code cast(_MAP['foo'] as integer)}
*
* @return true if expression is item, false otherwise
*/
static boolean isItem(RexNode node) {
final Boolean result = node.accept(new RexVisitorImpl<Boolean>(false) {
@Override public Boolean visitCall(final RexCall call) {
return isItemCall(uncast(call)) != null;
}
});
return Boolean.TRUE.equals(result);
}
/**
* Unwraps cast expressions from current call. {@code cast(cast(expr))} becomes {@code expr}.
*/
private static RexCall uncast(RexCall maybeCast) {
if (maybeCast.getKind() == SqlKind.CAST && maybeCast.getOperands().get(0) instanceof RexCall) {
return uncast((RexCall) maybeCast.getOperands().get(0));
}
// not a cast
return maybeCast;
}
static List<String> elasticsearchFieldNames(final RelDataType rowType) {
return SqlValidatorUtil.uniquify(
new AbstractList<String>() {
@Override public String get(int index) {
final String name = rowType.getFieldList().get(index).getName();
return name.startsWith("$") ? "_" + name.substring(2) : name;
}
@Override public int size() {
return rowType.getFieldCount();
}
},
SqlValidatorUtil.EXPR_SUGGESTER, true);
}
static String quote(String s) {
return "\"" + s + "\"";
}
static String stripQuotes(String s) {
return s.length() > 1 && s.startsWith("\"") && s.endsWith("\"")
? s.substring(1, s.length() - 1) : s;
}
/**
* Translator from {@link RexNode} to strings in Elasticsearch's expression
* language.
*/
static class RexToElasticsearchTranslator extends RexVisitorImpl<String> {
private final JavaTypeFactory typeFactory;
private final List<String> inFields;
RexToElasticsearchTranslator(JavaTypeFactory typeFactory, List<String> inFields) {
super(true);
this.typeFactory = typeFactory;
this.inFields = inFields;
}
@Override public String visitLiteral(RexLiteral literal) {
if (literal.getValue() == null) {
return "null";
}
return "\"literal\":\""
+ RexToLixTranslator.translateLiteral(literal, literal.getType(),
typeFactory, RexImpTable.NullAs.NOT_POSSIBLE)
+ "\"";
}
@Override public String visitInputRef(RexInputRef inputRef) {
return quote(inFields.get(inputRef.getIndex()));
}
@Override public String visitCall(RexCall call) {
final String name = isItemCall(call);
if (name != null) {
return name;
}
final List<String> strings = visitList(call.operands);
if (call.getKind() == SqlKind.CAST) {
return call.getOperands().get(0).accept(this);
}
if (call.getOperator() == SqlStdOperatorTable.ITEM) {
final RexNode op1 = call.getOperands().get(1);
if (op1 instanceof RexLiteral && op1.getType().getSqlTypeName() == SqlTypeName.INTEGER) {
return stripQuotes(strings.get(0)) + "[" + ((RexLiteral) op1).getValue2() + "]";
}
}
throw new IllegalArgumentException("Translation of " + call
+ " is not supported by ElasticsearchProject");
}
List<String> visitList(List<RexNode> list) {
final List<String> strings = new ArrayList<>();
for (RexNode node: list) {
strings.add(node.accept(this));
}
return strings;
}
}
/**
* Base class for planner rules that convert a relational expression to
* Elasticsearch calling convention.
*/
abstract static class ElasticsearchConverterRule extends ConverterRule {
final Convention out;
ElasticsearchConverterRule(Class<? extends RelNode> clazz, RelTrait in, Convention out,
String description) {
super(clazz, in, out, description);
this.out = out;
}
}
/**
* Rule to convert a {@link org.apache.calcite.rel.core.Sort} to an
* {@link ElasticsearchSort}.
*/
private static class ElasticsearchSortRule extends ElasticsearchConverterRule {
private static final ElasticsearchSortRule INSTANCE =
new ElasticsearchSortRule();
private ElasticsearchSortRule() {
super(Sort.class, Convention.NONE, ElasticsearchRel.CONVENTION,
"ElasticsearchSortRule");
}
@Override public RelNode convert(RelNode relNode) {
final Sort sort = (Sort) relNode;
final RelTraitSet traitSet = sort.getTraitSet().replace(out).replace(sort.getCollation());
return new ElasticsearchSort(relNode.getCluster(), traitSet,
convert(sort.getInput(), traitSet.replace(RelCollations.EMPTY)), sort.getCollation(),
sort.offset, sort.fetch);
}
}
/**
* Rule to convert a {@link org.apache.calcite.rel.logical.LogicalFilter} to an
* {@link ElasticsearchFilter}.
*/
private static class ElasticsearchFilterRule extends ElasticsearchConverterRule {
private static final ElasticsearchFilterRule INSTANCE = new ElasticsearchFilterRule();
private ElasticsearchFilterRule() {
super(LogicalFilter.class, Convention.NONE, ElasticsearchRel.CONVENTION,
"ElasticsearchFilterRule");
}
@Override public RelNode convert(RelNode relNode) {
final LogicalFilter filter = (LogicalFilter) relNode;
final RelTraitSet traitSet = filter.getTraitSet().replace(out);
return new ElasticsearchFilter(relNode.getCluster(), traitSet,
convert(filter.getInput(), out),
filter.getCondition());
}
}
/**
* Rule to convert an {@link org.apache.calcite.rel.logical.LogicalAggregate}
* to an {@link ElasticsearchAggregate}.
*/
private static class ElasticsearchAggregateRule extends ElasticsearchConverterRule {
static final RelOptRule INSTANCE = new ElasticsearchAggregateRule();
private ElasticsearchAggregateRule() {
super(LogicalAggregate.class, Convention.NONE, ElasticsearchRel.CONVENTION,
"ElasticsearchAggregateRule");
}
public RelNode convert(RelNode rel) {
final LogicalAggregate agg = (LogicalAggregate) rel;
final RelTraitSet traitSet = agg.getTraitSet().replace(out);
try {
return new ElasticsearchAggregate(
rel.getCluster(),
traitSet,
convert(agg.getInput(), traitSet.simplify()),
agg.getGroupSet(),
agg.getGroupSets(),
agg.getAggCallList());
} catch (InvalidRelException e) {
return null;
}
}
}
/**
* Rule to convert a {@link org.apache.calcite.rel.logical.LogicalProject}
* to an {@link ElasticsearchProject}.
*/
private static class ElasticsearchProjectRule extends ElasticsearchConverterRule {
  private static final ElasticsearchProjectRule INSTANCE = new ElasticsearchProjectRule();

  private ElasticsearchProjectRule() {
    super(LogicalProject.class, Convention.NONE, ElasticsearchRel.CONVENTION,
        "ElasticsearchProjectRule");
  }

  @Override public RelNode convert(RelNode relNode) {
    final LogicalProject project = (LogicalProject) relNode;
    // Same projections and row type; only the calling convention changes.
    final RelTraitSet converted = project.getTraitSet().replace(out);
    return new ElasticsearchProject(project.getCluster(), converted,
        convert(project.getInput(), out), project.getProjects(),
        project.getRowType());
  }
}
}
// End ElasticsearchRules.java
| apache-2.0 |
jiangdequan/ServletDemo | src/com/boyue/user/servlet/UserLogoutServlet.java | 1104 | package com.boyue.user.servlet;
import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;
import com.boyue.common.PageForward;
/**
 * Servlet that handles user logout requests.
 *
 * @author dequan
 *
 */
public class UserLogoutServlet extends HttpServlet {
  private static final long serialVersionUID = 8073319760057438033L;

  /** GET requests are handled identically to POST requests. */
  @Override
  protected void doGet(HttpServletRequest req, HttpServletResponse resp)
      throws ServletException, IOException {
    doPost(req, resp);
  }

  /**
   * Performs the logout: removes the "username" attribute, invalidates the
   * HTTP session, and forwards the user back to the index page.
   */
  @Override
  protected void doPost(HttpServletRequest _request,
      HttpServletResponse _response) {
    final HttpSession session = _request.getSession();
    session.removeAttribute("username");
    session.invalidate();
    PageForward.pageForward(_request, _response, "index.jsp");
  }
}
| apache-2.0 |
everttigchelaar/camel-svn | components/camel-web/src/main/java/org/apache/camel/web/resources/RouteStatusResource.java | 3448 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.web.resources;
import java.net.URI;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import com.sun.jersey.api.representation.Form;
import org.apache.camel.CamelContext;
import org.apache.camel.ServiceStatus;
import org.apache.camel.model.RouteDefinition;
/**
* Represents the status of a single single Camel Route which is used to implement one or more
* <a href="http://camel.apache.org/enterprise-integration-patterns.html">Enterprise Integration Paterns</a>
*
* @version
*/
public class RouteStatusResource {
    private RouteResource routeResource;

    public RouteStatusResource(RouteResource routeResource) {
        this.routeResource = routeResource;
    }

    /** @return the route definition whose status this resource exposes */
    public RelDefinitionHolder getRouteHolder() {
        throw new UnsupportedOperationException();
    }

    public RouteDefinition getRoute() {
        return routeResource.getRoute();
    }

    public CamelContext getCamelContext() {
        return routeResource.getCamelContext();
    }

    /**
     * Returns the current service status of the route as plain text,
     * or {@code null} if no status is available.
     */
    @GET
    @Produces(MediaType.TEXT_PLAIN)
    public String getStatusText() {
        ServiceStatus status = getStatus();
        if (status != null) {
            return status.toString();
        }
        return null;
    }

    public ServiceStatus getStatus() {
        return getRoute().getStatus(getCamelContext());
    }

    /**
     * Sets the status of this route from a plain-text request body.
     *
     * @param status case-insensitive "start" or "stop"
     * @return 200 OK if the route was started or stopped; 204 No Content for
     *         any other (or null) value
     */
    @POST
    @Consumes(MediaType.TEXT_PLAIN)
    public Response setStatus(String status) throws Exception {
        if (status != null) {
            if (status.equalsIgnoreCase("start")) {
                getCamelContext().startRoute(getRoute());
                return Response.ok().build();
            } else if (status.equalsIgnoreCase("stop")) {
                getCamelContext().stopRoute(getRoute());
                return Response.ok().build();
            }
        }
        return Response.noContent().build();
    }

    /**
     * Sets the status of this route to either "start" or "stop"
     *
     * @param formData is the form data POSTed typically from a HTML form with the <code>status</code> field
     * set to either "start" or "stop"
     */
    @POST
    @Consumes("application/x-www-form-urlencoded")
    public Response setStatus(Form formData) throws Exception {
        // TODO replace the Form class with an injected bean?
        String status = formData.getFirst("status", String.class);
        setStatus(status);
        return Response.seeOther(new URI("/routes")).build();
    }
}
| apache-2.0 |
YangYongZhi/cpims | src/com/gtm/cpims/business/dataapp/statistics/ReportTitleService.java | 843 | package com.gtm.cpims.business.dataapp.statistics;
import java.util.Map;
import org.apache.poi.hssf.usermodel.HSSFWorkbook;
/**
 * Interface for generating statistical-report table headers.
 *
 * @author yangyongzhi
 *
 */
public interface ReportTitleService {
    /**
     * Fills in the Excel header rows of a statistical report.
     *
     * @param keyValue   key/value parameters used when building the header
     * @param tableIndex identifier of the report table whose header is built
     * @return the workbook with the header populated
     */
    public HSSFWorkbook fillExcelTitle(Map<String, String> keyValue, String tableIndex);

    /**
     * Returns the HTML header markup for a statistical report.
     *
     * @param keyValue   key/value parameters used when building the header
     * @param tableIndex identifier of the report table whose header is built
     * @return the header as an HTML string
     */
    public String getHtmlTitle(Map<String, String> keyValue, String tableIndex);

    /**
     * Builds an HTML-format vertical header.
     *
     * @param keyValue   key/value parameters used when building the header
     * @param tableIndex identifier of the report table whose header is built
     * @return a map describing the vertical header
     */
    public Map<String, String> getHtmlVerticalTitleMap(Map<String, String> keyValue,
    String tableIndex);
}
| apache-2.0 |
StackVista/hbase | hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java | 172348 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.util;
import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.net.InetAddress;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Multimap;
import com.google.common.collect.TreeMultimap;
import com.google.protobuf.ServiceException;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.MetaScanner;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
import org.apache.hadoop.hbase.util.hbck.TableLockChecker;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.zookeeper.KeeperException;
/**
* HBaseFsck (hbck) is a tool for checking and repairing region consistency and
* table integrity problems in a corrupted HBase.
* <p>
* Region consistency checks verify that hbase:meta, region deployment on region
* servers and the state of data in HDFS (.regioninfo files) all are in
* accordance.
* <p>
* Table integrity checks verify that all possible row keys resolve to exactly
* one region of a table. This means there are no individual degenerate
* or backwards regions; no holes between regions; and that there are no
* overlapping regions.
* <p>
* The general repair strategy works in two phases:
* <ol>
* <li> Repair Table Integrity on HDFS. (merge or fabricate regions)
* <li> Repair Region Consistency with hbase:meta and assignments
* </ol>
* <p>
* For table integrity repairs, the tables' region directories are scanned
* for .regioninfo files. Each table's integrity is then verified. If there
* are any orphan regions (regions with no .regioninfo files) or holes, new
* regions are fabricated. Backwards regions are sidelined as well as empty
* degenerate (endkey==startkey) regions. If there are any overlapping regions,
* a new region is created and all data is merged into the new region.
* <p>
* Table integrity repairs deal solely with HDFS and could potentially be done
* offline -- the hbase region servers or master do not need to be running.
* This phase can eventually be used to completely reconstruct the hbase:meta table in
* an offline fashion.
* <p>
* Region consistency requires three conditions -- 1) valid .regioninfo file
* present in an HDFS region dir, 2) valid row with .regioninfo data in META,
* and 3) a region is deployed only at the regionserver that was assigned to
* with proper state in the master.
* <p>
* Region consistency repairs require hbase to be online so that hbck can
* contact the HBase master and region servers. The hbck#connect() method must
* first be called successfully. Much of the region consistency information
* is transient and less risky to repair.
* <p>
* If hbck is run from the command line, there are a handful of arguments that
* can be used to limit the kinds of repairs hbck will do. See the code in
* {@link #printUsageAndExit()} for more details.
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
@InterfaceStability.Evolving
public class HBaseFsck extends Configured implements Closeable {
public static final long DEFAULT_TIME_LAG = 60000; // default value of 1 minute
public static final long DEFAULT_SLEEP_BEFORE_RERUN = 10000;
private static final int MAX_NUM_THREADS = 50; // #threads to contact regions
private static boolean rsSupportsOffline = true;
private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;
private static final int DEFAULT_MAX_MERGE = 5;
private static final String TO_BE_LOADED = "to_be_loaded";
private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";
private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;
private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200;
/**********************
* Internal resources
**********************/
private static final Log LOG = LogFactory.getLog(HBaseFsck.class.getName());
private ClusterStatus status;
private ClusterConnection connection;
private Admin admin;
private Table meta;
// threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions
protected ExecutorService executor;
private long startMillis = System.currentTimeMillis();
private HFileCorruptionChecker hfcc;
private int retcode = 0;
private Path HBCK_LOCK_PATH;
private FSDataOutputStream hbckOutFd;
// This lock is to prevent cleanup of balancer resources twice between
// ShutdownHook and the main code. We cleanup only if the connect() is
// successful
private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);
/***********
* Options
***********/
private static boolean details = false; // do we display the full report
private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older
private boolean fixAssignments = false; // fix assignment errors?
private boolean fixMeta = false; // fix meta errors?
private boolean checkHdfs = true; // load and check fs consistency?
private boolean fixHdfsHoles = false; // fix fs holes?
private boolean fixHdfsOverlaps = false; // fix fs overlaps (risky)
private boolean fixHdfsOrphans = false; // fix fs holes (missing .regioninfo)
private boolean fixTableOrphans = false; // fix fs holes (missing .tableinfo)
private boolean fixVersionFile = false; // fix missing hbase.version file in hdfs
private boolean fixSplitParents = false; // fix lingering split parents
private boolean fixReferenceFiles = false; // fix lingering reference store file
private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows
private boolean fixTableLocks = false; // fix table locks which are expired
private boolean fixAny = false; // Set to true if any of the fix is required.
// limit checking/fixes to listed tables, if empty attempt to check/fix all
// hbase:meta are always checked
private Set<TableName> tablesIncluded = new HashSet<TableName>();
private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge
// maximum number of overlapping regions to sideline
private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE;
private boolean sidelineBigOverlaps = false; // sideline overlaps with >maxMerge regions
private Path sidelineDir = null;
private boolean rerun = false; // if we tried to fix something, rerun hbck
private static boolean summary = false; // if we want to print less output
private boolean checkMetaOnly = false;
private boolean checkRegionBoundaries = false;
private boolean ignorePreCheckPermission = false; // if pre-check permission
/*********
* State
*********/
final private ErrorReporter errors;
int fixes = 0;
/**
* This map contains the state of all hbck items. It maps from encoded region
* name to HbckInfo structure. The information contained in HbckInfo is used
* to detect and correct consistency (hdfs/meta/deployment) problems.
*/
private TreeMap<String, HbckInfo> regionInfoMap = new TreeMap<String, HbckInfo>();
// Empty regioninfo qualifiers in hbase:meta
private Set<Result> emptyRegionInfoQualifiers = new HashSet<Result>();
/**
* This map from Tablename -> TableInfo contains the structures necessary to
* detect table consistency problems (holes, dupes, overlaps). It is sorted
* to prevent dupes.
*
* If tablesIncluded is empty, this map contains all tables.
* Otherwise, it contains only meta tables and tables in tablesIncluded,
* unless checkMetaOnly is specified, in which case, it contains only
* the meta table
*/
private SortedMap<TableName, TableInfo> tablesInfo =
new ConcurrentSkipListMap<TableName, TableInfo>();
/**
* When initially looking at HDFS, we attempt to find any orphaned data.
*/
private List<HbckInfo> orphanHdfsDirs = Collections.synchronizedList(new ArrayList<HbckInfo>());
private Map<TableName, Set<String>> orphanTableDirs =
new HashMap<TableName, Set<String>>();
private Map<TableName, TableState> tableStates =
new HashMap<TableName, TableState>();
private final RetryCounterFactory lockFileRetryCounterFactory;
/**
* Constructor
*
* @param conf Configuration object
* @throws MasterNotRunningException if the master is not running
* @throws ZooKeeperConnectionException if unable to connect to ZooKeeper
*/
public HBaseFsck(Configuration conf) throws MasterNotRunningException,
    ZooKeeperConnectionException, IOException, ClassNotFoundException {
  super(conf);
  // make a copy, just to be sure we're not overriding someone else's config
  setConf(HBaseConfiguration.create(getConf()));
  // disable blockcache for tool invocation, see HBASE-10500
  getConf().setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0);
  // Disable usage of meta replicas in hbck
  getConf().setBoolean(HConstants.USE_META_REPLICAS, false);
  errors = getErrorReporter(conf);
  // Worker pool used for parallelizable tasks (e.g. contacting region servers).
  int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS);
  executor = new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck"));
  // Retry policy for creating/deleting the hbck lock file on HDFS.
  lockFileRetryCounterFactory = new RetryCounterFactory(
    getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),
    getConf().getInt("hbase.hbck.lockfile.attempt.sleep.interval",
      DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL));
}
/**
* Constructor
*
* @param conf
* Configuration object
* @throws MasterNotRunningException
* if the master is not running
* @throws ZooKeeperConnectionException
* if unable to connect to ZooKeeper
*/
public HBaseFsck(Configuration conf, ExecutorService exec) throws MasterNotRunningException,
    ZooKeeperConnectionException, IOException, ClassNotFoundException {
  super(conf);
  // NOTE(review): unlike the single-arg constructor, this one does not copy
  // the configuration nor disable the block cache / meta replicas — confirm
  // whether that difference is intentional.
  errors = getErrorReporter(getConf());
  // Caller supplies (and owns) the worker pool in this variant.
  this.executor = exec;
  lockFileRetryCounterFactory = new RetryCounterFactory(
    getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),
    getConf().getInt("hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL));
}
/**
 * Callable that creates the hbck lock file on HDFS (under the HBase temp
 * directory), retrying creation on failure. Returns the open output stream
 * on success, or null when another writer is already creating the file
 * (i.e. another hbck instance holds the lock).
 */
private class FileLockCallable implements Callable<FSDataOutputStream> {
  RetryCounter retryCounter;

  public FileLockCallable(RetryCounter retryCounter) {
    this.retryCounter = retryCounter;
  }

  @Override
  public FSDataOutputStream call() throws IOException {
    try {
      FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
      FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),
          HConstants.DATA_FILE_UMASK_KEY);
      Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);
      fs.mkdirs(tmpDir);
      HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);
      final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);
      // Record the lock holder's host in the file to aid debugging of
      // stale or contested lock files.
      out.writeBytes(InetAddress.getLocalHost().toString());
      out.flush();
      return out;
    } catch(RemoteException e) {
      if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){
        // Another hbck instance is creating the lock file concurrently;
        // signal "lock not acquired" rather than failing.
        return null;
      } else {
        throw e;
      }
    }
  }

  /**
   * Attempts to create the lock file, sleeping between attempts per the
   * retry counter.
   *
   * @return the output stream of the newly created file
   * @throws IOException the last creation failure once retries are exhausted,
   *   or an InterruptedIOException if interrupted while waiting to retry
   */
  private FSDataOutputStream createFileWithRetries(final FileSystem fs,
      final Path hbckLockFilePath, final FsPermission defaultPerms)
      throws IOException {
    IOException exception = null;
    do {
      try {
        return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);
      } catch (IOException ioe) {
        LOG.info("Failed to create lock file " + hbckLockFilePath.getName()
            + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "
            + retryCounter.getMaxAttempts());
        LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),
            ioe);
        try {
          exception = ioe;
          retryCounter.sleepUntilNextRetry();
        } catch (InterruptedException ie) {
          throw (InterruptedIOException) new InterruptedIOException(
              "Can't create lock file " + hbckLockFilePath.getName())
          .initCause(ie);
        }
      }
    } while (retryCounter.shouldRetry());
    throw exception;
  }
}
/**
* This method maintains a lock using a file. If the creation fails we return null
*
* @return FSDataOutputStream object corresponding to the newly opened lock file
* @throws IOException
*/
private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {
  RetryCounter retryCounter = lockFileRetryCounterFactory.create();
  FileLockCallable callable = new FileLockCallable(retryCounter);
  // Use a dedicated single-thread pool — named distinctly so it does not
  // shadow the instance field 'executor' — so the lock attempt can be
  // bounded by a timeout without touching the shared worker pool.
  ExecutorService lockFileExecutor = Executors.newFixedThreadPool(1);
  FutureTask<FSDataOutputStream> futureTask = new FutureTask<FSDataOutputStream>(callable);
  lockFileExecutor.execute(futureTask);
  final int timeoutInSeconds = 30;
  FSDataOutputStream stream = null;
  try {
    stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);
  } catch (ExecutionException ee) {
    LOG.warn("Encountered exception when opening lock file", ee);
  } catch (InterruptedException ie) {
    LOG.warn("Interrupted when opening lock file", ie);
    // Restore the interrupt status for callers further up the stack.
    Thread.currentThread().interrupt();
  } catch (TimeoutException exception) {
    // took too long to obtain lock
    LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");
    futureTask.cancel(true);
  } finally {
    lockFileExecutor.shutdownNow();
  }
  // null means the lock was not acquired (another hbck holds it, or we
  // timed out / were interrupted).
  return stream;
}
/**
 * Releases the hbck lock: closes the lock-file stream and deletes the lock
 * file from HDFS, retrying deletion per the lock-file retry policy. The
 * compare-and-set on {@code hbckLockCleanup} ensures cleanup runs at most
 * once even if both the shutdown hook and the main code path call this.
 */
private void unlockHbck() {
  if (hbckLockCleanup.compareAndSet(true, false)) {
    RetryCounter retryCounter = lockFileRetryCounterFactory.create();
    do {
      try {
        IOUtils.closeStream(hbckOutFd);
        FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),
            HBCK_LOCK_PATH, true);
        return;
      } catch (IOException ioe) {
        LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="
            + (retryCounter.getAttemptTimes() + 1) + " of "
            + retryCounter.getMaxAttempts());
        LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);
        try {
          retryCounter.sleepUntilNextRetry();
        } catch (InterruptedException ie) {
          // Give up on further retries but preserve the interrupt status.
          Thread.currentThread().interrupt();
          LOG.warn("Interrupted while deleting lock file" +
              HBCK_LOCK_PATH);
          return;
        }
      }
    } while (retryCounter.shouldRetry());
  }
}
/**
* To repair region consistency, one must call connect() in order to repair
* online state.
*/
public void connect() throws IOException {
  // Check if another instance of balancer is running
  hbckOutFd = checkAndMarkRunningHbck();
  if (hbckOutFd == null) {
    // Lock not acquired: another hbck instance is running (or the lock
    // attempt failed); refuse to proceed.
    setRetCode(-1);
    LOG.error("Another instance of hbck is running, exiting this instance.[If you are sure" +
        " no other instance is running, delete the lock file " +
        HBCK_LOCK_PATH + " and rerun the tool]");
    throw new IOException("Duplicate hbck - Abort");
  }

  // Make sure to cleanup the lock
  hbckLockCleanup.set(true);

  // Add a shutdown hook to this thread, incase user tries to
  // kill the hbck with a ctrl-c, we want to cleanup the lock so that
  // it is available for further calls
  Runtime.getRuntime().addShutdownHook(new Thread() {
    @Override
    public void run() {
      IOUtils.closeStream(HBaseFsck.this);
      unlockHbck();
    }
  });
  LOG.debug("Launching hbck");

  // Open the cluster connection, admin interface, a handle on hbase:meta,
  // and snapshot the current cluster status for later checks.
  connection = (ClusterConnection)ConnectionFactory.createConnection(getConf());
  admin = connection.getAdmin();
  meta = connection.getTable(TableName.META_TABLE_NAME);
  status = admin.getClusterStatus();
}
/**
* Get deployed regions according to the region servers.
*/
private void loadDeployedRegions() throws IOException, InterruptedException {
  // From the master, get a list of all known live region servers
  Collection<ServerName> regionServers = status.getServers();
  errors.print("Number of live region servers: " + regionServers.size());
  if (details) {
    // With -details, list every live server individually.
    for (ServerName rsinfo: regionServers) {
      errors.print(" " + rsinfo.getServerName());
    }
  }

  // From the master, get a list of all dead region servers
  Collection<ServerName> deadRegionServers = status.getDeadServerNames();
  errors.print("Number of dead region servers: " + deadRegionServers.size());
  if (details) {
    for (ServerName name: deadRegionServers) {
      errors.print(" " + name);
    }
  }

  // Print the current master name and state
  errors.print("Master: " + status.getMaster());

  // Print the list of all backup masters
  Collection<ServerName> backupMasters = status.getBackupMasters();
  errors.print("Number of backup masters: " + backupMasters.size());
  if (details) {
    for (ServerName name: backupMasters) {
      errors.print(" " + name);
    }
  }

  // Cluster-wide load and region statistics from the status snapshot.
  errors.print("Average load: " + status.getAverageLoad());
  errors.print("Number of requests: " + status.getRequestsCount());
  errors.print("Number of regions: " + status.getRegionsCount());

  Map<String, RegionState> rits = status.getRegionsInTransition();
  errors.print("Number of regions in transition: " + rits.size());
  if (details) {
    for (RegionState state: rits.values()) {
      errors.print(" " + state.toDescriptiveString());
    }
  }

  // Determine what's deployed
  processRegionServers(regionServers);
}
/**
* Clear the current state of hbck.
*/
private void clearState() {
  // Make sure regionInfo is empty before starting
  fixes = 0; // reset the count of fixes applied so far
  regionInfoMap.clear();
  emptyRegionInfoQualifiers.clear();
  tableStates.clear();
  errors.clear();
  tablesInfo.clear();
  orphanHdfsDirs.clear();
}
/**
* This repair method analyzes hbase data in hdfs and repairs it to satisfy
* the table integrity rules. HBase doesn't need to be online for this
* operation to work.
*/
public void offlineHdfsIntegrityRepair() throws IOException, InterruptedException {
  // Initial pass to fix orphans.
  if (shouldCheckHdfs() && (shouldFixHdfsOrphans() || shouldFixHdfsHoles()
      || shouldFixHdfsOverlaps() || shouldFixTableOrphans())) {
    LOG.info("Loading regioninfos HDFS");
    // if nothing is happening this should always complete in two iterations.
    int maxIterations = getConf().getInt("hbase.hbck.integrityrepair.iterations.max", 3);
    int curIter = 0;
    do {
      clearState(); // clears hbck state and reset fixes to 0 and.
      // repair what's on HDFS
      restoreHdfsIntegrity();
      curIter++;// limit the number of iterations.
      // Loop until an iteration applies no fixes (fixes == 0 means the
      // state is stable) or the iteration cap is reached.
    } while (fixes > 0 && curIter <= maxIterations);

    // Repairs should be done in the first iteration and verification in the second.
    // If there are more than 2 passes, something funny has happened.
    if (curIter > 2) {
      if (curIter == maxIterations) {
        LOG.warn("Exiting integrity repairs after max " + curIter + " iterations. "
            + "Tables integrity may not be fully repaired!");
      } else {
        LOG.info("Successfully exiting integrity repairs after " + curIter + " iterations");
      }
    }
  }
}
/**
* This repair method requires the cluster to be online since it contacts
* region servers and the masters. It makes each region's state in HDFS, in
* hbase:meta, and deployments consistent.
*
* @return If > 0 , number of errors detected, if < 0 there was an unrecoverable
* error. If 0, we have a clean hbase.
*/
public int onlineConsistencyRepair() throws IOException, KeeperException,
    InterruptedException {
  // Start from a clean slate so results reflect only this pass.
  clearState();

  // get regions according to what is online on each RegionServer
  loadDeployedRegions();
  // check whether hbase:meta is deployed and online
  recordMetaRegion();
  // Check if hbase:meta is found only once and in the right place
  if (!checkMetaRegion()) {
    String errorMsg = "hbase:meta table is not consistent. ";
    if (shouldFixAssignments()) {
      errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";
    } else {
      errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";
    }
    errors.reportError(errorMsg + " Exiting...");
    // -2 signals an unrecoverable hbase:meta inconsistency (see javadoc).
    return -2;
  }
  // Not going with further consistency check for tables when hbase:meta itself is not consistent.
  LOG.info("Loading regionsinfo from the hbase:meta table");
  boolean success = loadMetaEntries();
  if (!success) return -1;

  // Empty cells in hbase:meta?
  reportEmptyMetaCells();

  // Check if we have to cleanup empty REGIONINFO_QUALIFIER rows from hbase:meta
  if (shouldFixEmptyMetaCells()) {
    fixEmptyMetaCells();
  }

  // get a list of all tables that have not changed recently.
  if (!checkMetaOnly) {
    reportTablesInFlux();
  }

  // Get disabled tables states
  loadTableStates();

  // load regiondirs and regioninfos from HDFS
  if (shouldCheckHdfs()) {
    loadHdfsRegionDirs();
    loadHdfsRegionInfos();
  }

  // fix the orphan tables
  fixOrphanTables();

  // Check and fix consistency
  checkAndFixConsistency();

  // Check integrity (does not fix)
  checkIntegrity();
  return errors.getErrorList().size();
}
/**
* Contacts the master and prints out cluster-wide information
* @return 0 on success, non-zero on failure
*/
public int onlineHbck() throws IOException, KeeperException, InterruptedException, ServiceException {
  // print hbase server version
  errors.print("Version: " + status.getHBaseVersion());
  // Phase 1: repair table integrity on HDFS.
  offlineHdfsIntegrityRepair();

  // turn the balancer off so regions stay put while hbck inspects/repairs
  boolean oldBalancer = admin.setBalancerRunning(false, true);
  try {
    // Phase 2: reconcile HDFS, hbase:meta and deployed region state.
    onlineConsistencyRepair();
  }
  finally {
    // always restore the balancer to its previous setting
    admin.setBalancerRunning(oldBalancer, false);
  }

  if (checkRegionBoundaries) {
    checkRegionBoundaries();
  }

  offlineReferenceFileRepair();

  checkAndFixTableLocks();

  // Remove the hbck lock
  unlockHbck();

  // Print table summary
  printTableSummary(tablesInfo);
  return errors.summarize();
}
public static byte[] keyOnly (byte[] b) {
if (b == null)
return b;
int rowlength = Bytes.toShort(b, 0);
byte[] result = new byte[rowlength];
System.arraycopy(b, Bytes.SIZEOF_SHORT, result, 0, rowlength);
return result;
}
  /**
   * Releases the admin, meta table and cluster connection via
   * {@code IOUtils.cleanup}; close failures are swallowed rather than thrown.
   */
  @Override
  public void close() throws IOException {
    IOUtils.cleanup(null, admin, meta, connection);
  }
  /**
   * Holder comparing a region's key boundaries as recorded in hbase:meta
   * against the first/last keys actually found in its store files.
   */
  private static class RegionBoundariesInformation {
    public byte [] regionName;
    // Start/end keys for the region as recorded in hbase:meta.
    public byte [] metaFirstKey;
    public byte [] metaLastKey;
    // Smallest first key / largest last key observed across the region's store files.
    public byte [] storesFirstKey;
    public byte [] storesLastKey;
    @Override
    public String toString () {
      return "regionName=" + Bytes.toStringBinary(regionName) +
          "\nmetaFirstKey=" + Bytes.toStringBinary(metaFirstKey) +
          "\nmetaLastKey=" + Bytes.toStringBinary(metaLastKey) +
          "\nstoresFirstKey=" + Bytes.toStringBinary(storesFirstKey) +
          "\nstoresLastKey=" + Bytes.toStringBinary(storesLastKey);
    }
  }
public void checkRegionBoundaries() {
try {
ByteArrayComparator comparator = new ByteArrayComparator();
List<HRegionInfo> regions = MetaScanner.listAllRegions(getConf(), connection, false);
final RegionBoundariesInformation currentRegionBoundariesInformation =
new RegionBoundariesInformation();
Path hbaseRoot = FSUtils.getRootDir(getConf());
for (HRegionInfo regionInfo : regions) {
Path tableDir = FSUtils.getTableDir(hbaseRoot, regionInfo.getTable());
currentRegionBoundariesInformation.regionName = regionInfo.getRegionName();
// For each region, get the start and stop key from the META and compare them to the
// same information from the Stores.
Path path = new Path(tableDir, regionInfo.getEncodedName());
FileSystem fs = path.getFileSystem(getConf());
FileStatus[] files = fs.listStatus(path);
// For all the column families in this region...
byte[] storeFirstKey = null;
byte[] storeLastKey = null;
for (FileStatus file : files) {
String fileName = file.getPath().toString();
fileName = fileName.substring(fileName.lastIndexOf("/") + 1);
if (!fileName.startsWith(".") && !fileName.endsWith("recovered.edits")) {
FileStatus[] storeFiles = fs.listStatus(file.getPath());
// For all the stores in this column family.
for (FileStatus storeFile : storeFiles) {
HFile.Reader reader = HFile.createReader(fs, storeFile.getPath(), new CacheConfig(
getConf()), getConf());
if ((reader.getFirstKey() != null)
&& ((storeFirstKey == null) || (comparator.compare(storeFirstKey,
reader.getFirstKey()) > 0))) {
storeFirstKey = reader.getFirstKey();
}
if ((reader.getLastKey() != null)
&& ((storeLastKey == null) || (comparator.compare(storeLastKey,
reader.getLastKey())) < 0)) {
storeLastKey = reader.getLastKey();
}
reader.close();
}
}
}
currentRegionBoundariesInformation.metaFirstKey = regionInfo.getStartKey();
currentRegionBoundariesInformation.metaLastKey = regionInfo.getEndKey();
currentRegionBoundariesInformation.storesFirstKey = keyOnly(storeFirstKey);
currentRegionBoundariesInformation.storesLastKey = keyOnly(storeLastKey);
if (currentRegionBoundariesInformation.metaFirstKey.length == 0)
currentRegionBoundariesInformation.metaFirstKey = null;
if (currentRegionBoundariesInformation.metaLastKey.length == 0)
currentRegionBoundariesInformation.metaLastKey = null;
// For a region to be correct, we need the META start key to be smaller or equal to the
// smallest start key from all the stores, and the start key from the next META entry to
// be bigger than the last key from all the current stores. First region start key is null;
// Last region end key is null; some regions can be empty and not have any store.
boolean valid = true;
// Checking start key.
if ((currentRegionBoundariesInformation.storesFirstKey != null)
&& (currentRegionBoundariesInformation.metaFirstKey != null)) {
valid = valid
&& comparator.compare(currentRegionBoundariesInformation.storesFirstKey,
currentRegionBoundariesInformation.metaFirstKey) >= 0;
}
// Checking stop key.
if ((currentRegionBoundariesInformation.storesLastKey != null)
&& (currentRegionBoundariesInformation.metaLastKey != null)) {
valid = valid
&& comparator.compare(currentRegionBoundariesInformation.storesLastKey,
currentRegionBoundariesInformation.metaLastKey) < 0;
}
if (!valid) {
errors.reportError(ERROR_CODE.BOUNDARIES_ERROR, "Found issues with regions boundaries",
tablesInfo.get(regionInfo.getTable()));
LOG.warn("Region's boundaries not alligned between stores and META for:");
LOG.warn(currentRegionBoundariesInformation);
}
}
} catch (IOException e) {
LOG.error(e);
}
}
/**
* Iterates through the list of all orphan/invalid regiondirs.
*/
private void adoptHdfsOrphans(Collection<HbckInfo> orphanHdfsDirs) throws IOException {
for (HbckInfo hi : orphanHdfsDirs) {
LOG.info("Attempting to handle orphan hdfs dir: " + hi.getHdfsRegionDir());
adoptHdfsOrphan(hi);
}
}
  /**
   * Orphaned regions are regions without a .regioninfo file in them. We "adopt"
   * these orphans by creating a new region, and moving the column families,
   * recovered edits, WALs, into the new region dir. We determine the region
   * startkey and endkeys by looking at all of the hfiles inside the column
   * families to identify the min and max keys. The resulting region will
   * likely violate table integrity but will be dealt with by merging
   * overlapping regions.
   */
  @SuppressWarnings("deprecation")
  private void adoptHdfsOrphan(HbckInfo hi) throws IOException {
    Path p = hi.getHdfsRegionDir();
    FileSystem fs = p.getFileSystem(getConf());
    FileStatus[] dirs = fs.listStatus(p);
    if (dirs == null) {
      LOG.warn("Attempt to adopt ophan hdfs region skipped becuase no files present in " +
          p + ". This dir could probably be deleted.");
      return ;
    }

    TableName tableName = hi.getTableName();
    TableInfo tableInfo = tablesInfo.get(tableName);
    Preconditions.checkNotNull(tableInfo, "Table '" + tableName + "' not present!");
    HTableDescriptor template = tableInfo.getHTD();

    // find min and max key values across every hfile in every column family dir
    Pair<byte[],byte[]> orphanRegionRange = null;
    for (FileStatus cf : dirs) {
      String cfName= cf.getPath().getName();
      // TODO Figure out what the special dirs are
      if (cfName.startsWith(".") || cfName.equals(HConstants.SPLIT_LOGDIR_NAME)) continue;

      FileStatus[] hfiles = fs.listStatus(cf.getPath());
      for (FileStatus hfile : hfiles) {
        byte[] start, end;
        HFile.Reader hf = null;
        try {
          CacheConfig cacheConf = new CacheConfig(getConf());
          hf = HFile.createReader(fs, hfile.getPath(), cacheConf, getConf());
          hf.loadFileInfo();
          KeyValue startKv = KeyValue.createKeyValueFromKey(hf.getFirstKey());
          start = startKv.getRow();
          KeyValue endKv = KeyValue.createKeyValueFromKey(hf.getLastKey());
          end = endKv.getRow();
        } catch (IOException ioe) {
          LOG.warn("Problem reading orphan file " + hfile + ", skipping");
          continue;
        } catch (NullPointerException ioe) {
          // A corrupt HFile can surface as an NPE from the reader; treat it
          // like an unreadable file and skip it.
          LOG.warn("Orphan file " + hfile + " is possibly corrupted HFile, skipping");
          continue;
        } finally {
          if (hf != null) {
            hf.close();
          }
        }

        // expand the range to include the range of all hfiles
        if (orphanRegionRange == null) {
          // first range
          orphanRegionRange = new Pair<byte[], byte[]>(start, end);
        } else {
          // TODO add test

          // expand range only if the hfile is wider.
          if (Bytes.compareTo(orphanRegionRange.getFirst(), start) > 0) {
            orphanRegionRange.setFirst(start);
          }
          if (Bytes.compareTo(orphanRegionRange.getSecond(), end) < 0 ) {
            orphanRegionRange.setSecond(end);
          }
        }
      }
    }
    if (orphanRegionRange == null) {
      // No readable hfiles at all -- nothing to adopt, sideline the dir instead.
      LOG.warn("No data in dir " + p + ", sidelining data");
      fixes++;
      sidelineRegionDir(fs, hi);
      return;
    }
    LOG.info("Min max keys are : [" + Bytes.toString(orphanRegionRange.getFirst()) + ", " +
        Bytes.toString(orphanRegionRange.getSecond()) + ")");

    // create new region on hdfs. move data into place.
    HRegionInfo hri = new HRegionInfo(template.getTableName(), orphanRegionRange.getFirst(), orphanRegionRange.getSecond());
    LOG.info("Creating new region : " + hri);
    HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), hri, template);
    Path target = region.getRegionFileSystem().getRegionDir();

    // rename all the data to new region
    mergeRegionDirs(target, hi);
    fixes++;
  }
  /**
   * This method determines if there are table integrity errors in HDFS. If
   * there are errors and the appropriate "fix" options are enabled, the method
   * will first correct orphan regions making them into legit regiondirs, and
   * then reload to merge potentially overlapping regions.
   *
   * @return number of table integrity errors found
   */
  private int restoreHdfsIntegrity() throws IOException, InterruptedException {
    // Determine what's on HDFS
    LOG.info("Loading HBase regioninfo from HDFS...");
    loadHdfsRegionDirs(); // populating regioninfo table.

    int errs = errors.getErrorList().size();
    // First time just get suggestions.
    tablesInfo = loadHdfsRegionInfos(); // update tableInfos based on region info in fs.
    checkHdfsIntegrity(false, false);

    if (errors.getErrorList().size() == errs) {
      LOG.info("No integrity errors.  We are done with this phase. Glorious.");
      return 0;
    }

    if (shouldFixHdfsOrphans() && orphanHdfsDirs.size() > 0) {
      adoptHdfsOrphans(orphanHdfsDirs);
      // TODO optimize by incrementally adding instead of reloading.
    }

    // Make sure there are no holes now.
    // Each fixing pass below clears and fully re-scans HDFS state so the next
    // check operates on the repaired layout.
    if (shouldFixHdfsHoles()) {
      clearState(); // this also resets # fixes.
      loadHdfsRegionDirs();
      tablesInfo = loadHdfsRegionInfos(); // update tableInfos based on region info in fs.
      tablesInfo = checkHdfsIntegrity(shouldFixHdfsHoles(), false);
    }

    // Now we fix overlaps
    if (shouldFixHdfsOverlaps()) {
      // second pass we fix overlaps.
      clearState(); // this also resets # fixes.
      loadHdfsRegionDirs();
      tablesInfo = loadHdfsRegionInfos(); // update tableInfos based on region info in fs.
      tablesInfo = checkHdfsIntegrity(false, shouldFixHdfsOverlaps());
    }

    return errors.getErrorList().size();
  }
  /**
   * Scan all the store file names to find any lingering reference files,
   * which refer to some none-exiting files. If "fix" option is enabled,
   * any lingering reference file will be sidelined if found.
   * <p>
   * Lingering reference file prevents a region from opening. It has to
   * be fixed before a cluster can start properly.
   */
  private void offlineReferenceFileRepair() throws IOException {
    Configuration conf = getConf();
    Path hbaseRoot = FSUtils.getRootDir(conf);
    FileSystem fs = hbaseRoot.getFileSystem(conf);
    Map<String, Path> allFiles = FSUtils.getTableStoreFilePathMap(fs, hbaseRoot);
    for (Path path: allFiles.values()) {
      boolean isReference = false;
      try {
        isReference = StoreFileInfo.isReference(path);
      } catch (Throwable t) {
        // Ignore. Some files may not be store files at all.
        // For example, files under .oldlogs folder in hbase:meta
        // Warning message is already logged by
        // StoreFile#isReference.
      }
      if (!isReference) continue;

      Path referredToFile = StoreFileInfo.getReferredToFile(path);
      if (fs.exists(referredToFile)) continue;  // good, expected

      // Found a lingering reference file
      errors.reportError(ERROR_CODE.LINGERING_REFERENCE_HFILE,
        "Found lingering reference file " + path);
      if (!shouldFixReferenceFiles()) continue;

      // Now, trying to fix it since requested
      boolean success = false;
      String pathStr = path.toString();

      // A reference file path should be like
      // ${hbase.rootdir}/data/namespace/table_name/region_id/family_name/referred_file.region_name
      // Up 5 directories to get the root folder.
      // So the file will be sidelined to a similar folder structure.
      int index = pathStr.lastIndexOf(Path.SEPARATOR_CHAR);
      for (int i = 0; index > 0 && i < 5; i++) {
        index = pathStr.lastIndexOf(Path.SEPARATOR_CHAR, index - 1);
      }
      if (index > 0) {
        Path rootDir = getSidelineDir();
        Path dst = new Path(rootDir, pathStr.substring(index + 1));
        fs.mkdirs(dst.getParent());
        // NOTE(review): "sildeline" is a typo in this log message (left as-is
        // since it is a runtime string).
        LOG.info("Trying to sildeline reference file "
          + path + " to " + dst);
        // NOTE(review): a rerun is requested before the rename is attempted; if
        // the rename below fails we still flag a rerun -- confirm intended.
        setShouldRerun();

        success = fs.rename(path, dst);
      }
      if (!success) {
        LOG.error("Failed to sideline reference file " + path);
      }
    }
  }
/**
* TODO -- need to add tests for this.
*/
private void reportEmptyMetaCells() {
errors.print("Number of empty REGIONINFO_QUALIFIER rows in hbase:meta: " +
emptyRegionInfoQualifiers.size());
if (details) {
for (Result r: emptyRegionInfoQualifiers) {
errors.print(" " + r);
}
}
}
/**
* TODO -- need to add tests for this.
*/
private void reportTablesInFlux() {
AtomicInteger numSkipped = new AtomicInteger(0);
HTableDescriptor[] allTables = getTables(numSkipped);
errors.print("Number of Tables: " + allTables.length);
if (details) {
if (numSkipped.get() > 0) {
errors.detail("Number of Tables in flux: " + numSkipped.get());
}
for (HTableDescriptor td : allTables) {
errors.detail(" Table: " + td.getTableName() + "\t" +
(td.isReadOnly() ? "ro" : "rw") + "\t" +
(td.isMetaRegion() ? "META" : " ") + "\t" +
" families: " + td.getFamilies().size());
}
}
}
  /**
   * @return the error reporter accumulating the inconsistencies found by this run.
   */
  public ErrorReporter getErrors() {
    return errors;
  }
  /**
   * Read the .regioninfo file from the file system. If there is no
   * .regioninfo, add it to the orphan hdfs region list.
   * Populates {@code hbi.hdfsEntry.hri}; a no-op when the region dir is
   * unknown or the info was already loaded.
   */
  private void loadHdfsRegioninfo(HbckInfo hbi) throws IOException {
    Path regionDir = hbi.getHdfsRegionDir();
    if (regionDir == null) {
      LOG.warn("No HDFS region dir found: " + hbi + " meta=" + hbi.metaEntry);
      return;
    }

    if (hbi.hdfsEntry.hri != null) {
      // already loaded data
      return;
    }

    // NOTE(review): uses the default FileSystem rather than
    // regionDir.getFileSystem(getConf()) -- assumes the region dir lives on the
    // default fs; confirm this holds for non-default rootdir schemes.
    FileSystem fs = FileSystem.get(getConf());
    HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
    LOG.debug("HRegionInfo read: " + hri.toString());
    hbi.hdfsEntry.hri = hri;
  }
  /**
   * Exception thrown when a integrity repair operation fails in an
   * unresolvable way.
   */
  public static class RegionRepairException extends IOException {
    private static final long serialVersionUID = 1L;
    // Original low-level failure that made the repair unresolvable.
    final IOException ioe;
    /**
     * @param s description of the failed repair
     * @param ioe the underlying I/O failure (retained as a field, not set as cause)
     */
    public RegionRepairException(String s, IOException ioe) {
      super(s);
      this.ioe = ioe;
    }
  }
  /**
   * Populate hbi's from regionInfos loaded from file system.
   * Reads every region's .regioninfo in parallel, then serially builds the
   * per-table {@code TableInfo} entries (loading or recording orphan table
   * descriptors along the way).
   *
   * @return the rebuilt tablesInfo map
   */
  private SortedMap<TableName, TableInfo> loadHdfsRegionInfos()
      throws IOException, InterruptedException {
    tablesInfo.clear(); // regenerating the data
    // generate region split structure
    Collection<HbckInfo> hbckInfos = regionInfoMap.values();

    // Parallelized read of .regioninfo files.
    List<WorkItemHdfsRegionInfo> hbis = new ArrayList<WorkItemHdfsRegionInfo>(hbckInfos.size());
    List<Future<Void>> hbiFutures;

    for (HbckInfo hbi : hbckInfos) {
      WorkItemHdfsRegionInfo work = new WorkItemHdfsRegionInfo(hbi, this, errors);
      hbis.add(work);
    }

    // Submit and wait for completion
    hbiFutures = executor.invokeAll(hbis);

    for(int i=0; i<hbiFutures.size(); i++) {
      WorkItemHdfsRegionInfo work = hbis.get(i);
      Future<Void> f = hbiFutures.get(i);
      try {
        f.get();
      } catch(ExecutionException e) {
        // A failed read is logged and the region simply stays an orphan below.
        LOG.warn("Failed to read .regioninfo file for region " +
              work.hbi.getRegionNameAsString(), e.getCause());
      }
    }

    Path hbaseRoot = FSUtils.getRootDir(getConf());
    FileSystem fs = hbaseRoot.getFileSystem(getConf());
    // serialized table info gathering.
    for (HbckInfo hbi: hbckInfos) {

      if (hbi.getHdfsHRI() == null) {
        // was an orphan
        continue;
      }


      // get table name from hdfs, populate various HBaseFsck tables.
      TableName tableName = hbi.getTableName();
      if (tableName == null) {
        // There was an entry in hbase:meta not in the HDFS?
        LOG.warn("tableName was null for: " + hbi);
        continue;
      }

      TableInfo modTInfo = tablesInfo.get(tableName);
      if (modTInfo == null) {
        // only executed once per table.
        modTInfo = new TableInfo(tableName);
        tablesInfo.put(tableName, modTInfo);
        try {
          TableDescriptor htd =
              FSTableDescriptors.getTableDescriptorFromFs(fs, hbaseRoot, tableName);
          modTInfo.htds.add(htd.getHTableDescriptor());
        } catch (IOException ioe) {
          if (!orphanTableDirs.containsKey(tableName)) {
            LOG.warn("Unable to read .tableinfo from " + hbaseRoot, ioe);
            //should only report once for each table
            errors.reportError(ERROR_CODE.NO_TABLEINFO_FILE,
                "Unable to read .tableinfo from " + hbaseRoot + "/" + tableName);
            Set<String> columns = new HashSet<String>();
            // Remember the table as orphaned, seeded with its column families.
            orphanTableDirs.put(tableName, getColumnFamilyList(columns, hbi));
          }
        }
      }
      if (!hbi.isSkipChecks()) {
        modTInfo.addRegionInfo(hbi);
      }
    }

    loadTableInfosForTablesWithNoRegion();

    return tablesInfo;
  }
/**
* To get the column family list according to the column family dirs
* @param columns
* @param hbi
* @return a set of column families
* @throws IOException
*/
private Set<String> getColumnFamilyList(Set<String> columns, HbckInfo hbi) throws IOException {
Path regionDir = hbi.getHdfsRegionDir();
FileSystem fs = regionDir.getFileSystem(getConf());
FileStatus[] subDirs = fs.listStatus(regionDir, new FSUtils.FamilyDirFilter(fs));
for (FileStatus subdir : subDirs) {
String columnfamily = subdir.getPath().getName();
columns.add(columnfamily);
}
return columns;
}
/**
* To fabricate a .tableinfo file with following contents<br>
* 1. the correct tablename <br>
* 2. the correct colfamily list<br>
* 3. the default properties for both {@link HTableDescriptor} and {@link HColumnDescriptor}<br>
* @throws IOException
*/
private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName,
Set<String> columns) throws IOException {
if (columns ==null || columns.isEmpty()) return false;
HTableDescriptor htd = new HTableDescriptor(tableName);
for (String columnfamimly : columns) {
htd.addFamily(new HColumnDescriptor(columnfamimly));
}
fstd.createTableDescriptor(new TableDescriptor(htd), true);
return true;
}
/**
* To fix the empty REGIONINFO_QUALIFIER rows from hbase:meta <br>
* @throws IOException
*/
public void fixEmptyMetaCells() throws IOException {
if (shouldFixEmptyMetaCells() && !emptyRegionInfoQualifiers.isEmpty()) {
LOG.info("Trying to fix empty REGIONINFO_QUALIFIER hbase:meta rows.");
for (Result region : emptyRegionInfoQualifiers) {
deleteMetaRegion(region.getRow());
errors.getErrorList().remove(ERROR_CODE.EMPTY_META_CELL);
}
emptyRegionInfoQualifiers.clear();
}
}
  /**
   * To fix orphan table by creating a .tableinfo file under tableDir <br>
   * 1. if TableInfo is cached, to recover the .tableinfo accordingly <br>
   * 2. else create a default .tableinfo file with following items<br>
   * 2.1 the correct tablename <br>
   * 2.2 the correct colfamily list<br>
   * 2.3 the default properties for both {@link HTableDescriptor} and {@link HColumnDescriptor}<br>
   * @throws IOException
   */
  public void fixOrphanTables() throws IOException {
    if (shouldFixTableOrphans() && !orphanTableDirs.isEmpty()) {

      List<TableName> tmpList = new ArrayList<TableName>();
      tmpList.addAll(orphanTableDirs.keySet());
      HTableDescriptor[] htds = getHTableDescriptors(tmpList);
      Iterator<Entry<TableName, Set<String>>> iter =
          orphanTableDirs.entrySet().iterator();
      int j = 0;
      int numFailedCase = 0;
      FSTableDescriptors fstd = new FSTableDescriptors(getConf());
      // NOTE(review): the j-index walk assumes htds is ordered consistently
      // with the iteration order of orphanTableDirs -- confirm against
      // getHTableDescriptors.
      while (iter.hasNext()) {
        Entry<TableName, Set<String>> entry =
            iter.next();
        TableName tableName = entry.getKey();
        LOG.info("Trying to fix orphan table error: " + tableName);
        if (j < htds.length) {
          if (tableName.equals(htds[j].getTableName())) {
            HTableDescriptor htd = htds[j];
            LOG.info("fixing orphan table: " + tableName + " from cache");
            fstd.createTableDescriptor(new TableDescriptor(htd), true);
            j++;
            iter.remove();
          }
        } else {
          // No cached descriptor left; fall back to fabricating a default one
          // from the recorded column-family names.
          if (fabricateTableInfo(fstd, tableName, entry.getValue())) {
            LOG.warn("fixing orphan table: " + tableName + " with a default .tableinfo file");
            LOG.warn("Strongly recommend to modify the HTableDescriptor if necessary for: " + tableName);
            iter.remove();
          } else {
            LOG.error("Unable to create default .tableinfo for " + tableName + " while missing column family information");
            numFailedCase++;
          }
        }
        fixes++;
      }

      if (orphanTableDirs.isEmpty()) {
        // all orphanTableDirs are luckily recovered
        // re-run doFsck after recovering the .tableinfo file
        setShouldRerun();
        LOG.warn("Strongly recommend to re-run manually hfsck after all orphanTableDirs being fixed");
      } else if (numFailedCase > 0) {
        LOG.error("Failed to fix " + numFailedCase
            + " OrphanTables with default .tableinfo files");
      }

    }
    //cleanup the list
    orphanTableDirs.clear();

  }
  /**
   * This borrows code from MasterFileSystem.bootstrap(). Explicitly creates its own WAL, so be
   * sure to close it as well as the region when you're finished.
   *
   * @return an open hbase:meta HRegion
   */
  private HRegion createNewMeta() throws IOException {
    Path rootdir = FSUtils.getRootDir(getConf());
    Configuration c = getConf();
    HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
    HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
    // Disable info-family caching while bootstrapping, re-enable afterwards.
    MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, false);
    // The WAL subsystem will use the default rootDir rather than the passed in rootDir
    // unless I pass along via the conf.
    Configuration confForWAL = new Configuration(c);
    confForWAL.set(HConstants.HBASE_DIR, rootdir.toString());
    WAL wal = (new WALFactory(confForWAL,
        Collections.<WALActionsListener>singletonList(new MetricsWAL()),
        "hbck-meta-recovery-" + RandomStringUtils.randomNumeric(8))).
        getWAL(metaHRI.getEncodedNameAsBytes());
    HRegion meta = HRegion.createHRegion(metaHRI, rootdir, c, metaDescriptor, wal);
    MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, true);
    return meta;
  }
  /**
   * Generate set of puts to add to new meta.  This expects the tables to be
   * clean with no overlaps or holes.  If there are any problems it returns null.
   * For each non-meta table it emits one table-state put plus one put per region.
   *
   * @return An array list of puts to do in bulk, null if tables have problems
   */
  private ArrayList<Put> generatePuts(
      SortedMap<TableName, TableInfo> tablesInfo) throws IOException {
    ArrayList<Put> puts = new ArrayList<Put>();
    boolean hasProblems = false;
    for (Entry<TableName, TableInfo> e : tablesInfo.entrySet()) {
      TableName name = e.getKey();

      // skip "hbase:meta"
      if (name.compareTo(TableName.META_TABLE_NAME) == 0) {
        continue;
      }

      TableInfo ti = e.getValue();
      puts.add(MetaTableAccessor
          .makePutFromTableState(new TableState(ti.tableName, TableState.State.ENABLED)));
      for (Entry<byte[], Collection<HbckInfo>> spl : ti.sc.getStarts().asMap()
          .entrySet()) {
        Collection<HbckInfo> his = spl.getValue();
        int sz = his.size();
        if (sz != 1) {
          // problem: a clean table must have exactly one region per start key
          LOG.error("Split starting at " + Bytes.toStringBinary(spl.getKey())
              + " had " +  sz + " regions instead of exactly 1." );
          hasProblems = true;
          continue;
        }

        // add the row directly to meta.
        HbckInfo hi = his.iterator().next();
        HRegionInfo hri = hi.getHdfsHRI(); // hi.metaEntry;
        Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
        puts.add(p);
      }
    }
    return hasProblems ? null : puts;
  }
/**
* Suggest fixes for each table
*/
private void suggestFixes(
SortedMap<TableName, TableInfo> tablesInfo) throws IOException {
for (TableInfo tInfo : tablesInfo.values()) {
TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
tInfo.checkRegionChain(handler);
}
}
  /**
   * Rebuilds meta from information in hdfs/fs.  Depends on configuration settings passed into
   * hbck constructor to point to a particular fs/dir. Assumes HBase is OFFLINE.
   *
   * @param fix flag that determines if method should attempt to fix holes
   * @return true if successful, false if attempt failed.
   */
  public boolean rebuildMeta(boolean fix) throws IOException,
      InterruptedException {

    // TODO check to make sure hbase is offline. (or at least the table
    // currently being worked on is off line)

    // Determine what's on HDFS
    LOG.info("Loading HBase regioninfo from HDFS...");
    loadHdfsRegionDirs(); // populating regioninfo table.

    int errs = errors.getErrorList().size();
    tablesInfo = loadHdfsRegionInfos(); // update tableInfos based on region info in fs.
    checkHdfsIntegrity(false, false);

    // make sure ok.
    if (errors.getErrorList().size() != errs) {
      // While in error state, iterate until no more fixes possible
      // (loop exits with failure only when a pass applies zero fixes yet
      // errors remain).
      while(true) {
        fixes = 0;
        suggestFixes(tablesInfo);
        errors.clear();
        loadHdfsRegionInfos(); // update tableInfos based on region info in fs.
        checkHdfsIntegrity(shouldFixHdfsHoles(), shouldFixHdfsOverlaps());

        int errCount = errors.getErrorList().size();

        if (fixes == 0) {
          if (errCount > 0) {
            return false; // failed to fix problems.
          } else {
            break; // no fixes and no problems? drop out and fix stuff!
          }
        }
      }
    }

    // we can rebuild, move old meta out of the way and start
    LOG.info("HDFS regioninfo's seems good.  Sidelining old hbase:meta");
    Path backupDir = sidelineOldMeta();

    LOG.info("Creating new hbase:meta");
    HRegion meta = createNewMeta();

    // populate meta
    List<Put> puts = generatePuts(tablesInfo);
    if (puts == null) {
      LOG.fatal("Problem encountered when creating new hbase:meta entries.  " +
        "You may need to restore the previously sidelined hbase:meta");
      return false;
    }
    meta.batchMutate(puts.toArray(new Put[puts.size()]));
    meta.close();
    // Close the region's private WAL too (see createNewMeta).
    if (meta.getWAL() != null) {
      meta.getWAL().close();
    }
    LOG.info("Success! hbase:meta table rebuilt.");
    LOG.info("Old hbase:meta is moved into " + backupDir);
    return true;
  }
private SortedMap<TableName, TableInfo> checkHdfsIntegrity(boolean fixHoles,
boolean fixOverlaps) throws IOException {
LOG.info("Checking HBase region split map from HDFS data...");
for (TableInfo tInfo : tablesInfo.values()) {
TableIntegrityErrorHandler handler;
if (fixHoles || fixOverlaps) {
handler = tInfo.new HDFSIntegrityFixer(tInfo, errors, getConf(),
fixHoles, fixOverlaps);
} else {
handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
}
if (!tInfo.checkRegionChain(handler)) {
// should dump info as well.
errors.report("Found inconsistency in table " + tInfo.getName());
}
}
return tablesInfo;
}
private Path getSidelineDir() throws IOException {
if (sidelineDir == null) {
Path hbaseDir = FSUtils.getRootDir(getConf());
Path hbckDir = new Path(hbaseDir, HConstants.HBCK_SIDELINEDIR_NAME);
sidelineDir = new Path(hbckDir, hbaseDir.getName() + "-"
+ startMillis);
}
return sidelineDir;
}
  /**
   * Sideline a region dir (instead of deleting it)
   * Convenience overload that sidelines directly under the sideline root
   * (no grouping parent dir).
   */
  Path sidelineRegionDir(FileSystem fs, HbckInfo hi) throws IOException {
    return sidelineRegionDir(fs, null, hi);
  }
/**
* Sideline a region dir (instead of deleting it)
*
* @param parentDir if specified, the region will be sidelined to
* folder like .../parentDir/<table name>/<region name>. The purpose
* is to group together similar regions sidelined, for example, those
* regions should be bulk loaded back later on. If null, it is ignored.
*/
Path sidelineRegionDir(FileSystem fs,
String parentDir, HbckInfo hi) throws IOException {
TableName tableName = hi.getTableName();
Path regionDir = hi.getHdfsRegionDir();
if (!fs.exists(regionDir)) {
LOG.warn("No previous " + regionDir + " exists. Continuing.");
return null;
}
Path rootDir = getSidelineDir();
if (parentDir != null) {
rootDir = new Path(rootDir, parentDir);
}
Path sidelineTableDir= FSUtils.getTableDir(rootDir, tableName);
Path sidelineRegionDir = new Path(sidelineTableDir, regionDir.getName());
fs.mkdirs(sidelineRegionDir);
boolean success = false;
FileStatus[] cfs = fs.listStatus(regionDir);
if (cfs == null) {
LOG.info("Region dir is empty: " + regionDir);
} else {
for (FileStatus cf : cfs) {
Path src = cf.getPath();
Path dst = new Path(sidelineRegionDir, src.getName());
if (fs.isFile(src)) {
// simple file
success = fs.rename(src, dst);
if (!success) {
String msg = "Unable to rename file " + src + " to " + dst;
LOG.error(msg);
throw new IOException(msg);
}
continue;
}
// is a directory.
fs.mkdirs(dst);
LOG.info("Sidelining files from " + src + " into containing region " + dst);
// FileSystem.rename is inconsistent with directories -- if the
// dst (foo/a) exists and is a dir, and the src (foo/b) is a dir,
// it moves the src into the dst dir resulting in (foo/a/b). If
// the dst does not exist, and the src a dir, src becomes dst. (foo/b)
FileStatus[] hfiles = fs.listStatus(src);
if (hfiles != null && hfiles.length > 0) {
for (FileStatus hfile : hfiles) {
success = fs.rename(hfile.getPath(), dst);
if (!success) {
String msg = "Unable to rename file " + src + " to " + dst;
LOG.error(msg);
throw new IOException(msg);
}
}
}
LOG.debug("Sideline directory contents:");
debugLsr(sidelineRegionDir);
}
}
LOG.info("Removing old region dir: " + regionDir);
success = fs.delete(regionDir, true);
if (!success) {
String msg = "Unable to delete dir " + regionDir;
LOG.error(msg);
throw new IOException(msg);
}
return sidelineRegionDir;
}
/**
* Side line an entire table.
*/
void sidelineTable(FileSystem fs, TableName tableName, Path hbaseDir,
Path backupHbaseDir) throws IOException {
Path tableDir = FSUtils.getTableDir(hbaseDir, tableName);
if (fs.exists(tableDir)) {
Path backupTableDir= FSUtils.getTableDir(backupHbaseDir, tableName);
fs.mkdirs(backupTableDir.getParent());
boolean success = fs.rename(tableDir, backupTableDir);
if (!success) {
throw new IOException("Failed to move " + tableName + " from "
+ tableDir + " to " + backupTableDir);
}
} else {
LOG.info("No previous " + tableName + " exists. Continuing.");
}
}
  /**
   * Moves the current hbase:meta table dir into the sideline directory so a
   * fresh meta can be rebuilt; on failure the original IOException is rethrown
   * after logging recovery instructions.
   *
   * @return Path to backup of original directory
   */
  Path sidelineOldMeta() throws IOException {
    // put current hbase:meta aside.
    Path hbaseDir = FSUtils.getRootDir(getConf());
    FileSystem fs = hbaseDir.getFileSystem(getConf());
    Path backupDir = getSidelineDir();
    fs.mkdirs(backupDir);

    try {
      sidelineTable(fs, TableName.META_TABLE_NAME, hbaseDir, backupDir);
    } catch (IOException e) {
        LOG.fatal("... failed to sideline meta. Currently in inconsistent state.  To restore "
            + "try to rename hbase:meta in " + backupDir.getName() + " to "
            + hbaseDir.getName() + ".", e);
      throw e; // throw original exception
    }
    return backupDir;
  }
  /**
   * Loads the cluster's table states into the local {@code tableStates} map.
   * Despite the historical mention of ZK in older docs, states are read from
   * hbase:meta via {@link MetaTableAccessor}.
   * @throws IOException on meta read errors
   */
  private void loadTableStates()
  throws IOException {
    tableStates = MetaTableAccessor.getTableStates(connection);
  }
/**
* Check if the specified region's table is disabled.
* @param tableName table to check status of
*/
private boolean isTableDisabled(TableName tableName) {
return tableStates.containsKey(tableName)
&& tableStates.get(tableName)
.inStates(TableState.State.DISABLED, TableState.State.DISABLING);
}
  /**
   * Scan HDFS for all regions, recording their information into
   * regionInfoMap.  Also verifies (and optionally recreates) the hbase
   * version file, and scans table dirs in parallel via the executor.
   */
  public void loadHdfsRegionDirs() throws IOException, InterruptedException {
    Path rootDir = FSUtils.getRootDir(getConf());
    FileSystem fs = rootDir.getFileSystem(getConf());

    // list all tables from HDFS
    List<FileStatus> tableDirs = Lists.newArrayList();

    boolean foundVersionFile = fs.exists(new Path(rootDir, HConstants.VERSION_FILE_NAME));

    List<Path> paths = FSUtils.getTableDirs(fs, rootDir);
    for (Path path : paths) {
      TableName tableName = FSUtils.getTableName(path);
       // hbase:meta is always scanned; other tables only when included by filters.
       if ((!checkMetaOnly &&
           isTableIncluded(tableName)) ||
           tableName.equals(TableName.META_TABLE_NAME)) {
         tableDirs.add(fs.getFileStatus(path));
       }
    }

    // verify that version file exists
    if (!foundVersionFile) {
      errors.reportError(ERROR_CODE.NO_VERSION_FILE,
          "Version file does not exist in root dir " + rootDir);
      if (shouldFixVersionFile()) {
        LOG.info("Trying to create a new " + HConstants.VERSION_FILE_NAME
            + " file.");
        setShouldRerun();
        FSUtils.setVersion(fs, rootDir, getConf().getInt(
            HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000), getConf().getInt(
            HConstants.VERSION_FILE_WRITE_ATTEMPTS,
            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
      }
    }

    // level 1:  <HBASE_DIR>/*
    List<WorkItemHdfsDir> dirs = new ArrayList<WorkItemHdfsDir>(tableDirs.size());
    List<Future<Void>> dirsFutures;

    for (FileStatus tableDir : tableDirs) {
      LOG.debug("Loading region dirs from " +tableDir.getPath());
      dirs.add(new WorkItemHdfsDir(this, fs, errors, tableDir));
    }

    // Invoke and wait for Callables to complete
    dirsFutures = executor.invokeAll(dirs);

    for(Future<Void> f: dirsFutures) {
      try {
        f.get();
      } catch(ExecutionException e) {
        LOG.warn("Could not load region dir " , e.getCause());
      }
    }
  }
  /**
   * Record the location of the hbase:meta region as found in ZooKeeper.
   * Validates every meta replica location and records each as a MetaEntry in
   * regionInfoMap.
   *
   * @return false (after reporting a NULL_META_REGION error) when any replica
   * location is missing or incomplete
   */
  private boolean recordMetaRegion() throws IOException {
    RegionLocations rl = ((ClusterConnection)connection).locateRegion(TableName.META_TABLE_NAME,
        HConstants.EMPTY_START_ROW, false, false);
    if (rl == null) {
      errors.reportError(ERROR_CODE.NULL_META_REGION,
          "META region was not found in Zookeeper");
      return false;
    }
    for (HRegionLocation metaLocation : rl.getRegionLocations()) {
      // Check if Meta region is valid and existing
      if (metaLocation == null ) {
        errors.reportError(ERROR_CODE.NULL_META_REGION,
            "META region location is null");
        return false;
      }
      if (metaLocation.getRegionInfo() == null) {
        errors.reportError(ERROR_CODE.NULL_META_REGION,
            "META location regionInfo is null");
        return false;
      }
      if (metaLocation.getHostname() == null) {
        errors.reportError(ERROR_CODE.NULL_META_REGION,
            "META location hostName is null");
        return false;
      }
      ServerName sn = metaLocation.getServerName();
      // Timestamped with "now" since ZK does not carry a modification time.
      MetaEntry m = new MetaEntry(metaLocation.getRegionInfo(), sn, System.currentTimeMillis());
      HbckInfo hbckInfo = regionInfoMap.get(metaLocation.getRegionInfo().getEncodedName());
      if (hbckInfo == null) {
        regionInfoMap.put(metaLocation.getRegionInfo().getEncodedName(), new HbckInfo(m));
      } else {
        hbckInfo.metaEntry = m;
      }
    }
    return true;
  }
/**
 * Create a ZooKeeper watcher for this hbck run.
 * <p>
 * The watcher aborts the whole process (System.exit) on any ZooKeeper failure,
 * since hbck cannot proceed without a working ZK connection. Callers are
 * responsible for closing the returned watcher.
 */
private ZooKeeperWatcher createZooKeeperWatcher() throws IOException {
  return new ZooKeeperWatcher(getConf(), "hbase Fsck", new Abortable() {
    @Override
    public void abort(String why, Throwable e) {
      LOG.error(why, e);
      // Fatal: a broken ZK session makes every subsequent check unreliable.
      System.exit(1);
    }

    @Override
    public boolean isAborted() {
      return false;
    }
  });
}
/**
 * Look up, via ZooKeeper, which server currently hosts the given replica of
 * hbase:meta.
 *
 * @param replicaId replica of hbase:meta to locate
 * @return the hosting server, or null if no location is published in ZooKeeper
 */
private ServerName getMetaRegionServerName(int replicaId)
throws IOException, KeeperException {
  ZooKeeperWatcher watcher = createZooKeeperWatcher();
  try {
    return new MetaTableLocator().getMetaRegionLocation(watcher, replicaId);
  } finally {
    // Always release the ZK session, even if the lookup throws.
    watcher.close();
  }
}
/**
 * Contacts each regionserver and fetches metadata about regions.
 *
 * @param regionServerList - the list of region servers to connect to
 * @throws IOException if a remote or network exception occurs
 */
void processRegionServers(Collection<ServerName> regionServerList)
  throws IOException, InterruptedException {

  // Build one work item per server; they are all contacted in parallel.
  List<WorkItemRegion> tasks = new ArrayList<WorkItemRegion>(regionServerList.size());
  for (ServerName server : regionServerList) {
    tasks.add(new WorkItemRegion(this, server, errors, connection));
  }

  // Block until every server has been processed. A failure against one server
  // is logged and skipped so the remaining servers are still scanned.
  List<Future<Void>> results = executor.invokeAll(tasks);
  for (int i = 0; i < results.size(); i++) {
    WorkItemRegion task = tasks.get(i);
    try {
      results.get(i).get();
    } catch (ExecutionException e) {
      LOG.warn("Could not process regionserver " + task.rsinfo.getHostAndPort(),
          e.getCause());
    }
  }
}
/**
 * Check consistency of all regions that have been found in previous phases.
 * <p>
 * Runs in two passes: first over primary (default) replicas, then over
 * non-primary replicas with the HDFS check temporarily disabled (replicas have
 * no HDFS data of their own). Finally, if HDFS checking is enabled, table
 * states in meta are validated and optionally repaired.
 */
private void checkAndFixConsistency()
throws IOException, KeeperException, InterruptedException {
  // Divide the checks in two phases. One for default/primary replicas and another
  // for the non-primary ones. Keeps code cleaner this way.
  for (java.util.Map.Entry<String, HbckInfo> e: regionInfoMap.entrySet()) {
    if (e.getValue().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
      checkRegionConsistency(e.getKey(), e.getValue());
    }
  }
  boolean prevHdfsCheck = shouldCheckHdfs();
  setCheckHdfs(false); //replicas don't have any hdfs data
  // Run a pass over the replicas and fix any assignment issues that exist on the currently
  // deployed/undeployed replicas.
  for (java.util.Map.Entry<String, HbckInfo> e: regionInfoMap.entrySet()) {
    if (e.getValue().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
      checkRegionConsistency(e.getKey(), e.getValue());
    }
  }
  // Restore the caller-visible HDFS-check setting before continuing.
  setCheckHdfs(prevHdfsCheck);

  if (shouldCheckHdfs()) {
    checkAndFixTableStates();
  }
}
/**
 * Check and fix table states, assumes full info available:
 * - tableInfos
 * - empty tables loaded
 * <p>
 * Two problems are detected: (1) a state row exists in meta for a table with no
 * remaining on-disk presence ("dangling" state), and (2) a known table is
 * missing its state row. With -fixMeta the offending rows are deleted/created
 * (and the write is re-read to verify it took effect); otherwise the problems
 * are only reported.
 */
private void checkAndFixTableStates() throws IOException {
  // first check dangling states
  for (Entry<TableName, TableState> entry : tableStates.entrySet()) {
    TableName tableName = entry.getKey();
    TableState tableState = entry.getValue();
    TableInfo tableInfo = tablesInfo.get(tableName);
    if (isTableIncluded(tableName)
        && !tableName.isSystemTable()
        && tableInfo == null) {
      if (fixMeta) {
        MetaTableAccessor.deleteTableState(connection, tableName);
        // Re-read to confirm the delete actually landed.
        TableState state = MetaTableAccessor.getTableState(connection, tableName);
        if (state != null) {
          errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
              tableName + " unable to delete dangling table state " + tableState);
        }
      } else {
        errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
            tableName + " has dangling table state " + tableState);
      }
    }
  }
  // check that all tables have states
  for (TableName tableName : tablesInfo.keySet()) {
    if (isTableIncluded(tableName) && !tableStates.containsKey(tableName)) {
      if (fixMeta) {
        // Tables without a state row are defaulted to ENABLED.
        MetaTableAccessor.updateTableState(connection, tableName, TableState.State.ENABLED);
        TableState newState = MetaTableAccessor.getTableState(connection, tableName);
        if (newState == null) {
          errors.reportError(ERROR_CODE.NO_TABLE_STATE,
              "Unable to change state for table " + tableName + " in meta ");
        }
      } else {
        errors.reportError(ERROR_CODE.NO_TABLE_STATE,
            tableName + " has no state in meta ");
      }
    }
  }
}
/**
 * Verify that the current user has write access to every top-level entry under
 * the HBase root directory before any repair is attempted.
 *
 * @throws AccessDeniedException if write permission is missing on any entry;
 *         the failure is also recorded in the error reporter before rethrowing
 */
private void preCheckPermission() throws IOException, AccessDeniedException {
  if (shouldIgnorePreCheckPermission()) {
    return;
  }

  Path hbaseDir = FSUtils.getRootDir(getConf());
  FileSystem fs = hbaseDir.getFileSystem(getConf());
  UserProvider userProvider = UserProvider.instantiate(getConf());
  UserGroupInformation ugi = userProvider.getCurrent().getUGI();
  for (FileStatus entry : fs.listStatus(hbaseDir)) {
    try {
      FSUtils.checkAccess(ugi, entry, FsAction.WRITE);
    } catch (AccessDeniedException ace) {
      LOG.warn("Got AccessDeniedException when preCheckPermission ", ace);
      errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
          + " does not have write perms to " + entry.getPath()
          + ". Please rerun hbck as hdfs user " + entry.getOwner());
      throw ace;
    }
  }
}
/**
 * Delete the given region's row from the hbase:meta table, using the region
 * name recorded in its meta entry as the row key.
 */
private void deleteMetaRegion(HbckInfo hi) throws IOException {
  this.deleteMetaRegion(hi.metaEntry.getRegionName());
}
/**
 * Deletes the row keyed by {@code metaKey} from the hbase:meta table.
 *
 * @param metaKey the region name / meta row key to delete
 */
private void deleteMetaRegion(byte[] metaKey) throws IOException {
  Delete d = new Delete(metaKey);
  meta.delete(d);
  // Region names may contain arbitrary bytes; use the binary-safe renderer so
  // the log line is not garbled (matches the logging convention used for region
  // names elsewhere in this file, e.g. offline()).
  LOG.info("Deleted " + Bytes.toStringBinary(metaKey) + " from META" );
}
/**
 * Reset the split parent region info in meta table.
 * <p>
 * Atomically (one RowMutations against the parent's meta row) removes the
 * SPLITA/SPLITB daughter references and rewrites the region info with the
 * offline and split flags cleared, so the master treats the region as a normal
 * live region again.
 */
private void resetSplitParent(HbckInfo hi) throws IOException {
  RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
  Delete d = new Delete(hi.metaEntry.getRegionName());
  d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
  d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
  mutations.add(d);

  // Copy the meta entry and clear the flags that mark it as a split parent.
  HRegionInfo hri = new HRegionInfo(hi.metaEntry);
  hri.setOffline(false);
  hri.setSplit(false);
  Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
  mutations.add(p);

  meta.mutateRow(mutations);
  LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
}
/**
 * This backwards-compatibility wrapper for permanently offlining a region
 * that should not be alive.  If the region server does not support the
 * "offline" method, it will use the closest unassign method instead.  This
 * will basically work until one attempts to disable or delete the affected
 * table.  The problem has to do with in-memory only master state, so
 * restarting the HMaster or failing over to another should fix this.
 * <p>
 * The first NoSuchMethodException-shaped failure flips {@code rsSupportsOffline}
 * to false so subsequent calls go straight to unassign.
 */
private void offline(byte[] regionName) throws IOException {
  String regionString = Bytes.toStringBinary(regionName);
  if (!rsSupportsOffline) {
    LOG.warn("Using unassign region " + regionString
        + " instead of using offline method, you should"
        + " restart HMaster after these repairs");
    admin.unassign(regionName, true);
    return;
  }

  // first time we assume the rs's supports #offline.
  try {
    LOG.info("Offlining region " + regionString);
    admin.offline(regionName);
  } catch (IOException ioe) {
    // Old masters surface the missing method as a wrapped NoSuchMethodException;
    // detect it by message since no typed exception is available.
    String notFoundMsg = "java.lang.NoSuchMethodException: " +
      "org.apache.hadoop.hbase.master.HMaster.offline([B)";
    if (ioe.getMessage().contains(notFoundMsg)) {
      LOG.warn("Using unassign region " + regionString
          + " instead of using offline method, you should"
          + " restart HMaster after these repairs");
      rsSupportsOffline = false; // in the future just use unassign
      admin.unassign(regionName, true);
      return;
    }
    throw ioe;
  }
}
/**
 * Undeploy the region described by {@code hi} and, when {@code hi} is a
 * primary replica, every secondary replica of the same region as well.
 * Undeployed replicas are flagged to skip further consistency checks this run.
 */
private void undeployRegions(HbckInfo hi) throws IOException, InterruptedException {
  undeployRegionsForHbi(hi);
  // undeploy replicas of the region (but only if the method is invoked for the primary)
  if (hi.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
    return;
  }
  int numReplicas = admin.getTableDescriptor(hi.getTableName()).getRegionReplication();
  for (int i = 1; i < numReplicas; i++) {
    if (hi.getPrimaryHRIForDeployedReplica() == null) continue;
    HRegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(
        hi.getPrimaryHRIForDeployedReplica(), i);
    HbckInfo h = regionInfoMap.get(hri.getEncodedName());
    if (h != null) {
      undeployRegionsForHbi(h);
      //set skip checks; we undeployed it, and we don't want to evaluate this anymore
      //in consistency checks
      h.setSkipChecks(true);
    }
  }
}
/**
 * Close the given region on every server where hbck observed it deployed, then
 * permanently offline it in the master. A failure against one deployment is
 * logged and skipped so the remaining deployments are still processed.
 */
private void undeployRegionsForHbi(HbckInfo hi) throws IOException, InterruptedException {
  for (OnlineEntry entry : hi.deployedEntries) {
    LOG.debug("Undeploy region "  + entry.hri + " from " + entry.hsa);
    try {
      HBaseFsckRepair.closeRegionSilentlyAndWait(connection, entry.hsa, entry.hri);
      offline(entry.hri.getRegionName());
    } catch (IOException ioe) {
      LOG.warn("Got exception when attempting to offline region "
          + Bytes.toString(entry.hri.getRegionName()), ioe);
    }
  }
}
/**
 * Attempts to undeploy a region from a region server based in information in
 * META.  Any operations that modify the file system should make sure that
 * its corresponding region is not deployed to prevent data races.
 *
 * A separate call is required to update the master in-memory region state
 * kept in the AssignementManager.  Because disable uses this state instead of
 * that found in META, we can't seem to cleanly disable/delete tables that
 * have been hbck fixed.  When used on a version of HBase that does not have
 * the offline ipc call exposed on the master (&lt;0.90.5, &lt;0.92.0) a master
 * restart or failover may be required.
 */
private void closeRegion(HbckInfo hi) throws IOException, InterruptedException {
  // No meta or HDFS record: fall back to closing wherever hbck saw it deployed.
  if (hi.metaEntry == null && hi.hdfsEntry == null) {
    undeployRegions(hi);
    return;
  }

  // get assignment info and hregioninfo from meta.
  Get get = new Get(hi.getRegionName());
  get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
  get.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
  get.addColumn(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
  // also get the locations of the replicas to close if the primary region is being closed
  if (hi.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
    int numReplicas = admin.getTableDescriptor(hi.getTableName()).getRegionReplication();
    for (int i = 0; i < numReplicas; i++) {
      get.addColumn(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(i));
      get.addColumn(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(i));
    }
  }
  Result r = meta.get(get);
  RegionLocations rl = MetaTableAccessor.getRegionLocations(r);
  if (rl == null) {
    LOG.warn("Unable to close region " + hi.getRegionNameAsString() +
        " since meta does not have handle to reach it");
    return;
  }
  // Close each located replica; skip (with a report) any location that is
  // missing the data needed to reach it.
  for (HRegionLocation h : rl.getRegionLocations()) {
    ServerName serverName = h.getServerName();
    if (serverName == null) {
      errors.reportError("Unable to close region "
          + hi.getRegionNameAsString() +  " because meta does not "
          + "have handle to reach it.");
      continue;
    }
    HRegionInfo hri = h.getRegionInfo();
    if (hri == null) {
      LOG.warn("Unable to close region " + hi.getRegionNameAsString()
          + " because hbase:meta had invalid or missing "
          + HConstants.CATALOG_FAMILY_STR + ":"
          + Bytes.toString(HConstants.REGIONINFO_QUALIFIER)
          + " qualifier value.");
      continue;
    }
    // close the region -- close files and remove assignment
    HBaseFsckRepair.closeRegionSilentlyAndWait(connection, serverName, hri);
  }
}
/**
 * If -fixAssignments is enabled, undeploy the region everywhere it is seen,
 * then force a fresh assignment and wait for it to complete. When invoked on a
 * primary replica, the same repair is applied to each secondary replica, which
 * is then flagged to skip further consistency checks this run.
 *
 * @param hbi region to repair
 * @param msg message printed to the error reporter before repairing
 */
private void tryAssignmentRepair(HbckInfo hbi, String msg) throws IOException,
  KeeperException, InterruptedException {
  // If we are trying to fix the errors
  if (shouldFixAssignments()) {
    errors.print(msg);
    undeployRegions(hbi);
    setShouldRerun();
    HRegionInfo hri = hbi.getHdfsHRI();
    if (hri == null) {
      // No HDFS-derived region info; fall back to what meta recorded.
      hri = hbi.metaEntry;
    }
    HBaseFsckRepair.fixUnassigned(admin, hri);
    HBaseFsckRepair.waitUntilAssigned(admin, hri);

    // also assign replicas if needed (do it only when this call operates on a primary replica)
    if (hbi.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) return;
    int replicationCount = admin.getTableDescriptor(hri.getTable()).getRegionReplication();
    for (int i = 1; i < replicationCount; i++) {
      hri = RegionReplicaUtil.getRegionInfoForReplica(hri, i);
      HbckInfo h = regionInfoMap.get(hri.getEncodedName());
      if (h != null) {
        undeployRegions(h);
        //set skip checks; we undeploy & deploy it; we don't want to evaluate this hbi anymore
        //in consistency checks
        h.setSkipChecks(true);
      }
      HBaseFsckRepair.fixUnassigned(admin, hri);
      HBaseFsckRepair.waitUntilAssigned(admin, hri);
    }
  }
}
/**
 * Check a single region for consistency and correct deployment.
 * <p>
 * Classifies the region by where it is visible (hbase:meta, HDFS, deployed on
 * a region server) and whether the deployment matches meta, then walks a
 * decision tree: healthy combinations return early; every inconsistent
 * combination is reported and, when the corresponding -fix* option is enabled,
 * repaired (undeploy, reassign, patch or delete the meta row, or reset a
 * lingering split parent). The branch order is significant.
 *
 * @param key encoded region name used for reporting
 * @param hbi everything hbck knows about this region
 */
private void checkRegionConsistency(final String key, final HbckInfo hbi)
throws IOException, KeeperException, InterruptedException {

  if (hbi.isSkipChecks()) return;
  String descriptiveName = hbi.toString();
  boolean inMeta = hbi.metaEntry != null;
  // In case not checking HDFS, assume the region is on HDFS
  boolean inHdfs = !shouldCheckHdfs() || hbi.getHdfsRegionDir() != null;
  boolean hasMetaAssignment = inMeta && hbi.metaEntry.regionServer != null;
  boolean isDeployed = !hbi.deployedOn.isEmpty();
  boolean isMultiplyDeployed = hbi.deployedOn.size() > 1;
  boolean deploymentMatchesMeta =
    hasMetaAssignment && isDeployed && !isMultiplyDeployed &&
    hbi.metaEntry.regionServer.equals(hbi.deployedOn.get(0));
  boolean splitParent =
    inMeta && hbi.metaEntry.isSplit() && hbi.metaEntry.isOffline();
  boolean shouldBeDeployed = inMeta && !isTableDisabled(hbi.metaEntry.getTable());
  // Regions modified within the timelag window are skipped to avoid racing
  // with in-flight operations.
  boolean recentlyModified = inHdfs &&
    hbi.getModTime() + timelag > System.currentTimeMillis();

  // ========== First the healthy cases =============
  if (hbi.containsOnlyHdfsEdits()) {
    return;
  }
  if (inMeta && inHdfs && isDeployed && deploymentMatchesMeta && shouldBeDeployed) {
    return;
  } else if (inMeta && inHdfs && !shouldBeDeployed && !isDeployed) {
    LOG.info("Region " + descriptiveName + " is in META, and in a disabled " +
        "tabled that is not deployed");
    return;
  } else if (recentlyModified) {
    LOG.warn("Region " + descriptiveName + " was recently modified -- skipping");
    return;
  }
  // ========== Cases where the region is not in hbase:meta =============
  else if (!inMeta && !inHdfs && !isDeployed) {
    // We shouldn't have record of this region at all then!
    assert false : "Entry for region with no data";
  } else if (!inMeta && !inHdfs && isDeployed) {
    errors.reportError(ERROR_CODE.NOT_IN_META_HDFS, "Region "
        + descriptiveName + ", key=" + key + ", not on HDFS or in hbase:meta but " +
        "deployed on " + Joiner.on(", ").join(hbi.deployedOn));
    if (shouldFixAssignments()) {
      undeployRegions(hbi);
    }

  } else if (!inMeta && inHdfs && !isDeployed) {
    if (hbi.isMerged()) {
      // This region has already been merged, the remaining hdfs file will be
      // cleaned by CatalogJanitor later
      hbi.setSkipChecks(true);
      LOG.info("Region " + descriptiveName
          + " got merge recently, its file(s) will be cleaned by CatalogJanitor later");
      return;
    }
    errors.reportError(ERROR_CODE.NOT_IN_META_OR_DEPLOYED, "Region "
        + descriptiveName + " on HDFS, but not listed in hbase:meta " +
        "or deployed on any region server");
    // restore region consistency of an adopted orphan
    if (shouldFixMeta()) {
      if (!hbi.isHdfsRegioninfoPresent()) {
        LOG.error("Region " + hbi.getHdfsHRI() + " could have been repaired"
            +  " in table integrity repair phase if -fixHdfsOrphans was" +
            " used.");
        return;
      }

      HRegionInfo hri = hbi.getHdfsHRI();
      TableInfo tableInfo = tablesInfo.get(hri.getTable());

      // Lazily build (once per table) the sorted list of regions meta knows about,
      // so we can detect a leftover dir from a failed split below.
      if (tableInfo.regionsFromMeta.isEmpty()) {
        for (HbckInfo h : regionInfoMap.values()) {
          if (hri.getTable().equals(h.getTableName())) {
            if (h.metaEntry != null) tableInfo.regionsFromMeta
                .add((HRegionInfo) h.metaEntry);
          }
        }
        Collections.sort(tableInfo.regionsFromMeta);
      }
      for (HRegionInfo region : tableInfo.regionsFromMeta) {
        if (Bytes.compareTo(region.getStartKey(), hri.getStartKey()) <= 0
            && (region.getEndKey().length == 0 || Bytes.compareTo(region.getEndKey(),
              hri.getEndKey()) >= 0)
            && Bytes.compareTo(region.getStartKey(), hri.getEndKey()) <= 0) {
          if(region.isSplit() || region.isOffline()) continue;
          // hri's key range is contained by a live meta region: if hri still
          // references that region's files, it is a stale daughter dir from a
          // failed split -- delete it from HDFS instead of adopting it.
          Path regionDir = hbi.getHdfsRegionDir();
          FileSystem fs = regionDir.getFileSystem(getConf());
          List<Path> familyDirs = FSUtils.getFamilyDirs(fs, regionDir);
          for (Path familyDir : familyDirs) {
            List<Path> referenceFilePaths = FSUtils.getReferenceFilePaths(fs, familyDir);
            for (Path referenceFilePath : referenceFilePaths) {
              Path parentRegionDir =
                  StoreFileInfo.getReferredToFile(referenceFilePath).getParent().getParent();
              if (parentRegionDir.toString().endsWith(region.getEncodedName())) {
                LOG.warn(hri + " start and stop keys are in the range of " + region
                    + ". The region might not be cleaned up from hdfs when region " + region
                    + " split failed. Hence deleting from hdfs.");
                HRegionFileSystem.deleteRegionFromFileSystem(getConf(), fs,
                  regionDir.getParent(), hri);
                return;
              }
            }
          }
        }
      }

      LOG.info("Patching hbase:meta with .regioninfo: " + hbi.getHdfsHRI());
      int numReplicas = admin.getTableDescriptor(hbi.getTableName()).getRegionReplication();
      HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(),
          admin.getClusterStatus().getServers(), numReplicas);

      tryAssignmentRepair(hbi, "Trying to reassign region...");
    }

  } else if (!inMeta && inHdfs && isDeployed) {
    errors.reportError(ERROR_CODE.NOT_IN_META, "Region " + descriptiveName
        + " not in META, but deployed on " + Joiner.on(", ").join(hbi.deployedOn));
    debugLsr(hbi.getHdfsRegionDir());
    if (hbi.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
      // for replicas, this means that we should undeploy the region (we would have
      // gone over the primaries and fixed meta holes in first phase under
      // checkAndFixConsistency; we shouldn't get the condition !inMeta at
      // this stage unless unwanted replica)
      if (shouldFixAssignments()) {
        undeployRegionsForHbi(hbi);
      }
    }
    if (shouldFixMeta() && hbi.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
      if (!hbi.isHdfsRegioninfoPresent()) {
        LOG.error("This should have been repaired in table integrity repair phase");
        return;
      }

      LOG.info("Patching hbase:meta with with .regioninfo: " + hbi.getHdfsHRI());
      int numReplicas = admin.getTableDescriptor(hbi.getTableName()).getRegionReplication();
      HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(),
          admin.getClusterStatus().getServers(), numReplicas);
      tryAssignmentRepair(hbi, "Trying to fix unassigned region...");
    }

  // ========== Cases where the region is in hbase:meta =============
  } else if (inMeta && inHdfs && !isDeployed && splitParent) {
    // check whether this is an actual error, or just transient state where parent
    // is not cleaned
    if (hbi.metaEntry.splitA != null && hbi.metaEntry.splitB != null) {
      // check that split daughters are there
      HbckInfo infoA = this.regionInfoMap.get(hbi.metaEntry.splitA.getEncodedName());
      HbckInfo infoB = this.regionInfoMap.get(hbi.metaEntry.splitB.getEncodedName());
      if (infoA != null && infoB != null) {
        // we already processed or will process daughters. Move on, nothing to see here.
        hbi.setSkipChecks(true);
        return;
      }
    }
    errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, "Region "
        + descriptiveName + " is a split parent in META, in HDFS, "
        + "and not deployed on any region server. This could be transient.");
    if (shouldFixSplitParents()) {
      setShouldRerun();
      resetSplitParent(hbi);
    }
  } else if (inMeta && !inHdfs && !isDeployed) {
    errors.reportError(ERROR_CODE.NOT_IN_HDFS_OR_DEPLOYED, "Region "
        + descriptiveName + " found in META, but not in HDFS "
        + "or deployed on any region server.");
    if (shouldFixMeta()) {
      deleteMetaRegion(hbi);
    }
  } else if (inMeta && !inHdfs && isDeployed) {
    errors.reportError(ERROR_CODE.NOT_IN_HDFS, "Region " + descriptiveName
        + " found in META, but not in HDFS, " +
        "and deployed on " + Joiner.on(", ").join(hbi.deployedOn));
    // We treat HDFS as ground truth.  Any information in meta is transient
    // and equivalent data can be regenerated.  So, lets unassign and remove
    // these problems from META.
    if (shouldFixAssignments()) {
      errors.print("Trying to fix unassigned region...");
      undeployRegions(hbi);
    }
    if (shouldFixMeta()) {
      // wait for it to complete
      deleteMetaRegion(hbi);
    }
  } else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) {
    errors.reportError(ERROR_CODE.NOT_DEPLOYED, "Region " + descriptiveName
        + " not deployed on any region server.");
    tryAssignmentRepair(hbi, "Trying to fix unassigned region...");
  } else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) {
    errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED,
        "Region " + descriptiveName + " should not be deployed according " +
        "to META, but is deployed on " + Joiner.on(", ").join(hbi.deployedOn));
    if (shouldFixAssignments()) {
      errors.print("Trying to close the region " + descriptiveName);
      setShouldRerun();
      HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn);
    }
  } else if (inMeta && inHdfs && isMultiplyDeployed) {
    errors.reportError(ERROR_CODE.MULTI_DEPLOYED, "Region " + descriptiveName
        + " is listed in hbase:meta on region server " + hbi.metaEntry.regionServer
        + " but is multiply assigned to region servers " +
        Joiner.on(", ").join(hbi.deployedOn));
    // If we are trying to fix the errors
    if (shouldFixAssignments()) {
      errors.print("Trying to fix assignment error...");
      setShouldRerun();
      HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn);
    }
  } else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
    errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, "Region "
        + descriptiveName + " listed in hbase:meta on region server " +
        hbi.metaEntry.regionServer + " but found on region server " +
        hbi.deployedOn.get(0));
    // If we are trying to fix the errors
    if (shouldFixAssignments()) {
      errors.print("Trying to fix assignment error...");
      setShouldRerun();
      HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn);
      HBaseFsckRepair.waitUntilAssigned(admin, hbi.getHdfsHRI());
    }
  } else {
    errors.reportError(ERROR_CODE.UNKNOWN, "Region " + descriptiveName +
        " is in an unforeseen state:" +
        " inMeta=" + inMeta +
        " inHdfs=" + inHdfs +
        " isDeployed=" + isDeployed +
        " isMultiplyDeployed=" + isMultiplyDeployed +
        " deploymentMatchesMeta=" + deploymentMatchesMeta +
        " shouldBeDeployed=" + shouldBeDeployed);
  }
}
/**
 * Checks tables integrity. Goes over all regions and scans the tables.
 * Collects all the pieces for each table and checks if there are missing,
 * repeated or overlapping ones.
 * <p>
 * Builds {@code tablesInfo} from the regions in {@code regionInfoMap}
 * (skipping regions without usable meta info), adds tables that exist only on
 * disk, then runs the region-chain check per table with a report-only handler.
 *
 * @return the populated map of table name to TableInfo
 * @throws IOException
 */
SortedMap<TableName, TableInfo> checkIntegrity() throws IOException {
  tablesInfo = new TreeMap<TableName,TableInfo> ();
  LOG.debug("There are " + regionInfoMap.size() + " region info entries");
  for (HbckInfo hbi : regionInfoMap.values()) {
    // Check only valid, working regions
    if (hbi.metaEntry == null) {
      // this assumes that consistency check has run loadMetaEntry
      Path p = hbi.getHdfsRegionDir();
      if (p == null) {
        errors.report("No regioninfo in Meta or HDFS. " + hbi);
      }

      // TODO test.
      continue;
    }
    if (hbi.metaEntry.regionServer == null) {
      errors.detail("Skipping region because no region server: " + hbi);
      continue;
    }
    if (hbi.metaEntry.isOffline()) {
      errors.detail("Skipping region because it is offline: " + hbi);
      continue;
    }
    if (hbi.containsOnlyHdfsEdits()) {
      errors.detail("Skipping region because it only contains edits" + hbi);
      continue;
    }

    // Missing regionDir or over-deployment is checked elsewhere. Include
    // these cases in modTInfo, so we can evaluate those regions as part of
    // the region chain in META
    //if (hbi.foundRegionDir == null) continue;
    //if (hbi.deployedOn.size() != 1) continue;
    if (hbi.deployedOn.size() == 0) continue;

    // We should be safe here
    TableName tableName = hbi.metaEntry.getTable();
    TableInfo modTInfo = tablesInfo.get(tableName);
    if (modTInfo == null) {
      modTInfo = new TableInfo(tableName);
    }
    for (ServerName server : hbi.deployedOn) {
      modTInfo.addServer(server);
    }

    if (!hbi.isSkipChecks()) {
      modTInfo.addRegionInfo(hbi);
    }

    tablesInfo.put(tableName, modTInfo);
  }

  // Tables with an HDFS dir but no regions still need a TableInfo entry.
  loadTableInfosForTablesWithNoRegion();

  // Report-only handler: suggests fixes without modifying anything.
  for (TableInfo tInfo : tablesInfo.values()) {
    TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors);
    if (!tInfo.checkRegionChain(handler)) {
      errors.report("Found inconsistency in table " + tInfo.getName());
    }
  }
  return tablesInfo;
}
/**
 * Load TableInfo entries for tables that report no regions but still have a
 * table directory in HDFS, so they participate in integrity checking.
 */
private void loadTableInfosForTablesWithNoRegion() throws IOException {
  Map<String, HTableDescriptor> allTables = new FSTableDescriptors(getConf()).getAll();
  for (HTableDescriptor descriptor : allTables.values()) {
    if (checkMetaOnly && !descriptor.isMetaTable()) {
      continue; // in meta-only mode, user tables are out of scope
    }

    TableName tableName = descriptor.getTableName();
    if (isTableIncluded(tableName) && !tablesInfo.containsKey(tableName)) {
      TableInfo tableInfo = new TableInfo(tableName);
      tableInfo.htds.add(descriptor);
      tablesInfo.put(descriptor.getTableName(), tableInfo);
    }
  }
}
/**
 * Merge hdfs data by moving from contained HbckInfo into targetRegionDir.
 * <p>
 * Moves every store file of the contained region into the matching family dir
 * of the container, skipping the contained region's .regioninfo and oldlogs,
 * then sidelines what remains of the contained region dir. A contained dir
 * that has vanished (already sidelined by a concurrent/previous run) is
 * skipped without error.
 *
 * @return number of file move fixes done to merge regions.
 */
public int mergeRegionDirs(Path targetRegionDir, HbckInfo contained) throws IOException {
  int fileMoves = 0;
  String thread = Thread.currentThread().getName();
  LOG.debug("[" + thread + "] Contained region dir after close and pause");
  debugLsr(contained.getHdfsRegionDir());

  // rename the contained into the container.
  FileSystem fs = targetRegionDir.getFileSystem(getConf());
  FileStatus[] dirs = null;
  try {
    dirs = fs.listStatus(contained.getHdfsRegionDir());
  } catch (FileNotFoundException fnfe) {
    // region we are attempting to merge in is not present!  Since this is a merge, there is
    // no harm skipping this region if it does not exist.
    if (!fs.exists(contained.getHdfsRegionDir())) {
      LOG.warn("[" + thread + "] HDFS region dir " + contained.getHdfsRegionDir()
          + " is missing. Assuming already sidelined or moved.");
    } else {
      sidelineRegionDir(fs, contained);
    }
    return fileMoves;
  }

  if (dirs == null) {
    if (!fs.exists(contained.getHdfsRegionDir())) {
      LOG.warn("[" + thread + "] HDFS region dir " + contained.getHdfsRegionDir()
          + " already sidelined.");
    } else {
      sidelineRegionDir(fs, contained);
    }
    return fileMoves;
  }

  for (FileStatus cf : dirs) {
    Path src = cf.getPath();
    Path dst =  new Path(targetRegionDir, src.getName());

    if (src.getName().equals(HRegionFileSystem.REGION_INFO_FILE)) {
      // do not copy the old .regioninfo file.
      continue;
    }

    if (src.getName().equals(HConstants.HREGION_OLDLOGDIR_NAME)) {
      // do not copy the .oldlogs files
      continue;
    }

    LOG.info("[" + thread + "] Moving files from " + src + " into containing region " + dst);
    // FileSystem.rename is inconsistent with directories -- if the
    // dst (foo/a) exists and is a dir, and the src (foo/b) is a dir,
    // it moves the src into the dst dir resulting in (foo/a/b).  If
    // the dst does not exist, and the src a dir, src becomes dst. (foo/b)
    for (FileStatus hfile : fs.listStatus(src)) {
      boolean success = fs.rename(hfile.getPath(), dst);
      if (success) {
        fileMoves++;
      }
    }
    LOG.debug("[" + thread + "] Sideline directory contents:");
    debugLsr(targetRegionDir);
  }

  // if all success.
  sidelineRegionDir(fs, contained);
  LOG.info("[" + thread + "] Sidelined region dir "+ contained.getHdfsRegionDir() + " into " +
      getSidelineDir());
  debugLsr(contained.getHdfsRegionDir());

  return fileMoves;
}
/**
 * Executor task that hands one group of overlapping regions to the supplied
 * table-integrity handler for repair.
 */
static class WorkItemOverlapMerge implements Callable<Void> {
  Collection<HbckInfo> overlapgroup;
  private TableIntegrityErrorHandler handler;

  WorkItemOverlapMerge(Collection<HbckInfo> overlapgroup, TableIntegrityErrorHandler handler) {
    this.overlapgroup = overlapgroup;
    this.handler = handler;
  }

  @Override
  public Void call() throws Exception {
    handler.handleOverlapGroup(overlapgroup);
    return null;
  }
};
/**
 * Maintain information about a particular table.
 * Aggregates everything hbck knows about one table: which servers host its
 * regions, the region chain (via the split calculator), problem regions, and
 * the table descriptor(s) found on disk.
 */
public class TableInfo {
  TableName tableName;
  // servers hosting at least one region of this table
  TreeSet <ServerName> deployedOn;

  // backwards regions (end key sorts before start key)
  final List<HbckInfo> backwards = new ArrayList<HbckInfo>();

  // sidelined big overlapped regions
  final Map<Path, HbckInfo> sidelinedRegions = new HashMap<Path, HbckInfo>();

  // region split calculator
  final RegionSplitCalculator<HbckInfo> sc = new RegionSplitCalculator<HbckInfo>(cmp);

  // Histogram of different HTableDescriptors found.  Ideally there is only one!
  final Set<HTableDescriptor> htds = new HashSet<HTableDescriptor>();

  // key = start split, values = set of splits in problem group
  final Multimap<byte[], HbckInfo> overlapGroups =
    TreeMultimap.create(RegionSplitCalculator.BYTES_COMPARATOR, cmp);

  // list of regions derived from meta entries.
  final List<HRegionInfo> regionsFromMeta = new ArrayList<HRegionInfo>();
TableInfo(TableName name) {
this.tableName = name;
deployedOn = new TreeSet <ServerName>();
}
/**
* @return descriptor common to all regions. null if are none or multiple!
*/
private HTableDescriptor getHTD() {
if (htds.size() == 1) {
return (HTableDescriptor)htds.toArray()[0];
} else {
LOG.error("None/Multiple table descriptors found for table '"
+ tableName + "' regions: " + htds);
}
return null;
}
  /**
   * Add a region to this table's split calculator for region-chain checking.
   * Regions whose end key precedes their start key are reported as a cycle and
   * recorded in {@code backwards} instead. Only primary replicas are added to
   * the calculator.
   */
  public void addRegionInfo(HbckInfo hir) {
    if (Bytes.equals(hir.getEndKey(), HConstants.EMPTY_END_ROW)) {
      // end key is absolute end key, just add it.
      // ignore replicas other than primary for these checks
      if (hir.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) sc.add(hir);
      return;
    }

    // if not the absolute end key, check for cycle
    if (Bytes.compareTo(hir.getStartKey(), hir.getEndKey()) > 0) {
      errors.reportError(
          ERROR_CODE.REGION_CYCLE,
          String.format("The endkey for this region comes before the "
              + "startkey, startkey=%s, endkey=%s",
              Bytes.toStringBinary(hir.getStartKey()),
              Bytes.toStringBinary(hir.getEndKey())), this, hir);
      backwards.add(hir);
      return;
    }

    // main case, add to split calculator
    // ignore replicas other than primary for these checks
    if (hir.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) sc.add(hir);
  }
public void addServer(ServerName server) {
this.deployedOn.add(server);
}
public TableName getName() {
return tableName;
}
public int getNumRegions() {
return sc.getStarts().size() + backwards.size();
}
  /**
   * Table-integrity handler that only reports problems (with suggested manual
   * fixes) through the error reporter; it never modifies HDFS or meta itself.
   * Subclassed by HDFSIntegrityFixer, which performs actual repairs.
   */
  private class IntegrityFixSuggester extends TableIntegrityErrorHandlerImpl {
    ErrorReporter errors;

    IntegrityFixSuggester(TableInfo ti, ErrorReporter errors) {
      this.errors = errors;
      setTableInfo(ti);
    }

    @Override
    public void handleRegionStartKeyNotEmpty(HbckInfo hi) throws IOException{
      errors.reportError(ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY,
          "First region should start with an empty key. You need to "
          + " create a new region and regioninfo in HDFS to plug the hole.",
          getTableInfo(), hi);
    }

    @Override
    public void handleRegionEndKeyNotEmpty(byte[] curEndKey) throws IOException {
      errors.reportError(ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY,
          "Last region should end with an empty key. You need to "
              + "create a new region and regioninfo in HDFS to plug the hole.", getTableInfo());
    }

    @Override
    public void handleDegenerateRegion(HbckInfo hi) throws IOException{
      errors.reportError(ERROR_CODE.DEGENERATE_REGION,
          "Region has the same start and end key.", getTableInfo(), hi);
    }

    @Override
    public void handleDuplicateStartKeys(HbckInfo r1, HbckInfo r2) throws IOException{
      byte[] key = r1.getStartKey();
      // dup start key -- report once for each of the two regions involved.
      errors.reportError(ERROR_CODE.DUPE_STARTKEYS,
          "Multiple regions have the same startkey: "
          + Bytes.toStringBinary(key), getTableInfo(), r1);
      errors.reportError(ERROR_CODE.DUPE_STARTKEYS,
          "Multiple regions have the same startkey: "
          + Bytes.toStringBinary(key), getTableInfo(), r2);
    }

    @Override
    public void handleOverlapInRegionChain(HbckInfo hi1, HbckInfo hi2) throws IOException{
      errors.reportError(ERROR_CODE.OVERLAP_IN_REGION_CHAIN,
          "There is an overlap in the region chain.",
          getTableInfo(), hi1, hi2);
    }

    @Override
    public void handleHoleInRegionChain(byte[] holeStart, byte[] holeStop) throws IOException{
      errors.reportError(
          ERROR_CODE.HOLE_IN_REGION_CHAIN,
          "There is a hole in the region chain between "
              + Bytes.toStringBinary(holeStart) + " and "
              + Bytes.toStringBinary(holeStop)
              + ". You need to create a new .regioninfo and region "
              + "dir in hdfs to plug the hole.");
    }
  };
/**
* This handler fixes integrity errors from hdfs information. There are
* basically three classes of integrity problems 1) holes, 2) overlaps, and
* 3) invalid regions.
*
* This class overrides methods that fix holes and the overlap group case.
* Individual cases of particular overlaps are handled by the general
* overlap group merge repair case.
*
* If hbase is online, this forces regions offline before doing merge
* operations.
*/
private class HDFSIntegrityFixer extends IntegrityFixSuggester {
  Configuration conf;           // cluster configuration used for HDFS/region operations

  boolean fixOverlaps = true;   // whether overlap groups should actually be merged

  HDFSIntegrityFixer(TableInfo ti, ErrorReporter errors, Configuration conf,
      boolean fixHoles, boolean fixOverlaps) {
    super(ti, errors);
    this.conf = conf;
    this.fixOverlaps = fixOverlaps;
    // TODO properly use fixHoles
  }

  /**
   * This is a special case hole -- when the first region of a table is
   * missing from META, HBase doesn't acknowledge the existence of the
   * table.
   */
  @Override
  public void handleRegionStartKeyNotEmpty(HbckInfo next) throws IOException {
    errors.reportError(ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY,
        "First region should start with an empty key. Creating a new " +
        "region and regioninfo in HDFS to plug the hole.",
        getTableInfo(), next);
    HTableDescriptor htd = getTableInfo().getHTD();
    // from special EMPTY_START_ROW to next region's startKey
    HRegionInfo newRegion = new HRegionInfo(htd.getTableName(),
        HConstants.EMPTY_START_ROW, next.getStartKey());

    // TODO test
    HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
    LOG.info("Table region start key was not empty. Created new empty region: "
        + newRegion + " " +region);
    fixes++;
  }

  // Plug the hole at the end of the table with an empty-end-key region.
  @Override
  public void handleRegionEndKeyNotEmpty(byte[] curEndKey) throws IOException {
    errors.reportError(ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY,
        "Last region should end with an empty key. Creating a new "
        + "region and regioninfo in HDFS to plug the hole.", getTableInfo());
    HTableDescriptor htd = getTableInfo().getHTD();
    // from curEndKey to EMPTY_START_ROW
    HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), curEndKey,
        HConstants.EMPTY_START_ROW);

    HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
    LOG.info("Table region end key was not empty. Created new empty region: " + newRegion
        + " " + region);
    fixes++;
  }

  /**
   * There is a hole in the hdfs regions that violates the table integrity
   * rules.  Create a new empty region that patches the hole.
   */
  @Override
  public void handleHoleInRegionChain(byte[] holeStartKey, byte[] holeStopKey) throws IOException {
    errors.reportError(
        ERROR_CODE.HOLE_IN_REGION_CHAIN,
        "There is a hole in the region chain between "
        + Bytes.toStringBinary(holeStartKey) + " and "
        + Bytes.toStringBinary(holeStopKey)
        + ".  Creating a new regioninfo and region "
        + "dir in hdfs to plug the hole.");
    HTableDescriptor htd = getTableInfo().getHTD();
    HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), holeStartKey, holeStopKey);
    HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
    LOG.info("Plugged hole by creating new empty region: "+ newRegion + " " +region);
    fixes++;
  }

  /**
   * This takes set of overlapping regions and merges them into a single
   * region.  This covers cases like degenerate regions, shared start key,
   * general overlaps, duplicate ranges, and partial overlapping regions.
   *
   * Cases:
   * - Clean regions that overlap
   * - Only .oldlogs regions (can't find start/stop range, or figure out)
   *
   * This is basically threadsafe, except for the fixer increment in mergeOverlaps.
   */
  @Override
  public void handleOverlapGroup(Collection<HbckInfo> overlap)
      throws IOException {
    Preconditions.checkNotNull(overlap);
    Preconditions.checkArgument(overlap.size() >0);

    if (!this.fixOverlaps) {
      LOG.warn("Not attempting to repair overlaps.");
      return;
    }

    // Very large groups are not merged; optionally sideline the excess instead.
    if (overlap.size() > maxMerge) {
      LOG.warn("Overlap group has " + overlap.size() + " overlapping " +
          "regions which is greater than " + maxMerge + ", the max number of regions to merge");
      if (sidelineBigOverlaps) {
        // we only sideline big overlapped groups that exceeds the max number of regions to merge
        sidelineBigOverlaps(overlap);
      }
      return;
    }

    mergeOverlaps(overlap);
  }

  // Merge one overlap group: compute the covering key range, close and
  // offline every member, create one empty container region spanning the
  // range, then move each member's store files into it.
  void mergeOverlaps(Collection<HbckInfo> overlap)
      throws IOException {
    String thread = Thread.currentThread().getName();
    LOG.info("== [" + thread + "] Merging regions into one region: "
      + Joiner.on(",").join(overlap));
    // get the min / max range and close all concerned regions
    Pair<byte[], byte[]> range = null;
    for (HbckInfo hi : overlap) {
      if (range == null) {
        range = new Pair<byte[], byte[]>(hi.getStartKey(), hi.getEndKey());
      } else {
        if (RegionSplitCalculator.BYTES_COMPARATOR
            .compare(hi.getStartKey(), range.getFirst()) < 0) {
          range.setFirst(hi.getStartKey());
        }
        if (RegionSplitCalculator.BYTES_COMPARATOR
            .compare(hi.getEndKey(), range.getSecond()) > 0) {
          range.setSecond(hi.getEndKey());
        }
      }
      // need to close files so delete can happen.
      LOG.debug("[" + thread + "] Closing region before moving data around: " + hi);
      LOG.debug("[" + thread + "] Contained region dir before close");
      debugLsr(hi.getHdfsRegionDir());
      try {
        LOG.info("[" + thread + "] Closing region: " + hi);
        closeRegion(hi);
      } catch (IOException ioe) {
        // best-effort: a region that fails to close is still merged below
        LOG.warn("[" + thread + "] Was unable to close region " + hi
          + ".  Just continuing... ", ioe);
      } catch (InterruptedException e) {
        LOG.warn("[" + thread + "] Was unable to close region " + hi
          + ".  Just continuing... ", e);
      }

      try {
        LOG.info("[" + thread + "] Offlining region: " + hi);
        offline(hi.getRegionName());
      } catch (IOException ioe) {
        LOG.warn("[" + thread + "] Unable to offline region from master: " + hi
          + ".  Just continuing... ", ioe);
      }
    }

    // create new empty container region.
    HTableDescriptor htd = getTableInfo().getHTD();
    // from start key to end Key
    HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), range.getFirst(),
        range.getSecond());
    HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
    LOG.info("[" + thread + "] Created new empty container region: " +
        newRegion + " to contain regions: " + Joiner.on(",").join(overlap));
    debugLsr(region.getRegionFileSystem().getRegionDir());

    // all target regions are closed, should be able to safely cleanup.
    boolean didFix= false;
    Path target = region.getRegionFileSystem().getRegionDir();
    for (HbckInfo contained : overlap) {
      LOG.info("[" + thread + "] Merging " + contained  + " into " + target );
      int merges = mergeRegionDirs(target, contained);
      if (merges > 0) {
        didFix = true;
      }
    }
    if (didFix) {
      fixes++;
    }
  }

  /**
   * Sideline some regions in a big overlap group so that it
   * will have fewer regions, and it is easier to merge them later on.
   *
   * @param bigOverlap the overlapped group with regions more than maxMerge
   * @throws IOException
   */
  void sidelineBigOverlaps(
      Collection<HbckInfo> bigOverlap) throws IOException {
    // Sideline only as many regions as needed to bring the group down to
    // maxMerge, capped by maxOverlapsToSideline.
    int overlapsToSideline = bigOverlap.size() - maxMerge;
    if (overlapsToSideline > maxOverlapsToSideline) {
      overlapsToSideline = maxOverlapsToSideline;
    }
    List<HbckInfo> regionsToSideline =
        RegionSplitCalculator.findBigRanges(bigOverlap, overlapsToSideline);
    FileSystem fs = FileSystem.get(conf);
    for (HbckInfo regionToSideline: regionsToSideline) {
      try {
        LOG.info("Closing region: " + regionToSideline);
        closeRegion(regionToSideline);
      } catch (IOException ioe) {
        LOG.warn("Was unable to close region " + regionToSideline
          + ".  Just continuing... ", ioe);
      } catch (InterruptedException e) {
        LOG.warn("Was unable to close region " + regionToSideline
          + ".  Just continuing... ", e);
      }

      try {
        LOG.info("Offlining region: " + regionToSideline);
        offline(regionToSideline.getRegionName());
      } catch (IOException ioe) {
        LOG.warn("Unable to offline region from master: " + regionToSideline
          + ".  Just continuing... ", ioe);
      }

      LOG.info("Before sideline big overlapped region: " + regionToSideline.toString());
      Path sidelineRegionDir = sidelineRegionDir(fs, TO_BE_LOADED, regionToSideline);
      if (sidelineRegionDir != null) {
        sidelinedRegions.put(sidelineRegionDir, regionToSideline);
        LOG.info("After sidelined big overlapped region: "
          + regionToSideline.getRegionNameAsString()
          + " to " + sidelineRegionDir.toString());
        fixes++;
      }
    }
  }
}
/**
* Check the region chain (from META) of this table. We are looking for
* holes, overlaps, and cycles.
* @return false if there are errors
* @throws IOException
*/
public boolean checkRegionChain(TableIntegrityErrorHandler handler) throws IOException {
  // When table is disabled no need to check for the region chain. Some of the regions
  // accidently if deployed, this below code might report some issues like missing start
  // or end regions or region hole in chain and may try to fix which is unwanted.
  if (isTableDisabled(this.tableName)) {
    return true;
  }
  int originalErrorsCount = errors.getErrorList().size();
  // Coverage: for each split key, the set of regions whose range contains it.
  Multimap<byte[], HbckInfo> regions = sc.calcCoverage();
  SortedSet<byte[]> splits = sc.getSplits();

  byte[] prevKey = null;
  byte[] problemKey = null;    // start key of the overlap group currently being collected

  if (splits.size() == 0) {
    // no region for this table
    handler.handleHoleInRegionChain(HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
  }

  // Walk the split keys in order; at each key the number of covering regions
  // tells us whether the chain is clean (1), overlapping (>1), or holed (0).
  for (byte[] key : splits) {
    Collection<HbckInfo> ranges = regions.get(key);
    if (prevKey == null && !Bytes.equals(key, HConstants.EMPTY_BYTE_ARRAY)) {
      for (HbckInfo rng : ranges) {
        handler.handleRegionStartKeyNotEmpty(rng);
      }
    }

    // check for degenerate ranges
    for (HbckInfo rng : ranges) {
      // special endkey case converts '' to null
      byte[] endKey = rng.getEndKey();
      endKey = (endKey.length == 0) ? null : endKey;
      if (Bytes.equals(rng.getStartKey(),endKey)) {
        handler.handleDegenerateRegion(rng);
      }
    }

    if (ranges.size() == 1) {
      // this split key is ok -- no overlap, not a hole.
      if (problemKey != null) {
        LOG.warn("reached end of problem group: " + Bytes.toStringBinary(key));
      }
      problemKey = null; // fell through, no more problem.
    } else if (ranges.size() > 1) {
      // set the new problem key group name, if already have problem key, just
      // keep using it.
      if (problemKey == null) {
        // only for overlap regions.
        LOG.warn("Naming new problem group: " + Bytes.toStringBinary(key));
        problemKey = key;
      }
      overlapGroups.putAll(problemKey, ranges);

      // record errors
      ArrayList<HbckInfo> subRange = new ArrayList<HbckInfo>(ranges);
      //  this dumb and n^2 but this shouldn't happen often
      for (HbckInfo r1 : ranges) {
        if (r1.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) continue;
        subRange.remove(r1);
        for (HbckInfo r2 : subRange) {
          if (r2.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) continue;
          if (Bytes.compareTo(r1.getStartKey(), r2.getStartKey())==0) {
            handler.handleDuplicateStartKeys(r1,r2);
          } else {
            // overlap
            handler.handleOverlapInRegionChain(r1, r2);
          }
        }
      }

    } else if (ranges.size() == 0) {
      if (problemKey != null) {
        LOG.warn("reached end of problem group: " + Bytes.toStringBinary(key));
      }
      problemKey = null;

      byte[] holeStopKey = sc.getSplits().higher(key);
      // if higher key is null we reached the top.
      if (holeStopKey != null) {
        // hole
        handler.handleHoleInRegionChain(key, holeStopKey);
      }
    }
    prevKey = key;
  }

  // When the last region of a table is proper and having an empty end key, 'prevKey'
  // will be null.
  if (prevKey != null) {
    handler.handleRegionEndKeyNotEmpty(prevKey);
  }

  // TODO fold this into the TableIntegrityHandler
  if (getConf().getBoolean("hbasefsck.overlap.merge.parallel", true)) {
    LOG.info("Handling overlap merges in parallel. set hbasefsck.overlap.merge.parallel to" +
        " false to run serially.");
    boolean ok = handleOverlapsParallel(handler, prevKey);
    if (!ok) {
      return false;
    }
  } else {
    LOG.info("Handling overlap merges serially.  set hbasefsck.overlap.merge.parallel to" +
        " true to run in parallel.");
    for (Collection<HbckInfo> overlap : overlapGroups.asMap().values()) {
      handler.handleOverlapGroup(overlap);
    }
  }

  if (details) {
    // do full region split map dump
    errors.print("---- Table '" +  this.tableName
        + "': region split map");
    dump(splits, regions);
    errors.print("---- Table '" +  this.tableName
        + "': overlap groups");
    dumpOverlapProblems(overlapGroups);
    errors.print("There are " + overlapGroups.keySet().size()
        + " overlap groups with " + overlapGroups.size()
        + " overlapping regions");
  }
  if (!sidelinedRegions.isEmpty()) {
    LOG.warn("Sidelined big overlapped regions, please bulk load them!");
    errors.print("---- Table '" +  this.tableName
        + "': sidelined big overlapped regions");
    dumpSidelinedRegions(sidelinedRegions);
  }
  // Success means no NEW errors were reported during this check.
  return errors.getErrorList().size() == originalErrorsCount;
}
// Runs the overlap-group handler for every group on the shared executor.
// Returns false if the work (or the wait for it) was interrupted.
private boolean handleOverlapsParallel(TableIntegrityErrorHandler handler, byte[] prevKey)
    throws IOException {
  // we parallelize overlap handler for the case we have lots of groups to fix. We can
  // safely assume each group is independent.
  List<WorkItemOverlapMerge> merges = new ArrayList<WorkItemOverlapMerge>(overlapGroups.size());
  List<Future<Void>> rets;
  for (Collection<HbckInfo> overlap : overlapGroups.asMap().values()) {
    // one work item per overlap group
    merges.add(new WorkItemOverlapMerge(overlap, handler));
  }
  try {
    rets = executor.invokeAll(merges);
  } catch (InterruptedException e) {
    LOG.error("Overlap merges were interrupted", e);
    return false;
  }
  // Check each future: a failed merge is logged but does not abort the run.
  for(int i=0; i<merges.size(); i++) {
    WorkItemOverlapMerge work = merges.get(i);
    Future<Void> f = rets.get(i);
    try {
      f.get();
    } catch(ExecutionException e) {
      LOG.warn("Failed to merge overlap group" + work, e.getCause());
    } catch (InterruptedException e) {
      LOG.error("Waiting for overlap merges was interrupted", e);
      return false;
    }
  }
  return true;
}
/**
* This dumps data in a visually reasonable way for visual debugging
*
* @param splits
* @param regions
*/
/**
 * Dumps the split keys and the regions covering each key in a visually
 * scannable form for debugging: one line per split key.
 *
 * @param splits ordered set of split keys for this table
 * @param regions mapping from split key to the regions covering that key
 */
void dump(SortedSet<byte[]> splits, Multimap<byte[], HbckInfo> regions) {
  // We iterate the split keys (not the regions) so that the last end key is
  // displayed as well.
  for (byte[] splitKey : splits) {
    StringBuilder line = new StringBuilder();
    line.append(Bytes.toStringBinary(splitKey)).append(":\t");
    for (HbckInfo region : regions.get(splitKey)) {
      line.append("[ ").append(region.toString()).append(", ")
          .append(Bytes.toStringBinary(region.getEndKey())).append("]\t");
    }
    errors.print(line.toString());
  }
}
}
/**
 * Prints every overlap group: the group key, each member region with its end
 * key on its own line, then a "----" separator. Iterating keys (rather than
 * regions) ensures the last end key is displayed as well.
 */
public void dumpOverlapProblems(Multimap<byte[], HbckInfo> regions) {
  for (byte[] groupKey : regions.keySet()) {
    errors.print(Bytes.toStringBinary(groupKey) + ":");
    for (HbckInfo member : regions.get(groupKey)) {
      String endKey = Bytes.toStringBinary(member.getEndKey());
      errors.print("[ " + member.toString() + ", " + endKey + "]");
    }
    errors.print("----");
  }
}
/**
 * Prints, for every sidelined region, the HDFS directory it was moved to and
 * a ready-to-paste bulk-load command for restoring its data.
 */
public void dumpSidelinedRegions(Map<Path, HbckInfo> regions) {
  for (Map.Entry<Path, HbckInfo> sidelined : regions.entrySet()) {
    Path dir = sidelined.getKey();
    TableName table = sidelined.getValue().getTableName();
    errors.print("This sidelined region dir should be bulk loaded: " + dir.toString());
    errors.print("Bulk load command looks like: "
        + "hbase org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles "
        + dir.toUri().getPath() + " " + table);
  }
}
/**
 * Returns the overlap groups recorded for the given table.
 * NOTE(review): assumes the table is present in {@code tablesInfo}; an
 * unknown table NPEs here, exactly as before.
 */
public Multimap<byte[], HbckInfo> getOverlapGroups(
    TableName table) {
  return tablesInfo.get(table).overlapGroups;
}
/**
* Return a list of user-space table names whose metadata have not been
* modified in the last few milliseconds specified by timelag
* if any of the REGIONINFO_QUALIFIER, SERVER_QUALIFIER, STARTCODE_QUALIFIER,
* SPLITA_QUALIFIER, SPLITB_QUALIFIER have not changed in the last
* milliseconds specified by timelag, then the table is a candidate to be returned.
* @return tables that have not been modified recently
* @throws IOException if an error is encountered
*/
HTableDescriptor[] getTables(AtomicInteger numSkipped) {
  List<TableName> tableNames = new ArrayList<TableName>();
  long now = System.currentTimeMillis();

  for (HbckInfo hbi : regionInfoMap.values()) {
    MetaEntry info = hbi.metaEntry;

    // if the start key is zero, then we have found the first region of a table.
    // pick only those tables that were not modified in the last few milliseconds.
    if (info != null && info.getStartKey().length == 0 && !info.isMetaRegion()) {
      if (info.modTime + timelag < now) {
        // quiescent long enough -- include the table
        tableNames.add(info.getTable());
      } else {
        numSkipped.incrementAndGet(); // one more in-flux table
      }
    }
  }
  return getHTableDescriptors(tableNames);
}
// Fetches the HTableDescriptor for each listed table via a short-lived
// Admin connection. Failures are logged at DEBUG and swallowed: callers get
// an empty array rather than an exception (deliberate best-effort).
HTableDescriptor[] getHTableDescriptors(List<TableName> tableNames) {
  HTableDescriptor[] htd = new HTableDescriptor[0];
  Admin admin = null;
  try {
    LOG.info("getHTableDescriptors == tableNames => " + tableNames);
    admin = new HBaseAdmin(getConf());
    htd = admin.getTableDescriptorsByTableName(tableNames);
  } catch (IOException e) {
    LOG.debug("Exception getting table descriptors", e);
  } finally {
    if (admin != null) {
      try {
        admin.close();
      } catch (IOException e) {
        // close failure is non-fatal; the descriptors were already fetched
        LOG.debug("Exception closing HBaseAdmin", e);
      }
    }
  }
  return htd;
}
/**
* Gets the entry in regionInfo corresponding to the given encoded
* region name. If the region has not been seen yet, a new entry is added
* and returned.
*/
/**
 * Returns the {@link HbckInfo} tracked for the given encoded region name,
 * creating and registering an empty one on first sight.
 */
private synchronized HbckInfo getOrCreateInfo(String name) {
  HbckInfo info = regionInfoMap.get(name);
  if (info != null) {
    return info;
  }
  info = new HbckInfo(null);
  regionInfoMap.put(name, info);
  return info;
}
/**
 * Checks table locks recorded in ZooKeeper and, when {@code fixTableLocks}
 * is set, releases the expired ones.
 *
 * @throws IOException if the ZooKeeper watcher cannot be created or the
 *     lock check/repair fails
 */
private void checkAndFixTableLocks() throws IOException {
  ZooKeeperWatcher zkw = createZooKeeperWatcher();
  try {
    TableLockChecker checker = new TableLockChecker(zkw, errors);
    checker.checkTableLocks();

    if (this.fixTableLocks) {
      checker.fixExpiredTableLocks();
    }
  } finally {
    // BUGFIX: the watcher was previously never closed, leaking a ZooKeeper
    // connection on every invocation.
    zkw.close();
  }
}
/**
* Check values in regionInfo for hbase:meta
* Check if zero or more than one regions with hbase:meta are found.
* If there are inconsistencies (i.e. zero or more than one regions
* pretend to be holding the hbase:meta) try to fix that and report an error.
* @throws IOException from HBaseFsckRepair functions
* @throws KeeperException
* @throws InterruptedException
*/
boolean checkMetaRegion() throws IOException, KeeperException, InterruptedException {
  // Collect the hbase:meta region (and its replicas) by replica id.
  Map<Integer, HbckInfo> metaRegions = new HashMap<Integer, HbckInfo>();
  for (HbckInfo value : regionInfoMap.values()) {
    if (value.metaEntry != null && value.metaEntry.isMetaRegion()) {
      metaRegions.put(value.getReplicaId(), value);
    }
  }
  int metaReplication = admin.getTableDescriptor(TableName.META_TABLE_NAME)
      .getRegionReplication();
  boolean noProblem = true;
  // There will be always entries in regionInfoMap corresponding to hbase:meta & its replicas
  // Check the deployed servers. It should be exactly one server for each replica.
  for (int i = 0; i < metaReplication; i++) {
    HbckInfo metaHbckInfo = metaRegions.remove(i);
    List<ServerName> servers = new ArrayList<ServerName>();
    if (metaHbckInfo != null) {
      servers = metaHbckInfo.deployedOn;
    }
    if (servers.size() != 1) {
      noProblem = false;
      if (servers.size() == 0) {
        // replica not deployed anywhere: assign it
        assignMetaReplica(i);
      } else if (servers.size() > 1) {
        errors
        .reportError(ERROR_CODE.MULTI_META_REGION, "hbase:meta, replicaId " +
                     metaHbckInfo.getReplicaId() + " is found on more than one region.");
        if (shouldFixAssignments()) {
          errors.print("Trying to fix a problem with hbase:meta, replicaId " +
                       metaHbckInfo.getReplicaId() +"..");
          setShouldRerun();
          // try fix it (treat it as a dupe assignment)
          HBaseFsckRepair.fixMultiAssignment(connection, metaHbckInfo.metaEntry, servers);
        }
      }
    }
  }
  // unassign whatever is remaining in metaRegions. They are excess replicas.
  for (Map.Entry<Integer, HbckInfo> entry : metaRegions.entrySet()) {
    noProblem = false;
    errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED,
        "hbase:meta replicas are deployed in excess. Configured " + metaReplication +
        ", deployed " + metaRegions.size());
    if (shouldFixAssignments()) {
      errors.print("Trying to undeploy excess replica, replicaId: " + entry.getKey() +
          " of hbase:meta..");
      setShouldRerun();
      unassignMetaReplica(entry.getValue());
    }
  }
  // if noProblem is false, rerun hbck with hopefully fixed META
  // if noProblem is true, no errors, so continue normally
  return noProblem;
}
/**
 * Undeploys an excess hbase:meta replica and deletes its location znode.
 *
 * @param hi the meta-replica region to unassign
 */
private void unassignMetaReplica(HbckInfo hi) throws IOException, InterruptedException,
  KeeperException {
  undeployRegions(hi);
  ZooKeeperWatcher zkw = createZooKeeperWatcher();
  try {
    ZKUtil.deleteNode(zkw, zkw.getZNodeForReplica(hi.metaEntry.getReplicaId()));
  } finally {
    // BUGFIX: the watcher was previously never closed, leaking a ZooKeeper
    // connection per call.
    zkw.close();
  }
}
// Reports an unassigned hbase:meta replica and, when fixes are enabled,
// assigns it and waits for the assignment to complete.
private void assignMetaReplica(int replicaId)
    throws IOException, KeeperException, InterruptedException {
  errors.reportError(ERROR_CODE.NO_META_REGION, "hbase:meta, replicaId " +
      replicaId +" is not found on any region.");
  if (shouldFixAssignments()) {
    errors.print("Trying to fix a problem with hbase:meta..");
    setShouldRerun();
    // try to fix it (treat it as unassigned region)
    HRegionInfo h = RegionReplicaUtil.getRegionInfoForReplica(
        HRegionInfo.FIRST_META_REGIONINFO, replicaId);
    HBaseFsckRepair.fixUnassigned(admin, h);
    HBaseFsckRepair.waitUntilAssigned(admin, h);
  }
}
/**
* Scan hbase:meta, adding all regions found to the regionInfo map.
* @throws IOException if an error is encountered
*/
/**
 * Scan hbase:meta, adding all regions found to the regionInfo map.
 *
 * @return always true (empty-cell problems are reported, not thrown)
 * @throws IOException if an error is encountered during the meta scan
 */
boolean loadMetaEntries() throws IOException {
  MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
    int countRecord = 1;

    // comparator to sort KeyValues with latest modtime
    final Comparator<Cell> comp = new Comparator<Cell>() {
      @Override
      public int compare(Cell k1, Cell k2) {
        // BUGFIX: the previous (int)(k1.getTimestamp() - k2.getTimestamp())
        // overflowed int for timestamps more than Integer.MAX_VALUE ms
        // (~24.8 days) apart, inverting the ordering and picking the wrong
        // "latest" cell. Long.compare is overflow-safe.
        return Long.compare(k1.getTimestamp(), k2.getTimestamp());
      }
    };

    @Override
    public boolean processRow(Result result) throws IOException {
      try {
        // record the latest modification of this META record
        long ts =  Collections.max(result.listCells(), comp).getTimestamp();
        RegionLocations rl = MetaTableAccessor.getRegionLocations(result);
        if (rl == null) {
          emptyRegionInfoQualifiers.add(result);
          errors.reportError(ERROR_CODE.EMPTY_META_CELL,
            "Empty REGIONINFO_QUALIFIER found in hbase:meta");
          return true;
        }
        ServerName sn = null;
        if (rl.getRegionLocation(HRegionInfo.DEFAULT_REPLICA_ID) == null ||
            rl.getRegionLocation(HRegionInfo.DEFAULT_REPLICA_ID).getRegionInfo() == null) {
          emptyRegionInfoQualifiers.add(result);
          errors.reportError(ERROR_CODE.EMPTY_META_CELL,
            "Empty REGIONINFO_QUALIFIER found in hbase:meta");
          return true;
        }
        HRegionInfo hri = rl.getRegionLocation(HRegionInfo.DEFAULT_REPLICA_ID).getRegionInfo();
        if (!(isTableIncluded(hri.getTable())
            || hri.isMetaRegion())) {
          return true;
        }
        PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(result);
        // Record a MetaEntry for the primary and every replica location.
        for (HRegionLocation h : rl.getRegionLocations()) {
          if (h == null || h.getRegionInfo() == null) {
            continue;
          }
          sn = h.getServerName();
          hri = h.getRegionInfo();

          MetaEntry m = null;
          if (hri.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
            m = new MetaEntry(hri, sn, ts, daughters.getFirst(), daughters.getSecond());
          } else {
            m = new MetaEntry(hri, sn, ts, null, null);
          }
          HbckInfo previous = regionInfoMap.get(hri.getEncodedName());
          if (previous == null) {
            regionInfoMap.put(hri.getEncodedName(), new HbckInfo(m));
          } else if (previous.metaEntry == null) {
            previous.metaEntry = m;
          } else {
            throw new IOException("Two entries in hbase:meta are same " + previous);
          }
        }
        PairOfSameType<HRegionInfo> mergeRegions = HRegionInfo.getMergeRegions(result);
        for (HRegionInfo mergeRegion : new HRegionInfo[] {
            mergeRegions.getFirst(), mergeRegions.getSecond() }) {
          if (mergeRegion != null) {
            // This region is already been merged
            HbckInfo hbInfo = getOrCreateInfo(mergeRegion.getEncodedName());
            hbInfo.setMerged(true);
          }
        }

        // show proof of progress to the user, once for every 100 records.
        if (countRecord % 100 == 0) {
          errors.progress();
        }
        countRecord++;
        return true;
      } catch (RuntimeException e) {
        // log the offending row before propagating for easier diagnosis
        LOG.error("Result=" + result);
        throw e;
      }
    }
  };
  if (!checkMetaOnly) {
    // Scan hbase:meta to pick up user regions
    MetaScanner.metaScan(connection, visitor);
  }

  errors.print("");
  return true;
}
/**
* Stores the regioninfo entries scanned from META
*/
static class MetaEntry extends HRegionInfo {
  ServerName regionServer;   // server hosting this region
  long modTime;          // timestamp of most recent modification metadata
  HRegionInfo splitA, splitB; //split daughters

  public MetaEntry(HRegionInfo rinfo, ServerName regionServer, long modTime) {
    this(rinfo, regionServer, modTime, null, null);
  }

  public MetaEntry(HRegionInfo rinfo, ServerName regionServer, long modTime,
      HRegionInfo splitA, HRegionInfo splitB) {
    super(rinfo);
    this.regionServer = regionServer;
    this.modTime = modTime;
    this.splitA = splitA;
    this.splitB = splitB;
  }

  @Override
  public boolean equals(Object o) {
    boolean superEq = super.equals(o);
    if (!superEq) {
      return superEq;
    }
    // BUGFIX: guard the downcast. A plain HRegionInfo can compare equal via
    // super.equals() yet not be a MetaEntry; the old code threw
    // ClassCastException here instead of returning false.
    if (!(o instanceof MetaEntry)) {
      return false;
    }
    MetaEntry me = (MetaEntry) o;
    // BUGFIX: null-safe server comparison -- hashCode() already tolerates a
    // null regionServer, but equals() previously NPE'd on it.
    if (regionServer == null ? me.regionServer != null
        : !regionServer.equals(me.regionServer)) {
      return false;
    }
    return (modTime == me.modTime);
  }

  @Override
  public int hashCode() {
    int hash = Arrays.hashCode(getRegionName());
    hash ^= getRegionId();
    hash ^= Arrays.hashCode(getStartKey());
    hash ^= Arrays.hashCode(getEndKey());
    hash ^= Boolean.valueOf(isOffline()).hashCode();
    hash ^= getTable().hashCode();
    if (regionServer != null) {
      hash ^= regionServer.hashCode();
    }
    hash ^= modTime;
    return hash;
  }
}
/**
* Stores the regioninfo entries from HDFS
*/
static class HdfsEntry {
  HRegionInfo hri;                            // parsed .regioninfo, if present
  Path hdfsRegionDir = null;                  // region directory in HDFS
  long hdfsRegionDirModTime  = 0;             // mod time of that directory
  boolean hdfsRegioninfoFilePresent = false;  // whether .regioninfo exists
  boolean hdfsOnlyEdits = false;              // region dir contains only recovered edits
}
/**
* Stores the regioninfo retrieved from Online region servers.
*/
static class OnlineEntry {
  HRegionInfo hri;   // region info as reported by the server
  ServerName hsa;    // server reporting the region online

  @Override
  public String toString() {
    return hsa.toString() + ";" + hri.getRegionNameAsString();
  }
}
/**
* Maintain information about a particular region. It gathers information
* from three places -- HDFS, META, and region servers.
*/
public static class HbckInfo implements KeyRange {
  private MetaEntry metaEntry = null; // info in META
  private HdfsEntry hdfsEntry = null; // info in HDFS
  private List<OnlineEntry> deployedEntries = Lists.newArrayList(); // on Region Server
  private List<ServerName> deployedOn = Lists.newArrayList(); // info on RS's
  private boolean skipChecks = false; // whether to skip further checks to this region info.
  private boolean isMerged = false;// whether this region has already been merged into another one
  private int deployedReplicaId = HRegionInfo.DEFAULT_REPLICA_ID;
  private HRegionInfo primaryHRIForDeployedReplica = null;

  HbckInfo(MetaEntry metaEntry) {
    this.metaEntry = metaEntry;
  }

  // Replica id from META when known, otherwise the id seen deployed on a RS.
  public int getReplicaId() {
    if (metaEntry != null) return metaEntry.getReplicaId();
    return deployedReplicaId;
  }

  // Records a deployment of this region on the given server and remembers
  // the deployed replica id plus the primary HRI it corresponds to.
  public synchronized void addServer(HRegionInfo hri, ServerName server) {
    OnlineEntry rse = new OnlineEntry() ;
    rse.hri = hri;
    rse.hsa = server;
    this.deployedEntries.add(rse);
    this.deployedOn.add(server);
    // save the replicaId that we see deployed in the cluster
    this.deployedReplicaId = hri.getReplicaId();
    this.primaryHRIForDeployedReplica =
        RegionReplicaUtil.getRegionInfoForDefaultReplica(hri);
  }

  @Override
  public synchronized String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("{ meta => ");
    sb.append((metaEntry != null)? metaEntry.getRegionNameAsString() : "null");
    sb.append( ", hdfs => " + getHdfsRegionDir());
    sb.append( ", deployed => " + Joiner.on(", ").join(deployedEntries));
    sb.append( ", replicaId => " + getReplicaId());
    sb.append(" }");
    return sb.toString();
  }

  // Start key, preferring META over HDFS; null (with an error log) when
  // neither source is available.
  @Override
  public byte[] getStartKey() {
    if (this.metaEntry != null) {
      return this.metaEntry.getStartKey();
    } else if (this.hdfsEntry != null) {
      return this.hdfsEntry.hri.getStartKey();
    } else {
      LOG.error("Entry " + this + " has no meta or hdfs region start key.");
      return null;
    }
  }

  // End key with the same META-then-HDFS preference as getStartKey().
  @Override
  public byte[] getEndKey() {
    if (this.metaEntry != null) {
      return this.metaEntry.getEndKey();
    } else if (this.hdfsEntry != null) {
      return this.hdfsEntry.hri.getEndKey();
    } else {
      LOG.error("Entry " + this + " has no meta or hdfs region start key.");
      return null;
    }
  }

  // Table name resolved from META, else the HDFS path, else the first
  // deployed entry; null when nothing is known.
  public TableName getTableName() {
    if (this.metaEntry != null) {
      return this.metaEntry.getTable();
    } else if (this.hdfsEntry != null) {
      // we are only guaranteed to have a path and not an HRI for hdfsEntry,
      // so we get the name from the Path
      Path tableDir = this.hdfsEntry.hdfsRegionDir.getParent();
      return FSUtils.getTableName(tableDir);
    } else {
      // return the info from the first online/deployed hri
      for (OnlineEntry e : deployedEntries) {
        return e.hri.getTable();
      }
      return null;
    }
  }

  public String getRegionNameAsString() {
    if (metaEntry != null) {
      return metaEntry.getRegionNameAsString();
    } else if (hdfsEntry != null) {
      if (hdfsEntry.hri != null) {
        return hdfsEntry.hri.getRegionNameAsString();
      }
    } else {
      // return the info from the first online/deployed hri
      for (OnlineEntry e : deployedEntries) {
        return e.hri.getRegionNameAsString();
      }
    }
    return null;
  }

  public byte[] getRegionName() {
    if (metaEntry != null) {
      return metaEntry.getRegionName();
    } else if (hdfsEntry != null) {
      return hdfsEntry.hri.getRegionName();
    } else {
      // return the info from the first online/deployed hri
      for (OnlineEntry e : deployedEntries) {
        return e.hri.getRegionName();
      }
      return null;
    }
  }

  public HRegionInfo getPrimaryHRIForDeployedReplica() {
    return primaryHRIForDeployedReplica;
  }

  Path getHdfsRegionDir() {
    if (hdfsEntry == null) {
      return null;
    }
    return hdfsEntry.hdfsRegionDir;
  }

  boolean containsOnlyHdfsEdits() {
    if (hdfsEntry == null) {
      return false;
    }
    return hdfsEntry.hdfsOnlyEdits;
  }

  boolean isHdfsRegioninfoPresent() {
    if (hdfsEntry == null) {
      return false;
    }
    return hdfsEntry.hdfsRegioninfoFilePresent;
  }

  long getModTime() {
    if (hdfsEntry == null) {
      return 0;
    }
    return hdfsEntry.hdfsRegionDirModTime;
  }

  HRegionInfo getHdfsHRI() {
    if (hdfsEntry == null) {
      return null;
    }
    return hdfsEntry.hri;
  }

  public void setSkipChecks(boolean skipChecks) {
    this.skipChecks = skipChecks;
  }

  public boolean isSkipChecks() {
    return skipChecks;
  }

  public void setMerged(boolean isMerged) {
    this.isMerged = isMerged;
  }

  public boolean isMerged() {
    return this.isMerged;
  }
}
/**
 * Orders HbckInfo entries by table, then start key, then end key (empty end
 * key treated as the absolute maximum), using the HDFS region id as the
 * final tiebreaker with null hdfsEntry sorting last.
 */
final static Comparator<HbckInfo> cmp = new Comparator<HbckInfo>() {
  @Override
  public int compare(HbckInfo l, HbckInfo r) {
    if (l == r) {
      // same instance
      return 0;
    }

    int tableCompare = l.getTableName().compareTo(r.getTableName());
    if (tableCompare != 0) {
      return tableCompare;
    }

    int startComparison = RegionSplitCalculator.BYTES_COMPARATOR.compare(
        l.getStartKey(), r.getStartKey());
    if (startComparison != 0) {
      return startComparison;
    }

    // Special case for absolute endkey
    byte[] endKey = r.getEndKey();
    endKey = (endKey.length == 0) ? null : endKey;
    byte[] endKey2 = l.getEndKey();
    endKey2 = (endKey2.length == 0) ? null : endKey2;
    int endComparison = RegionSplitCalculator.BYTES_COMPARATOR.compare(
        endKey2,  endKey);
    if (endComparison != 0) {
      return endComparison;
    }

    // use regionId as tiebreaker.
    // Null is considered after all possible values so make it bigger.
    if (l.hdfsEntry == null && r.hdfsEntry == null) {
      return 0;
    }
    if (l.hdfsEntry == null && r.hdfsEntry != null) {
      return 1;
    }
    // l.hdfsEntry must not be null
    if (r.hdfsEntry == null) {
      return -1;
    }
    // both l.hdfsEntry and r.hdfsEntry must not be null.
    // BUGFIX: the previous (int)(a - b) cast could overflow int and invert
    // the ordering for region ids more than Integer.MAX_VALUE apart;
    // Long.compare is overflow-safe.
    return Long.compare(l.hdfsEntry.hri.getRegionId(), r.hdfsEntry.hri.getRegionId());
  }
};
/**
* Prints summary of all tables found on the system.
*/
/**
 * Prints a per-table summary: consistency verdict, region count, and the
 * set of region servers each table was seen deployed on.
 */
private void printTableSummary(SortedMap<TableName, TableInfo> tablesInfo) {
  errors.print("Summary:");
  for (TableInfo table : tablesInfo.values()) {
    if (errors.tableHasErrors(table)) {
      errors.print("Table " + table.getName() + " is inconsistent.");
    } else {
      errors.print(" " + table.getName() + " is okay.");
    }
    errors.print(" Number of regions: " + table.getNumRegions());
    StringBuilder deployed = new StringBuilder(" Deployed on: ");
    for (ServerName server : table.deployedOn) {
      deployed.append(" ").append(server.toString());
    }
    errors.print(deployed.toString());
  }
}
/**
 * Instantiates the ErrorReporter implementation named by the
 * "hbasefsck.errorreporter" configuration key, defaulting to
 * {@link PrintingErrorReporter}.
 */
static ErrorReporter getErrorReporter(
    final Configuration conf) throws ClassNotFoundException {
  Class<? extends ErrorReporter> reporterClass =
      conf.getClass("hbasefsck.errorreporter", PrintingErrorReporter.class, ErrorReporter.class);
  return ReflectionUtils.newInstance(reporterClass, conf);
}
/**
 * Receives, counts, and renders every inconsistency hbck discovers.
 * Implementations decide how errors are surfaced (stdout, logs, tests).
 */
public interface ErrorReporter {
  // Every category of inconsistency hbck can detect.
  enum ERROR_CODE {
    UNKNOWN, NO_META_REGION, NULL_META_REGION, NO_VERSION_FILE, NOT_IN_META_HDFS, NOT_IN_META,
    NOT_IN_META_OR_DEPLOYED, NOT_IN_HDFS_OR_DEPLOYED, NOT_IN_HDFS, SERVER_DOES_NOT_MATCH_META,
    NOT_DEPLOYED,
    MULTI_DEPLOYED, SHOULD_NOT_BE_DEPLOYED, MULTI_META_REGION, RS_CONNECT_FAILURE,
    FIRST_REGION_STARTKEY_NOT_EMPTY, LAST_REGION_ENDKEY_NOT_EMPTY, DUPE_STARTKEYS,
    HOLE_IN_REGION_CHAIN, OVERLAP_IN_REGION_CHAIN, REGION_CYCLE, DEGENERATE_REGION,
    ORPHAN_HDFS_REGION, LINGERING_SPLIT_PARENT, NO_TABLEINFO_FILE, LINGERING_REFERENCE_HFILE,
    WRONG_USAGE, EMPTY_META_CELL, EXPIRED_TABLE_LOCK, BOUNDARIES_ERROR, ORPHAN_TABLE_STATE,
    NO_TABLE_STATE
  }
  // Resets all accumulated errors/state.
  void clear();
  // Informational message (not an error).
  void report(String message);
  // Error without a code; overloads add table/region context.
  void reportError(String message);
  void reportError(ERROR_CODE errorCode, String message);
  void reportError(ERROR_CODE errorCode, String message, TableInfo table);
  void reportError(ERROR_CODE errorCode, String message, TableInfo table, HbckInfo info);
  void reportError(
    ERROR_CODE errorCode,
    String message,
    TableInfo table,
    HbckInfo info1,
    HbckInfo info2
  );
  // Prints a final summary and returns the total error count.
  int summarize();
  // Detail-level (verbose) message.
  void detail(String details);
  ArrayList<ERROR_CODE> getErrorList();
  // Emits a progress indicator during long scans.
  void progress();
  void print(String message);
  void resetErrors();
  boolean tableHasErrors(TableInfo table);
}
  /**
   * Default {@link ErrorReporter}: prints errors to stdout/stderr and keeps a
   * running count plus the list of error codes seen. Honors the enclosing
   * class's {@code summary} and {@code details} flags to suppress or enable
   * output (assumed to be static/instance-visible fields of HBaseFsck, since
   * they are referenced from this static nested class).
   */
  static class PrintingErrorReporter implements ErrorReporter {
    public int errorCount = 0;
    // Counts progress() calls since the last output/reset; drives the "." indicator.
    private int showProgress;
    // Tables that have had at least one error reported against them.
    Set<TableInfo> errorTables = new HashSet<TableInfo>();
    // for use by unit tests to verify which errors were discovered
    private ArrayList<ERROR_CODE> errorList = new ArrayList<ERROR_CODE>();
    @Override
    public void clear() {
      errorTables.clear();
      errorList.clear();
      errorCount = 0;
    }
    @Override
    public synchronized void reportError(ERROR_CODE errorCode, String message) {
      // Usage errors go to stderr and are deliberately not counted or recorded.
      if (errorCode == ERROR_CODE.WRONG_USAGE) {
        System.err.println(message);
        return;
      }
      errorList.add(errorCode);
      if (!summary) {
        System.out.println("ERROR: " + message);
      }
      errorCount++;
      showProgress = 0;
    }
    @Override
    public synchronized void reportError(ERROR_CODE errorCode, String message, TableInfo table) {
      errorTables.add(table);
      reportError(errorCode, message);
    }
    @Override
    public synchronized void reportError(ERROR_CODE errorCode, String message, TableInfo table,
                                         HbckInfo info) {
      errorTables.add(table);
      // Prefix the message with the offending region for context.
      String reference = "(region " + info.getRegionNameAsString() + ")";
      reportError(errorCode, reference + " " + message);
    }
    @Override
    public synchronized void reportError(ERROR_CODE errorCode, String message, TableInfo table,
                                         HbckInfo info1, HbckInfo info2) {
      errorTables.add(table);
      // Prefix the message with both offending regions (e.g. for overlap errors).
      String reference = "(regions " + info1.getRegionNameAsString()
          + " and " + info2.getRegionNameAsString() + ")";
      reportError(errorCode, reference + " " + message);
    }
    @Override
    public synchronized void reportError(String message) {
      reportError(ERROR_CODE.UNKNOWN, message);
    }
    /**
     * Report error information, but do not increment the error count.  Intended for cases
     * where the actual error would have been reported previously.
     * @param message
     */
    @Override
    public synchronized void report(String message) {
      if (! summary) {
        System.out.println("ERROR: " + message);
      }
      showProgress = 0;
    }
    @Override
    public synchronized int summarize() {
      System.out.println(Integer.toString(errorCount) +
                         " inconsistencies detected.");
      if (errorCount == 0) {
        System.out.println("Status: OK");
        return 0;
      } else {
        System.out.println("Status: INCONSISTENT");
        return -1;
      }
    }
    @Override
    public ArrayList<ERROR_CODE> getErrorList() {
      return errorList;
    }
    @Override
    public synchronized void print(String message) {
      if (!summary) {
        System.out.println(message);
      }
    }
    @Override
    public boolean tableHasErrors(TableInfo table) {
      return errorTables.contains(table);
    }
    @Override
    public void resetErrors() {
      // Note: only the count is reset; errorList/errorTables are kept (see clear()).
      errorCount = 0;
    }
    @Override
    public synchronized void detail(String message) {
      // Verbose output is gated on the -details flag.
      if (details) {
        System.out.println(message);
      }
      showProgress = 0;
    }
    @Override
    public synchronized void progress() {
      // Print a dot every 11th call (post-increment reaches 10), unless in summary mode.
      if (showProgress++ == 10) {
        if (!summary) {
          System.out.print(".");
        }
        showProgress = 0;
      }
    }
  }
  /**
   * Contact a region server and get all information from it.
   * Executed on the fsck thread pool, one item per region server; each online
   * region found is recorded into the shared HBaseFsck state via
   * {@code getOrCreateInfo(...).addServer(...)}.
   */
  static class WorkItemRegion implements Callable<Void> {
    private HBaseFsck hbck;
    private ServerName rsinfo;
    private ErrorReporter errors;
    private HConnection connection;
    WorkItemRegion(HBaseFsck hbck, ServerName info,
                   ErrorReporter errors, HConnection connection) {
      this.hbck = hbck;
      this.rsinfo = info;
      this.errors = errors;
      this.connection = connection;
    }
    @Override
    public synchronized Void call() throws IOException {
      errors.progress();
      try {
        BlockingInterface server = connection.getAdmin(rsinfo);
        // list all online regions from this region server
        List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
        // Drop regions outside the requested table set (meta is always kept).
        regions = filterRegions(regions);
        if (details) {
          errors.detail("RegionServer: " + rsinfo.getServerName() +
                          " number of regions: " + regions.size());
          for (HRegionInfo rinfo: regions) {
            errors.detail("  " + rinfo.getRegionNameAsString() +
                             " id: " + rinfo.getRegionId() +
                             " encoded_name: " + rinfo.getEncodedName() +
                             " start: " + Bytes.toStringBinary(rinfo.getStartKey()) +
                             " end: " + Bytes.toStringBinary(rinfo.getEndKey()));
          }
        }
        // check to see if the existence of this region matches the region in META
        for (HRegionInfo r:regions) {
          HbckInfo hbi = hbck.getOrCreateInfo(r.getEncodedName());
          hbi.addServer(r, rsinfo);
        }
      } catch (IOException e) {          // unable to connect to the region server.
        // Record the connect failure, then rethrow so the executor sees the failure too.
        errors.reportError(ERROR_CODE.RS_CONNECT_FAILURE, "RegionServer: " + rsinfo.getServerName() +
          " Unable to fetch region information. " + e);
        throw e;
      }
      return null;
    }
    /**
     * Keeps only meta regions and regions of tables selected for checking
     * (all tables when no explicit include list was given and -metaonly is off).
     */
    private List<HRegionInfo> filterRegions(List<HRegionInfo> regions) {
      List<HRegionInfo> ret = Lists.newArrayList();
      for (HRegionInfo hri : regions) {
        if (hri.isMetaTable() || (!hbck.checkMetaOnly
            && hbck.isTableIncluded(hri.getTable()))) {
          ret.add(hri);
        }
      }
      return ret;
    }
  }
  /**
   * Contact hdfs and get all information about specified table directory into
   * regioninfo list.
   * One item per table directory; scans each region subdirectory and fills in
   * the corresponding {@link HbckInfo}'s {@code hdfsEntry}.
   */
  static class WorkItemHdfsDir implements Callable<Void> {
    private HBaseFsck hbck;
    private FileStatus tableDir;
    private ErrorReporter errors;
    private FileSystem fs;
    WorkItemHdfsDir(HBaseFsck hbck, FileSystem fs, ErrorReporter errors,
                    FileStatus status) {
      this.hbck = hbck;
      this.fs = fs;
      this.tableDir = status;
      this.errors = errors;
    }
    @Override
    public synchronized Void call() throws IOException {
      try {
        // level 2: <HBASE_DIR>/<table>/*
        FileStatus[] regionDirs = fs.listStatus(tableDir.getPath());
        for (FileStatus regionDir : regionDirs) {
          String encodedName = regionDir.getPath().getName();
          // ignore directories that aren't hexadecimal
          // NOTE(review): toLowerCase() uses the default locale; hex names are
          // ASCII so this is safe in practice, but Locale.ROOT would be stricter.
          if (!encodedName.toLowerCase().matches("[0-9a-f]+")) {
            continue;
          }
          LOG.debug("Loading region info from hdfs:"+ regionDir.getPath());
          HbckInfo hbi = hbck.getOrCreateInfo(encodedName);
          HdfsEntry he = new HdfsEntry();
          // Lock the shared HbckInfo: other work items may touch the same region.
          synchronized (hbi) {
            if (hbi.getHdfsRegionDir() != null) {
              errors.print("Directory " + encodedName + " duplicate??" +
                           hbi.getHdfsRegionDir());
            }
            he.hdfsRegionDir = regionDir.getPath();
            he.hdfsRegionDirModTime = regionDir.getModificationTime();
            Path regioninfoFile = new Path(he.hdfsRegionDir, HRegionFileSystem.REGION_INFO_FILE);
            he.hdfsRegioninfoFilePresent = fs.exists(regioninfoFile);
            // we add to orphan list when we attempt to read .regioninfo
            // Set a flag if this region contains only edits
            // This is special case if a region is left after split
            he.hdfsOnlyEdits = true;
            FileStatus[] subDirs = fs.listStatus(regionDir.getPath());
            Path ePath = WALSplitter.getRegionDirRecoveredEditsDir(regionDir.getPath());
            for (FileStatus subDir : subDirs) {
              String sdName = subDir.getPath().getName();
              // Any non-hidden subdir other than recovered.edits means real store data.
              if (!sdName.startsWith(".") && !sdName.equals(ePath.getName())) {
                he.hdfsOnlyEdits = false;
                break;
              }
            }
            hbi.hdfsEntry = he;
          }
        }
      } catch (IOException e) {
        // unable to connect to the region server.
        errors.reportError(ERROR_CODE.RS_CONNECT_FAILURE, "Table Directory: "
            + tableDir.getPath().getName()
            + " Unable to fetch region information. " + e);
        throw e;
      }
      return null;
    }
  }
  /**
   * Contact hdfs and get all information about specified table directory into
   * regioninfo list.
   * One item per region: loads the on-disk .regioninfo for the region if it has
   * not been loaded yet; on failure the region is recorded as an HDFS orphan.
   */
  static class WorkItemHdfsRegionInfo implements Callable<Void> {
    private HbckInfo hbi;
    private HBaseFsck hbck;
    private ErrorReporter errors;
    WorkItemHdfsRegionInfo(HbckInfo hbi, HBaseFsck hbck, ErrorReporter errors) {
      this.hbi = hbi;
      this.hbck = hbck;
      this.errors = errors;
    }
    @Override
    public synchronized Void call() throws IOException {
      // only load entries that haven't been loaded yet.
      if (hbi.getHdfsHRI() == null) {
        try {
          hbck.loadHdfsRegioninfo(hbi);
        } catch (IOException ioe) {
          String msg = "Orphan region in HDFS: Unable to load .regioninfo from table "
              + hbi.getTableName() + " in hdfs dir "
              + hbi.getHdfsRegionDir()
              + "!  It may be an invalid format or version file.  Treating as "
              + "an orphaned regiondir.";
          errors.reportError(ERROR_CODE.ORPHAN_HDFS_REGION, msg);
          try {
            // Dump the directory listing to the debug log to aid diagnosis.
            hbck.debugLsr(hbi.getHdfsRegionDir());
          } catch (IOException ioe2) {
            LOG.error("Unable to read directory " + hbi.getHdfsRegionDir(), ioe2);
            throw ioe2;
          }
          // Remember the orphan so a later fix pass (e.g. -fixHdfsOrphans) can adopt it.
          hbck.orphanHdfsDirs.add(hbi);
          throw ioe;
        }
      }
      return null;
    }
  };
  /**
   * Display the full report from fsck. This displays all live and dead region
   * servers, and all known regions.
   */
  public void setDisplayFullReport() {
    details = true;
  }
  /**
   * Set summary mode.
   * Print only summary of the tables and status (OK or INCONSISTENT)
   */
  void setSummary() {
    summary = true;
  }
  /**
   * Set hbase:meta check mode.
   * Print only info about hbase:meta table deployment/state
   */
  void setCheckMetaOnly() {
    checkMetaOnly = true;
  }
  /**
   * Set region boundaries check mode.
   */
  void setRegionBoundariesCheck() {
    checkRegionBoundaries = true;
  }
  /**
   * Set table locks fix mode.
   * Delete table locks held for a long time
   */
  public void setFixTableLocks(boolean shouldFix) {
    fixTableLocks = shouldFix;
    // fixAny tracks whether ANY repair option is on; it gates the
    // filesystem-permission pre-check (see shouldIgnorePreCheckPermission()).
    fixAny |= shouldFix;
  }
  /**
   * Mark that a fix was attempted, so fsck should be rerun once more to verify
   * the repair did not introduce new inconsistencies.
   */
  void setShouldRerun() {
    rerun = true;
  }
  /** Returns true if a fix was attempted and a verification rerun is warranted. */
  boolean shouldRerun() {
    return rerun;
  }
  /**
   * Fix inconsistencies found by fsck. This should try to fix errors (if any)
   * found by fsck utility.
   */
  public void setFixAssignments(boolean shouldFix) {
    fixAssignments = shouldFix;
    fixAny |= shouldFix;
  }
  boolean shouldFixAssignments() {
    return fixAssignments;
  }
  /** Enable/disable fixing of hbase:meta problems (assumes HDFS region info is good). */
  public void setFixMeta(boolean shouldFix) {
    fixMeta = shouldFix;
    fixAny |= shouldFix;
  }
  boolean shouldFixMeta() {
    return fixMeta;
  }
  /** Enable/disable fixing of hbase:meta rows with empty REGIONINFO_QUALIFIER cells. */
  public void setFixEmptyMetaCells(boolean shouldFix) {
    fixEmptyMetaCells = shouldFix;
    fixAny |= shouldFix;
  }
  boolean shouldFixEmptyMetaCells() {
    return fixEmptyMetaCells;
  }
  /** Enable/disable loading and checking region info from HDFS (-noHdfsChecking turns this off). */
  public void setCheckHdfs(boolean checking) {
    checkHdfs = checking;
  }
  boolean shouldCheckHdfs() {
    return checkHdfs;
  }
  /** Enable/disable plugging of region holes found in HDFS. */
  public void setFixHdfsHoles(boolean shouldFix) {
    fixHdfsHoles = shouldFix;
    fixAny |= shouldFix;
  }
  boolean shouldFixHdfsHoles() {
    return fixHdfsHoles;
  }
  /** Enable/disable fixing of table dirs that lack a .tableinfo file. */
  public void setFixTableOrphans(boolean shouldFix) {
    fixTableOrphans = shouldFix;
    fixAny |= shouldFix;
  }
  boolean shouldFixTableOrphans() {
    return fixTableOrphans;
  }
  /** Enable/disable fixing of overlapping regions found in HDFS. */
  public void setFixHdfsOverlaps(boolean shouldFix) {
    fixHdfsOverlaps = shouldFix;
    fixAny |= shouldFix;
  }
  boolean shouldFixHdfsOverlaps() {
    return fixHdfsOverlaps;
  }
  /** Enable/disable adoption of region dirs that have no .regioninfo file. */
  public void setFixHdfsOrphans(boolean shouldFix) {
    fixHdfsOrphans = shouldFix;
    fixAny |= shouldFix;
  }
  boolean shouldFixHdfsOrphans() {
    return fixHdfsOrphans;
  }
  /** Enable/disable recreation of a missing hbase.version file. */
  public void setFixVersionFile(boolean shouldFix) {
    fixVersionFile = shouldFix;
    fixAny |= shouldFix;
  }
  public boolean shouldFixVersionFile() {
    return fixVersionFile;
  }
  /** When fixing overlaps, allow sidelining of big overlapping groups instead of merging. */
  public void setSidelineBigOverlaps(boolean sbo) {
    this.sidelineBigOverlaps = sbo;
  }
  public boolean shouldSidelineBigOverlaps() {
    return sidelineBigOverlaps;
  }
  /** Enable/disable forcing offline split parents back online. */
  public void setFixSplitParents(boolean shouldFix) {
    fixSplitParents = shouldFix;
    fixAny |= shouldFix;
  }
  boolean shouldFixSplitParents() {
    return fixSplitParents;
  }
  /** Enable/disable offlining of lingering reference store files. */
  public void setFixReferenceFiles(boolean shouldFix) {
    fixReferenceFiles = shouldFix;
    fixAny |= shouldFix;
  }
  boolean shouldFixReferenceFiles() {
    return fixReferenceFiles;
  }
  /** The permission pre-check is skipped in read-only runs, or when explicitly disabled. */
  public boolean shouldIgnorePreCheckPermission() {
    return !fixAny || ignorePreCheckPermission;
  }
  public void setIgnorePreCheckPermission(boolean ignorePreCheckPermission) {
    this.ignorePreCheckPermission = ignorePreCheckPermission;
  }
  /**
   * @param mm maximum number of regions to merge into a single region.
   */
  public void setMaxMerge(int mm) {
    this.maxMerge = mm;
  }
  public int getMaxMerge() {
    return maxMerge;
  }
  /** @param mo maximum number of overlapping regions to sideline per group. */
  public void setMaxOverlapsToSideline(int mo) {
    this.maxOverlapsToSideline = mo;
  }
  public int getMaxOverlapsToSideline() {
    return maxOverlapsToSideline;
  }
  /**
   * Only check/fix tables specified by the list,
   * Empty list means all tables are included.
   */
  boolean isTableIncluded(TableName table) {
    return (tablesIncluded.size() == 0) || tablesIncluded.contains(table);
  }
  public void includeTable(TableName table) {
    tablesIncluded.add(table);
  }
  /** Returns a defensive copy of the set of tables selected for checking. */
  Set<TableName> getIncludedTables() {
    return new HashSet<TableName>(tablesIncluded);
  }
  /**
   * We are interested in only those tables that have not changed their state in
   * hbase:meta during the last few seconds specified by hbase.admin.fsck.timelag
   * @param seconds - the time in seconds
   */
  public void setTimeLag(long seconds) {
    timelag = seconds * 1000; // convert to milliseconds
  }
  /**
   *
   * @param sidelineDir - HDFS path to sideline data
   */
  public void setSidelineDir(String sidelineDir) {
    this.sidelineDir = new Path(sidelineDir);
  }
  /** Builds the HFile corruption checker, optionally quarantining corrupt files. */
  protected HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException {
    return new HFileCorruptionChecker(getConf(), executor, sidelineCorruptHFiles);
  }
  public HFileCorruptionChecker getHFilecorruptionChecker() {
    return hfcc;
  }
  public void setHFileCorruptionChecker(HFileCorruptionChecker hfcc) {
    this.hfcc = hfcc;
  }
  /** Sets the process exit code reported by the tool. */
  public void setRetCode(int code) {
    this.retcode = code;
  }
  public int getRetCode() {
    return retcode;
  }
protected HBaseFsck printUsageAndExit() {
StringWriter sw = new StringWriter(2048);
PrintWriter out = new PrintWriter(sw);
out.println("Usage: fsck [opts] {only tables}");
out.println(" where [opts] are:");
out.println(" -help Display help options (this)");
out.println(" -details Display full report of all regions.");
out.println(" -timelag <timeInSeconds> Process only regions that " +
" have not experienced any metadata updates in the last " +
" <timeInSeconds> seconds.");
out.println(" -sleepBeforeRerun <timeInSeconds> Sleep this many seconds" +
" before checking if the fix worked if run with -fix");
out.println(" -summary Print only summary of the tables and status.");
out.println(" -metaonly Only check the state of the hbase:meta table.");
out.println(" -sidelineDir <hdfs://> HDFS path to backup existing meta.");
out.println(" -boundaries Verify that regions boundaries are the same between META and store files.");
out.println("");
out.println(" Metadata Repair options: (expert features, use with caution!)");
out.println(" -fix Try to fix region assignments. This is for backwards compatiblity");
out.println(" -fixAssignments Try to fix region assignments. Replaces the old -fix");
out.println(" -fixMeta Try to fix meta problems. This assumes HDFS region info is good.");
out.println(" -noHdfsChecking Don't load/check region info from HDFS."
+ " Assumes hbase:meta region info is good. Won't check/fix any HDFS issue, e.g. hole, orphan, or overlap");
out.println(" -fixHdfsHoles Try to fix region holes in hdfs.");
out.println(" -fixHdfsOrphans Try to fix region dirs with no .regioninfo file in hdfs");
out.println(" -fixTableOrphans Try to fix table dirs with no .tableinfo file in hdfs (online mode only)");
out.println(" -fixHdfsOverlaps Try to fix region overlaps in hdfs.");
out.println(" -fixVersionFile Try to fix missing hbase.version file in hdfs.");
out.println(" -maxMerge <n> When fixing region overlaps, allow at most <n> regions to merge. (n=" + DEFAULT_MAX_MERGE +" by default)");
out.println(" -sidelineBigOverlaps When fixing region overlaps, allow to sideline big overlaps");
out.println(" -maxOverlapsToSideline <n> When fixing region overlaps, allow at most <n> regions to sideline per group. (n=" + DEFAULT_OVERLAPS_TO_SIDELINE +" by default)");
out.println(" -fixSplitParents Try to force offline split parents to be online.");
out.println(" -ignorePreCheckPermission ignore filesystem permission pre-check");
out.println(" -fixReferenceFiles Try to offline lingering reference store files");
out.println(" -fixEmptyMetaCells Try to fix hbase:meta entries not referencing any region"
+ " (empty REGIONINFO_QUALIFIER rows)");
out.println("");
out.println(" Datafile Repair options: (expert features, use with caution!)");
out.println(" -checkCorruptHFiles Check all Hfiles by opening them to make sure they are valid");
out.println(" -sidelineCorruptHFiles Quarantine corrupted HFiles. implies -checkCorruptHFiles");
out.println("");
out.println(" Metadata Repair shortcuts");
out.println(" -repair Shortcut for -fixAssignments -fixMeta -fixHdfsHoles " +
"-fixHdfsOrphans -fixHdfsOverlaps -fixVersionFile -sidelineBigOverlaps -fixReferenceFiles -fixTableLocks");
out.println(" -repairHoles Shortcut for -fixAssignments -fixMeta -fixHdfsHoles");
out.println("");
out.println(" Table lock options");
out.println(" -fixTableLocks Deletes table locks held for a long time (hbase.table.lock.expire.ms, 10min by default)");
out.flush();
errors.reportError(ERROR_CODE.WRONG_USAGE, sw.toString());
setRetCode(-2);
return this;
}
  /**
   * Main program: builds the default configuration, points the default
   * filesystem at the HBase root dir, and delegates to {@link HBaseFsckTool}
   * via Hadoop's ToolRunner (which also parses generic -D options).
   *
   * @param args command-line arguments, passed through to {@link HBaseFsckTool#run}
   * @throws Exception on any unrecoverable failure
   */
  public static void main(String[] args) throws Exception {
    // create a fsck object
    Configuration conf = HBaseConfiguration.create();
    Path hbasedir = FSUtils.getRootDir(conf);
    // Make the filesystem hosting the HBase root dir the default FS for this run.
    URI defaultFs = hbasedir.getFileSystem(conf).getUri();
    FSUtils.setFsDefault(conf, new Path(defaultFs));
    int ret =  ToolRunner.run(new HBaseFsckTool(conf), args);
    System.exit(ret);
  }
  /**
   * This is a Tool wrapper that gathers -Dxxx=yyy configuration settings from the command line.
   */
  static class HBaseFsckTool extends Configured implements Tool {
    HBaseFsckTool(Configuration conf) { super(conf); }
    @Override
    public int run(String[] args) throws Exception {
      // Build, run, and tear down one fsck instance; its retcode becomes the exit code.
      HBaseFsck hbck = new HBaseFsck(getConf());
      hbck.exec(hbck.executor, args);
      hbck.close();
      return hbck.getRetCode();
    }
  };
  /**
   * Parses the command-line options, runs the permission pre-check, connects,
   * optionally scans/repairs corrupt HFiles, and then performs the online hbck
   * pass.  If any fix was attempted, one (and only one) verification rerun is
   * done in report-only mode.  The resulting status is stored via setRetCode().
   *
   * @param exec executor service used by the corruption checker (the instance
   *             field {@code executor} is used elsewhere; this parameter shadows it here)
   * @param args raw command-line arguments
   * @return this, for chaining; check {@link #getRetCode()} for the outcome
   */
  public HBaseFsck exec(ExecutorService exec, String[] args) throws KeeperException, IOException,
      ServiceException, InterruptedException {
    long sleepBeforeRerun = DEFAULT_SLEEP_BEFORE_RERUN;
    boolean checkCorruptHFiles = false;
    boolean sidelineCorruptHFiles = false;
    // Process command-line args.
    for (int i = 0; i < args.length; i++) {
      String cmd = args[i];
      if (cmd.equals("-help") || cmd.equals("-h")) {
        return printUsageAndExit();
      } else if (cmd.equals("-details")) {
        setDisplayFullReport();
      } else if (cmd.equals("-timelag")) {
        // Options taking a value verify a following argument exists, then consume it (i++).
        if (i == args.length - 1) {
          errors.reportError(ERROR_CODE.WRONG_USAGE, "HBaseFsck: -timelag needs a value.");
          return printUsageAndExit();
        }
        try {
          long timelag = Long.parseLong(args[i+1]);
          setTimeLag(timelag);
        } catch (NumberFormatException e) {
          errors.reportError(ERROR_CODE.WRONG_USAGE, "-timelag needs a numeric value.");
          return printUsageAndExit();
        }
        i++;
      } else if (cmd.equals("-sleepBeforeRerun")) {
        if (i == args.length - 1) {
          errors.reportError(ERROR_CODE.WRONG_USAGE,
            "HBaseFsck: -sleepBeforeRerun needs a value.");
          return printUsageAndExit();
        }
        try {
          sleepBeforeRerun = Long.parseLong(args[i+1]);
        } catch (NumberFormatException e) {
          errors.reportError(ERROR_CODE.WRONG_USAGE, "-sleepBeforeRerun needs a numeric value.");
          return printUsageAndExit();
        }
        i++;
      } else if (cmd.equals("-sidelineDir")) {
        if (i == args.length - 1) {
          errors.reportError(ERROR_CODE.WRONG_USAGE, "HBaseFsck: -sidelineDir needs a value.");
          return printUsageAndExit();
        }
        i++;
        setSidelineDir(args[i]);
      } else if (cmd.equals("-fix")) {
        errors.reportError(ERROR_CODE.WRONG_USAGE,
          "This option is deprecated, please use  -fixAssignments instead.");
        setFixAssignments(true);
      } else if (cmd.equals("-fixAssignments")) {
        setFixAssignments(true);
      } else if (cmd.equals("-fixMeta")) {
        setFixMeta(true);
      } else if (cmd.equals("-noHdfsChecking")) {
        setCheckHdfs(false);
      } else if (cmd.equals("-fixHdfsHoles")) {
        setFixHdfsHoles(true);
      } else if (cmd.equals("-fixHdfsOrphans")) {
        setFixHdfsOrphans(true);
      } else if (cmd.equals("-fixTableOrphans")) {
        setFixTableOrphans(true);
      } else if (cmd.equals("-fixHdfsOverlaps")) {
        setFixHdfsOverlaps(true);
      } else if (cmd.equals("-fixVersionFile")) {
        setFixVersionFile(true);
      } else if (cmd.equals("-sidelineBigOverlaps")) {
        setSidelineBigOverlaps(true);
      } else if (cmd.equals("-fixSplitParents")) {
        setFixSplitParents(true);
      } else if (cmd.equals("-ignorePreCheckPermission")) {
        setIgnorePreCheckPermission(true);
      } else if (cmd.equals("-checkCorruptHFiles")) {
        checkCorruptHFiles = true;
      } else if (cmd.equals("-sidelineCorruptHFiles")) {
        sidelineCorruptHFiles = true;
      } else if (cmd.equals("-fixReferenceFiles")) {
        setFixReferenceFiles(true);
      } else if (cmd.equals("-fixEmptyMetaCells")) {
        setFixEmptyMetaCells(true);
      } else if (cmd.equals("-repair")) {
        // this attempts to merge overlapping hdfs regions, needs testing
        // under load
        setFixHdfsHoles(true);
        setFixHdfsOrphans(true);
        setFixMeta(true);
        setFixAssignments(true);
        setFixHdfsOverlaps(true);
        setFixVersionFile(true);
        setSidelineBigOverlaps(true);
        // Note: -repair deliberately does NOT enable split-parent fixing.
        setFixSplitParents(false);
        setCheckHdfs(true);
        setFixReferenceFiles(true);
        setFixTableLocks(true);
      } else if (cmd.equals("-repairHoles")) {
        // this will make all missing hdfs regions available but may lose data
        setFixHdfsHoles(true);
        setFixHdfsOrphans(false);
        setFixMeta(true);
        setFixAssignments(true);
        setFixHdfsOverlaps(false);
        setSidelineBigOverlaps(false);
        setFixSplitParents(false);
        setCheckHdfs(true);
      } else if (cmd.equals("-maxOverlapsToSideline")) {
        if (i == args.length - 1) {
          errors.reportError(ERROR_CODE.WRONG_USAGE,
            "-maxOverlapsToSideline needs a numeric value argument.");
          return printUsageAndExit();
        }
        try {
          int maxOverlapsToSideline = Integer.parseInt(args[i+1]);
          setMaxOverlapsToSideline(maxOverlapsToSideline);
        } catch (NumberFormatException e) {
          errors.reportError(ERROR_CODE.WRONG_USAGE,
            "-maxOverlapsToSideline needs a numeric value argument.");
          return printUsageAndExit();
        }
        i++;
      } else if (cmd.equals("-maxMerge")) {
        if (i == args.length - 1) {
          errors.reportError(ERROR_CODE.WRONG_USAGE,
            "-maxMerge needs a numeric value argument.");
          return printUsageAndExit();
        }
        try {
          int maxMerge = Integer.parseInt(args[i+1]);
          setMaxMerge(maxMerge);
        } catch (NumberFormatException e) {
          errors.reportError(ERROR_CODE.WRONG_USAGE,
            "-maxMerge needs a numeric value argument.");
          return printUsageAndExit();
        }
        i++;
      } else if (cmd.equals("-summary")) {
        setSummary();
      } else if (cmd.equals("-metaonly")) {
        setCheckMetaOnly();
      } else if (cmd.equals("-boundaries")) {
        setRegionBoundariesCheck();
      } else if (cmd.equals("-fixTableLocks")) {
        setFixTableLocks(true);
      } else if (cmd.startsWith("-")) {
        errors.reportError(ERROR_CODE.WRONG_USAGE, "Unrecognized option:" + cmd);
        return printUsageAndExit();
      } else {
        // Any non-option argument is a table name to restrict the check to.
        includeTable(TableName.valueOf(cmd));
        errors.print("Allow checking/fixes for table: " + cmd);
      }
    }
    errors.print("HBaseFsck command line options: " + StringUtils.join(args, " "));
    // pre-check current user has FS write permission or not
    // NOTE(review): both catch blocks exit without printing here — presumably
    // preCheckPermission() itself reports the problem before throwing; confirm.
    try {
      preCheckPermission();
    } catch (AccessDeniedException ace) {
      Runtime.getRuntime().exit(-1);
    } catch (IOException ioe) {
      Runtime.getRuntime().exit(-1);
    }
    // do the real work of hbck
    connect();
    try {
      // if corrupt file mode is on, first fix them since they may be opened later
      if (checkCorruptHFiles || sidelineCorruptHFiles) {
        LOG.info("Checking all hfiles for corruption");
        HFileCorruptionChecker hfcc = createHFileCorruptionChecker(sidelineCorruptHFiles);
        setHFileCorruptionChecker(hfcc); // so we can get result
        Collection<TableName> tables = getIncludedTables();
        Collection<Path> tableDirs = new ArrayList<Path>();
        Path rootdir = FSUtils.getRootDir(getConf());
        if (tables.size() > 0) {
          for (TableName t : tables) {
            tableDirs.add(FSUtils.getTableDir(rootdir, t));
          }
        } else {
          tableDirs = FSUtils.getTableDirs(FSUtils.getCurrentFileSystem(getConf()), rootdir);
        }
        hfcc.checkTables(tableDirs);
        hfcc.report(errors);
      }
      // check and fix table integrity, region consistency.
      int code = onlineHbck();
      setRetCode(code);
      // If we have changed the HBase state it is better to run hbck again
      // to see if we haven't broken something else in the process.
      // We run it only once more because otherwise we can easily fall into
      // an infinite loop.
      if (shouldRerun()) {
        try {
          LOG.info("Sleeping " + sleepBeforeRerun + "ms before re-checking after fix...");
          Thread.sleep(sleepBeforeRerun);
        } catch (InterruptedException ie) {
          LOG.warn("Interrupted while sleeping");
          return this;
        }
        // Just report
        setFixAssignments(false);
        setFixMeta(false);
        setFixHdfsHoles(false);
        setFixHdfsOverlaps(false);
        setFixVersionFile(false);
        setFixTableOrphans(false);
        errors.resetErrors();
        code = onlineHbck();
        setRetCode(code);
      }
    } finally {
      IOUtils.cleanup(null, this);
    }
    return this;
  }
  /**
   * Recursive directory listing ("ls -r") of {@code p} through this instance's
   * error reporter, for debugging purposes.
   */
  void debugLsr(Path p) throws IOException {
    debugLsr(getConf(), p, errors);
  }
  /**
   * Recursive directory listing ("ls -r") of {@code p} using a fresh
   * {@link PrintingErrorReporter}, for debugging purposes.
   */
  public static void debugLsr(Configuration conf,
      Path p) throws IOException {
    debugLsr(conf, p, new PrintingErrorReporter());
  }
  /**
   * Recursive directory listing ("ls -r") of {@code p} through the given
   * reporter.  No-op unless debug logging is enabled; silently returns for a
   * null or nonexistent path.
   */
  public static void debugLsr(Configuration conf,
      Path p, ErrorReporter errors) throws IOException {
    if (!LOG.isDebugEnabled() || p == null) {
      return;
    }
    FileSystem fs = p.getFileSystem(conf);
    if (!fs.exists(p)) {
      // nothing
      return;
    }
    errors.print(p.toString());
    if (fs.isFile(p)) {
      return;
    }
    if (fs.getFileStatus(p).isDirectory()) {
      FileStatus[] fss= fs.listStatus(p);
      for (FileStatus status : fss) {
        // Recurse into each child, printing as we go.
        debugLsr(conf, status.getPath(), errors);
      }
    }
  }
}
| apache-2.0 |
hidekatsu-izuno/xmlic | src/main/java/net/arnx/xmlic/internal/org/jaxen/saxpath/base/Token.java | 3637 | /*
* $Header$
* $Revision: 1128 $
* $Date: 2006-02-05 13:49:04 -0800 (Sun, 05 Feb 2006) $
*
* ====================================================================
*
* Copyright 2000-2002 bob mcwhirter & James Strachan.
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of the Jaxen Project nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ====================================================================
* This software consists of voluntary contributions made by many
* individuals on behalf of the Jaxen Project and was originally
* created by bob mcwhirter <bob@werken.com> and
* James Strachan <jstrachan@apache.org>. For more information on the
* Jaxen Project, please see <http://www.jaxen.org/>.
*
* $Id: Token.java 1128 2006-02-05 21:49:04Z elharo $
*/
package net.arnx.xmlic.internal.org.jaxen.saxpath.base;
/**
 * A lexical token produced by the XPath tokenizer: a numeric token-type code
 * plus a half-open [begin, end) character span into the original expression
 * text.  Instances are immutable after construction.
 */
class Token {

    /** Numeric token-type code assigned by the lexer. */
    private final int type;
    /** The full expression string this token's span indexes into. */
    private final String text;
    /** Inclusive start offset of the token within {@link #text}. */
    private final int begin;
    /** Exclusive end offset of the token within {@link #text}. */
    private final int end;

    /**
     * Creates a token.
     *
     * @param tokenType  numeric token-type code
     * @param parseText  the complete text being parsed
     * @param tokenBegin inclusive start offset of the token
     * @param tokenEnd   exclusive end offset of the token
     */
    Token(int tokenType, String parseText, int tokenBegin, int tokenEnd) {
        this.type = tokenType;
        this.text = parseText;
        this.begin = tokenBegin;
        this.end = tokenEnd;
    }

    /** Returns the numeric token-type code. */
    int getTokenType() {
        return type;
    }

    /** Returns the token's text, extracted lazily from the parse string. */
    String getTokenText() {
        return text.substring(begin, end);
    }

    /** Returns the inclusive start offset of the token. */
    int getTokenBegin() {
        return begin;
    }

    /** Returns the exclusive end offset of the token. */
    int getTokenEnd() {
        return end;
    }

    /** Debug representation; output format matches the historical one exactly. */
    public String toString() {
        return "[ (" + type + ") (" + getTokenText() + ")";
    }
}
| apache-2.0 |
quarkusio/quarkus | extensions/container-image/container-image-jib/deployment/src/main/java/io/quarkus/container/image/jib/deployment/JibConfig.java | 8195 | package io.quarkus.container.image.jib.deployment;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import io.quarkus.runtime.annotations.ConfigItem;
import io.quarkus.runtime.annotations.ConfigPhase;
import io.quarkus.runtime.annotations.ConfigRoot;
@ConfigRoot(phase = ConfigPhase.BUILD_TIME)
public class JibConfig {
public static final String DEFAULT_WORKING_DIR = "/home/jboss";
/**
* The base image to be used when a container image is being produced for the jar build.
*
* When the application is built against Java 17 or higher, {@code registry.access.redhat.com/ubi8/openjdk-17-runtime:1.11}
* is used as the default.
* Otherwise {@code registry.access.redhat.com/ubi8/openjdk-11-runtime:1.11} is used as the default.
*/
@ConfigItem
public Optional<String> baseJvmImage;
/**
* The base image to be used when a container image is being produced for the native binary build.
* The default is "quay.io/quarkus/quarkus-micro-image". You can also use
* "registry.access.redhat.com/ubi8/ubi-minimal" which is a bigger base image, but provide more built-in utilities
* such as the microdnf package manager.
*/
@ConfigItem(defaultValue = "quay.io/quarkus/quarkus-micro-image:1.0")
public String baseNativeImage;
/**
* The JVM arguments to pass to the JVM when starting the application
*/
@ConfigItem(defaultValue = "-Djava.util.logging.manager=org.jboss.logmanager.LogManager")
public List<String> jvmArguments;
/**
* Additional JVM arguments to pass to the JVM when starting the application
*/
@ConfigItem
public Optional<List<String>> jvmAdditionalArguments;
/**
* Additional arguments to pass when starting the native application
*/
@ConfigItem
public Optional<List<String>> nativeArguments;
/**
* If this is set, then it will be used as the entry point of the container image.
* There are a few things to be aware of when creating an entry point
* <ul>
* <li>A valid entrypoint is jar package specific (see {@code quarkus.package.type})</li>
* <li>A valid entrypoint depends on the location of both the launching scripts and the application jar file. To that
* end it's helpful to remember that when {@code fast-jar} packaging is used (the default), all necessary application
* jars are added to the {@code /work} directory and that the same
* directory is also used as the working directory. When {@code legacy-jar} or {@code uber-jar} are used, the application
* jars
* are unpacked under the {@code /app} directory
* and that directory is used as the working directory.</li>
* <li>Even if the {@code jvmArguments} field is set, it is ignored completely</li>
* </ul>
*
* When this is not set, a proper default entrypoint will be constructed.
*
* As a final note, a very useful tool for inspecting container image layers that can greatly aid
* when debugging problems with endpoints is <a href="https://github.com/wagoodman/dive">dive</a>
*/
@ConfigItem
public Optional<List<String>> jvmEntrypoint;
/**
* If this is set, then it will be used as the entry point of the container image.
* There are a few things to be aware of when creating an entry point
* <ul>
* <li>A valid entrypoint depends on the location of both the launching scripts and the native binary file. To that end
* it's helpful to remember that the native application is added to the {@code /work} directory and that and the same
* directory is also used as the working directory</li>
* <li>Even if the {@code nativeArguments} field is set, it is ignored completely</li>
* </ul>
*
* When this is not set, a proper default entrypoint will be constructed.
*
* As a final note, a very useful tool for inspecting container image layers that can greatly aid
* when debugging problems with endpoints is <a href="https://github.com/wagoodman/dive">dive</a>
*/
@ConfigItem
public Optional<List<String>> nativeEntrypoint;
/**
* Environment variables to add to the container image
*/
@ConfigItem
public Map<String, String> environmentVariables;
/**
* Custom labels to add to the generated image
*
* @deprecated Use 'quarkus.container-image.labels' instead
*/
@ConfigItem
@Deprecated
public Map<String, String> labels;
/**
* The username to use to authenticate with the registry used to pull the base JVM image
*/
@ConfigItem
public Optional<String> baseRegistryUsername;
/**
* The password to use to authenticate with the registry used to pull the base JVM image
*/
@ConfigItem
public Optional<String> baseRegistryPassword;
/**
* The ports to expose
*/
@ConfigItem(defaultValue = "${quarkus.http.port:8080}")
public List<Integer> ports;
/**
* The user to use in generated image
*/
@ConfigItem
public Optional<String> user;
/**
* The working directory to use in the generated image.
* The default value is chosen to work in accordance with the default base image.
*/
@ConfigItem(defaultValue = DEFAULT_WORKING_DIR)
public String workingDirectory;
/**
* Controls the optimization which skips downloading base image layers that exist in a target
* registry. If the user does not set this property, it is read as false.
*
* If {@code true}, base image layers are always pulled and cached. If
* {@code false}, base image layers will not be pulled/cached if they already exist on the
* target registry.
*/
@ConfigItem(defaultValue = "false")
public boolean alwaysCacheBaseImage;
/**
* List of target platforms. Each platform is defined using the pattern:
*
* <pre>
* {@literal <os>|<arch>[/variant]|<os>/<arch>[/variant]}
* </pre>
*
* for example:
*
* <pre>
* {@literal linux/amd64,linux/arm64/v8}
* </pre>
*
* If not specified, OS default is linux and architecture default is amd64
*
* If more than one platform is configured, it is important to note that the base image has to be a Docker manifest or an
* OCI image index containing a version of each chosen platform
*
* It doesn't work with native images, as cross-compilation is not supported
*
* Target Platform is an incubating feature of Jib. See <a href=
* "https://github.com/GoogleContainerTools/jib/blob/master/docs/faq.md#how-do-i-specify-a-platform-in-the-manifest-list-or-oci-index-of-a-base-image">Jib
* FAQ</a> for more information
*/
@ConfigItem
public Optional<Set<String>> platforms;
/**
* The path of a file that will be written containing the digest of the generated image.
* If the path is relative, it is written to the output directory of the build tool
*/
@ConfigItem(defaultValue = "jib-image.digest")
public String imageDigestFile;
/**
* The path of a file that will be written containing the id of the generated image.
* If the path is relative, it is written to the output directory of the build tool
*/
@ConfigItem(defaultValue = "jib-image.id")
public String imageIdFile;
/**
* Whether or not to operate offline.
*/
@ConfigItem(defaultValue = "false")
public boolean offlineMode;
/**
* Name of binary used to execute the docker commands. This is only used by Jib
* when the container image is being built locally.
*/
@ConfigItem
public Optional<String> dockerExecutableName;
/**
* Whether to set the creation time to the actual build time. Otherwise, the creation time
* will be set to the Unix epoch (00:00:00, January 1st, 1970 in UTC). See <a href=
* "https://github.com/GoogleContainerTools/jib/blob/master/docs/faq.md#why-is-my-image-created-48-years-ago">Jib
* FAQ</a> for more information
*/
@ConfigItem(defaultValue = "true")
public boolean useCurrentTimestamp;
}
| apache-2.0 |
edouardKaiser/lagom | dev/sbt-scripted-library/src/main/java/impl/FooModule.java | 1069 | package impl;
import com.google.inject.AbstractModule;
import com.lightbend.lagom.javadsl.server.ServiceGuiceSupport;
import api.FooService;
import play.*;
import javax.inject.Inject;
import java.util.Date;
import java.io.*;
/**
 * Guice module for the Foo service. Binds the Lagom service implementation and
 * eagerly instantiates {@link FooOnStart} so its startup side effect runs on
 * every application (re)load.
 */
public class FooModule extends AbstractModule implements ServiceGuiceSupport {
  @Override
  protected void configure() {
    // Expose FooServiceImpl as the implementation of the FooService descriptor.
    bindService(FooService.class, FooServiceImpl.class);
    // Eager singleton: constructed at injector creation, triggering doOnStart.
    bind(FooOnStart.class).asEagerSingleton();
  }
}
/**
 * Appends a timestamped "reloaded" marker line to {@code target/reload.log}
 * each time the application starts, so scripted tests can count reloads.
 * Optionally fails startup when the {@code fail} config flag is set.
 */
class FooOnStart {

  @Inject
  public FooOnStart(Application app) {
    doOnStart(app);
  }

  /**
   * Records the reload and honours the {@code fail} configuration flag.
   *
   * @param app the running Play application, used for file resolution and config
   * @throws RuntimeException if the log file cannot be written, or if the
   *         {@code fail} flag is set (used by tests to simulate startup failure)
   */
  private void doOnStart(Application app) {
    // Open for append; try-with-resources guarantees the writer is closed even
    // when the write throws (the original leaked the writer in that case).
    try (FileWriter writer = new FileWriter(app.getFile("target/reload.log"), true)) {
      writer.write(new Date() + " - reloaded\n");
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
    // Deliberate failure hook for scripted tests; checked after the log entry
    // is flushed, matching the original behaviour (write first, then fail).
    if (app.configuration().getBoolean("fail", false)) {
      throw new RuntimeException();
    }
  }
}
| apache-2.0 |
wangqi/gameserver | server/src/main/java/com/xinqihd/sns/gameserver/handler/BceRegisterHandler.java | 2392 | package com.xinqihd.sns.gameserver.handler;
import org.apache.mina.core.session.IoSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.xinqihd.sns.gameserver.db.mongo.AccountManager;
import com.xinqihd.sns.gameserver.db.mongo.LoginManager;
import com.xinqihd.sns.gameserver.proto.XinqiBceRegister;
import com.xinqihd.sns.gameserver.session.SessionKey;
import com.xinqihd.sns.gameserver.transport.SimpleChannelHandler;
import com.xinqihd.sns.gameserver.transport.XinqiMessage;
import com.xinqihd.sns.gameserver.util.StringUtil;
/**
 * The BceRegisterHandler is used for protocol Register.
 * Handles an incoming BceRegister message by delegating account creation to
 * {@link AccountManager}. Stateless, so a single shared instance is used.
 * @author wangqi
 *
 */
public class BceRegisterHandler extends SimpleChannelHandler {

  private Logger logger = LoggerFactory.getLogger(BceRegisterHandler.class);

  // Eagerly-created singleton; the handler keeps no per-session state.
  private static final BceRegisterHandler instance = new BceRegisterHandler();

  private BceRegisterHandler() {
    super();
  }

  /**
   * @return the shared singleton handler instance
   */
  public static BceRegisterHandler getInstance() {
    return instance;
  }

  /**
   * Processes a BceRegister protocol message: extracts the registration payload
   * and registers the account through the accounting system.
   *
   * @param session the network session the message arrived on
   * @param message the raw {@link XinqiMessage} wrapping the BceRegister payload
   * @param sessionKey the key identifying this session
   * @throws Exception propagated from the registration pipeline
   */
  @Override
  public void messageProcess(IoSession session, Object message, SessionKey sessionKey)
      throws Exception {
    if (logger.isDebugEnabled()) {
      logger.debug("->BceRegister");
    }
    XinqiMessage request = (XinqiMessage) message;

    /**
     * Get the user registration info.
     */
    XinqiBceRegister.BceRegister registerInfo = ((XinqiBceRegister.BceRegister) request.payload);
    // Client version is reported by the client as "major.minor.tiny".
    String clientVersion = StringUtil.concat(registerInfo.getMajorversion(), ".",
        registerInfo.getMinorversion(), ".", registerInfo.getTinyversion());

    /**
     * Use the new accounting system (the legacy LoginManager.register path
     * has been removed; AccountManager is the single registration entry point).
     */
    AccountManager.getInstance().register(
        session, sessionKey, registerInfo.getUsername(), registerInfo.getRolename(),
        registerInfo.getPassword(), registerInfo.getEmail(), registerInfo.getGender(),
        registerInfo.getClient(), registerInfo.getCountry(), registerInfo.getChannel(),
        registerInfo.getLocx(), registerInfo.getLocy(), clientVersion);
  }
}
| apache-2.0 |
rototor/pdfbox-graphics2d | graphics2d/src/main/java/de/rototor/pdfbox/graphics2d/IPdfBoxGraphics2DPaintApplier.java | 3011 | package de.rototor.pdfbox.graphics2d;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.pdmodel.PDPageContentStream;
import org.apache.pdfbox.pdmodel.PDResources;
import org.apache.pdfbox.pdmodel.graphics.shading.PDShading;
import java.awt.*;
import java.awt.geom.AffineTransform;
import java.io.IOException;
/**
 * Apply the given paint on the Content Stream.
 * Strategy interface for translating an AWT {@link Paint} into PDF graphics-state
 * and shading operations.
 */
public interface IPdfBoxGraphics2DPaintApplier
{
	/**
	 * Apply the paint on the ContentStream
	 *
	 * @param paint the paint which should be applied
	 * @param contentStream the content stream to apply the paint on
	 * @param currentTransform the current transform of the Graphics2D relative to the
	 * contentStream default coordinate space. This is always a copy of the
	 * current transform, so we can modify it.
	 * @param env Environment for mapping the paint.
	 * @return null or a PDShading which should be used to fill a shape.
	 * @throws IOException if it is not possible to write the paint into the contentStream
	 */
	PDShading applyPaint(Paint paint, PDPageContentStream contentStream,
			AffineTransform currentTransform, IPaintEnv env) throws IOException;

	/**
	 * The different mappers used by the paint applier. This interface is
	 * implemented internally by {@link PdfBoxGraphics2D} and gives the paint
	 * applier access to the drawing environment (document, resources, state).
	 */
	interface IPaintEnv
	{
		/**
		 * @return the color mapper
		 */
		IPdfBoxGraphics2DColorMapper getColorMapper();

		/**
		 * @return the image encoder
		 */
		IPdfBoxGraphics2DImageEncoder getImageEncoder();

		/**
		 * @return the document
		 */
		PDDocument getDocument();

		/**
		 * @return the resource of the content stream
		 */
		PDResources getResources();

		/**
		 * @return the {@link Graphics2D} {@link Composite}
		 */
		Composite getComposite();

		/**
		 * @return The PdfBoxGraphics2D
		 */
		PdfBoxGraphics2D getGraphics2D();

		/**
		 * @return the {@link Graphics2D} XOR Mode {@link Color} or null if paint mode
		 *         is active.
		 */
		@SuppressWarnings("unused")
		Color getXORMode();

		/**
		 * The shape information is needed to be able to correctly render gradients.
		 *
		 * @return get the shape which will be drawn or filled with this paint. Null is
		 *         returned if no shape is known.
		 */
		Shape getShapeToDraw();

		/**
		 * Call this to ensure that the current shape has been walked on the content stream. You are
		 * not allowed to do any graphics state changes after this, as this would be illegal in PDF.
		 * (But most PDF reader render this fine anyway...)<br>
		 * This is mostly only needed for shaded fills.
		 */
		void ensureShapeIsWalked() throws IOException;
	}
}
| apache-2.0 |
ayyoob/carbon-device-mgt-plugins | components/device-mgt/org.wso2.carbon.device.mgt.mobile.impl/src/main/java/org/wso2/carbon/device/mgt/mobile/impl/android/AndroidDeviceManager.java | 13796 | /*
* Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.wso2.carbon.device.mgt.mobile.impl.android;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.wso2.carbon.device.mgt.common.*;
import org.wso2.carbon.device.mgt.common.configuration.mgt.TenantConfiguration;
import org.wso2.carbon.device.mgt.common.license.mgt.License;
import org.wso2.carbon.device.mgt.common.license.mgt.LicenseManagementException;
import org.wso2.carbon.device.mgt.common.license.mgt.LicenseManager;
import org.wso2.carbon.device.mgt.extensions.license.mgt.registry.RegistryBasedLicenseManager;
import org.wso2.carbon.device.mgt.mobile.common.MobileDeviceMgtPluginException;
import org.wso2.carbon.device.mgt.mobile.common.MobilePluginConstants;
import org.wso2.carbon.device.mgt.mobile.dao.MobileDeviceManagementDAOException;
import org.wso2.carbon.device.mgt.mobile.dao.MobileDeviceManagementDAOFactory;
import org.wso2.carbon.device.mgt.mobile.dto.MobileDevice;
import org.wso2.carbon.device.mgt.mobile.impl.android.dao.AndroidDAOFactory;
import org.wso2.carbon.device.mgt.mobile.impl.android.util.AndroidPluginUtils;
import org.wso2.carbon.device.mgt.mobile.util.MobileDeviceManagementUtil;
import org.wso2.carbon.registry.api.RegistryException;
import org.wso2.carbon.registry.api.Resource;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import javax.xml.bind.Marshaller;
import javax.xml.bind.Unmarshaller;
import java.io.StringReader;
import java.io.StringWriter;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
/**
 * Android implementation of {@link DeviceManager}. Device state is persisted
 * through the mobile-device DAO layer; platform configuration and licenses are
 * stored in the registry.
 */
public class AndroidDeviceManager implements DeviceManager {

    private MobileDeviceManagementDAOFactory daoFactory;
    private static final Log log = LogFactory.getLog(AndroidDeviceManagementService.class);
    private FeatureManager featureManager = new AndroidFeatureManager();
    private LicenseManager licenseManager;

    public AndroidDeviceManager() {
        this.daoFactory = new AndroidDAOFactory();
        this.licenseManager = new RegistryBasedLicenseManager();
        License defaultLicense = AndroidPluginUtils.getDefaultLicense();
        try {
            licenseManager
                    .addLicense(AndroidDeviceManagementService.DEVICE_TYPE_ANDROID, defaultLicense);
        } catch (LicenseManagementException e) {
            // A failure to register the default license is not fatal for manager
            // construction; log it and continue (licenses can be added later).
            log.error("Error occurred while adding default license for Android devices", e);
        }
    }

    @Override
    public FeatureManager getFeatureManager() {
        return featureManager;
    }

    /**
     * Marshals the tenant configuration to XML and stores it as a registry resource.
     *
     * @param tenantConfiguration the configuration to persist, marshalled via JAXB
     * @return true on success
     * @throws DeviceManagementException on registry or JAXB failures
     */
    @Override
    public boolean saveConfiguration(TenantConfiguration tenantConfiguration)
            throws DeviceManagementException {
        boolean status;
        try {
            if (log.isDebugEnabled()) {
                log.debug("Persisting android configurations in Registry");
            }
            String resourcePath = MobileDeviceManagementUtil.getPlatformConfigPath(
                    DeviceManagementConstants.
                            MobileDeviceTypes.MOBILE_DEVICE_TYPE_ANDROID);
            StringWriter writer = new StringWriter();
            JAXBContext context = JAXBContext.newInstance(TenantConfiguration.class);
            Marshaller marshaller = context.createMarshaller();
            marshaller.marshal(tenantConfiguration, writer);
            Resource resource = MobileDeviceManagementUtil.getConfigurationRegistry().newResource();
            resource.setContent(writer.toString());
            resource.setMediaType(MobilePluginConstants.MEDIA_TYPE_XML);
            MobileDeviceManagementUtil.putRegistryResource(resourcePath, resource);
            status = true;
        } catch (MobileDeviceMgtPluginException e) {
            throw new DeviceManagementException(
                    "Error occurred while retrieving the Registry instance : " + e.getMessage(), e);
        } catch (RegistryException e) {
            throw new DeviceManagementException(
                    "Error occurred while persisting the Registry resource of Android Configuration : " + e.getMessage(), e);
        } catch (JAXBException e) {
            throw new DeviceManagementException(
                    "Error occurred while parsing the Android configuration : " + e.getMessage(), e);
        }
        return status;
    }

    /**
     * Loads the tenant configuration from the registry, unmarshalling the stored XML.
     *
     * @return the stored configuration, or a fresh empty {@link TenantConfiguration}
     *         when no resource has been saved yet
     * @throws DeviceManagementException on registry or JAXB failures
     */
    @Override
    public TenantConfiguration getConfiguration() throws DeviceManagementException {
        Resource resource;
        try {
            String androidRegPath =
                    MobileDeviceManagementUtil.getPlatformConfigPath(DeviceManagementConstants.
                            MobileDeviceTypes.MOBILE_DEVICE_TYPE_ANDROID);
            resource = MobileDeviceManagementUtil.getRegistryResource(androidRegPath);
            if (resource != null) {
                JAXBContext context = JAXBContext.newInstance(TenantConfiguration.class);
                Unmarshaller unmarshaller = context.createUnmarshaller();
                return (TenantConfiguration) unmarshaller.unmarshal(
                        new StringReader(new String((byte[]) resource.getContent(), Charset
                                .forName(MobilePluginConstants.CHARSET_UTF8))));
            }
            return new TenantConfiguration();
        } catch (MobileDeviceMgtPluginException e) {
            throw new DeviceManagementException(
                    "Error occurred while retrieving the Registry instance : " + e.getMessage(), e);
        } catch (JAXBException e) {
            throw new DeviceManagementException(
                    "Error occurred while parsing the Android configuration : " + e.getMessage(), e);
        } catch (RegistryException e) {
            throw new DeviceManagementException(
                    "Error occurred while retrieving the Registry resource of Android Configuration : " + e.getMessage(), e);
        }
    }

    /**
     * Enrolls a device, or updates the enrollment when the device is already known.
     *
     * @param device the device to enroll
     * @return true if the device was added or its enrollment updated successfully
     * @throws DeviceManagementException when persistence fails (transaction rolled back)
     */
    @Override
    public boolean enrollDevice(Device device) throws DeviceManagementException {
        boolean status = false;
        MobileDevice mobileDevice = MobileDeviceManagementUtil.convertToMobileDevice(device);
        try {
            if (log.isDebugEnabled()) {
                log.debug("Enrolling a new Android device : " + device.getDeviceIdentifier());
            }
            boolean isEnrolled = this.isEnrolled(
                    new DeviceIdentifier(device.getDeviceIdentifier(), device.getType()));
            if (isEnrolled) {
                // BUGFIX: the result of modifyEnrollment was previously discarded,
                // so this method always returned false for already-enrolled devices.
                status = this.modifyEnrollment(device);
            } else {
                AndroidDAOFactory.beginTransaction();
                status = daoFactory.getMobileDeviceDAO().addMobileDevice(mobileDevice);
                AndroidDAOFactory.commitTransaction();
            }
        } catch (MobileDeviceManagementDAOException e) {
            try {
                AndroidDAOFactory.rollbackTransaction();
            } catch (MobileDeviceManagementDAOException mobileDAOEx) {
                String msg = "Error occurred while roll back the device enrol transaction :" +
                        device.toString();
                log.warn(msg, mobileDAOEx);
            }
            String msg =
                    "Error while enrolling the Android device : " + device.getDeviceIdentifier();
            log.error(msg, e);
            throw new DeviceManagementException(msg, e);
        }
        return status;
    }

    /**
     * Updates the persisted enrollment data of an existing device.
     *
     * @param device the device whose enrollment should be updated
     * @return true if the update succeeded
     * @throws DeviceManagementException when persistence fails (transaction rolled back)
     */
    @Override
    public boolean modifyEnrollment(Device device) throws DeviceManagementException {
        boolean status;
        MobileDevice mobileDevice = MobileDeviceManagementUtil.convertToMobileDevice(device);
        try {
            if (log.isDebugEnabled()) {
                log.debug("Modifying the Android device enrollment data");
            }
            AndroidDAOFactory.beginTransaction();
            status = daoFactory.getMobileDeviceDAO().updateMobileDevice(mobileDevice);
            AndroidDAOFactory.commitTransaction();
        } catch (MobileDeviceManagementDAOException e) {
            try {
                AndroidDAOFactory.rollbackTransaction();
            } catch (MobileDeviceManagementDAOException mobileDAOEx) {
                String msg = "Error occurred while roll back the update device transaction :" +
                        device.toString();
                log.warn(msg, mobileDAOEx);
            }
            String msg = "Error while updating the enrollment of the Android device : " +
                    device.getDeviceIdentifier();
            log.error(msg, e);
            throw new DeviceManagementException(msg, e);
        }
        return status;
    }

    /**
     * Removes the device record, effectively dis-enrolling it.
     *
     * @param deviceId identifier of the device to remove
     * @return true if the device row was deleted
     * @throws DeviceManagementException when persistence fails (transaction rolled back)
     */
    @Override
    public boolean disenrollDevice(DeviceIdentifier deviceId) throws DeviceManagementException {
        boolean status;
        try {
            if (log.isDebugEnabled()) {
                log.debug("Dis-enrolling Android device : " + deviceId);
            }
            AndroidDAOFactory.beginTransaction();
            status = daoFactory.getMobileDeviceDAO().deleteMobileDevice(deviceId.getId());
            AndroidDAOFactory.commitTransaction();
        } catch (MobileDeviceManagementDAOException e) {
            try {
                AndroidDAOFactory.rollbackTransaction();
            } catch (MobileDeviceManagementDAOException mobileDAOEx) {
                String msg = "Error occurred while roll back the device dis enrol transaction :" +
                        deviceId.toString();
                log.warn(msg, mobileDAOEx);
            }
            String msg = "Error while removing the Android device : " + deviceId.getId();
            log.error(msg, e);
            throw new DeviceManagementException(msg, e);
        }
        return status;
    }

    /**
     * Checks whether a device record exists for the given identifier.
     *
     * @param deviceId identifier of the device to look up
     * @return true if a matching device row exists
     * @throws DeviceManagementException when the lookup fails
     */
    @Override
    public boolean isEnrolled(DeviceIdentifier deviceId) throws DeviceManagementException {
        boolean isEnrolled = false;
        try {
            if (log.isDebugEnabled()) {
                log.debug("Checking the enrollment of Android device : " + deviceId.getId());
            }
            MobileDevice mobileDevice =
                    daoFactory.getMobileDeviceDAO().getMobileDevice(deviceId.getId());
            if (mobileDevice != null) {
                isEnrolled = true;
            }
        } catch (MobileDeviceManagementDAOException e) {
            String msg = "Error while checking the enrollment status of Android device : " +
                    deviceId.getId();
            log.error(msg, e);
            throw new DeviceManagementException(msg, e);
        }
        return isEnrolled;
    }

    // Activity state is not tracked for Android devices; always reported active.
    @Override
    public boolean isActive(DeviceIdentifier deviceId) throws DeviceManagementException {
        return true;
    }

    // No-op: activity state is not persisted for Android devices.
    @Override
    public boolean setActive(DeviceIdentifier deviceId, boolean status)
            throws DeviceManagementException {
        return true;
    }

    /**
     * Fetches a single device by identifier.
     *
     * @param deviceId identifier of the device to fetch
     * @return the device, converted from its persisted form
     * @throws DeviceManagementException when the lookup fails
     */
    @Override
    public Device getDevice(DeviceIdentifier deviceId) throws DeviceManagementException {
        Device device;
        try {
            if (log.isDebugEnabled()) {
                log.debug("Getting the details of Android device : '" + deviceId.getId() + "'");
            }
            MobileDevice mobileDevice = daoFactory.getMobileDeviceDAO().
                    getMobileDevice(deviceId.getId());
            device = MobileDeviceManagementUtil.convertToDevice(mobileDevice);
        } catch (MobileDeviceManagementDAOException e) {
            throw new DeviceManagementException(
                    "Error occurred while fetching the Android device: '" +
                            deviceId.getId() + "'", e);
        }
        return device;
    }

    // No-op: ownership type changes are not persisted for Android devices.
    @Override
    public boolean setOwnership(DeviceIdentifier deviceId, String ownershipType)
            throws DeviceManagementException {
        return true;
    }

    // Android devices are never claimable through this plugin.
    @Override
    public boolean isClaimable(DeviceIdentifier deviceIdentifier) throws DeviceManagementException {
        return false;
    }

    // Status changes are not supported by this plugin; always reports failure.
    @Override
    public boolean setStatus(DeviceIdentifier deviceIdentifier, String currentUser,
                             EnrolmentInfo.Status status) throws DeviceManagementException {
        return false;
    }

    @Override
    public License getLicense(String languageCode) throws LicenseManagementException {
        return licenseManager
                .getLicense(AndroidDeviceManagementService.DEVICE_TYPE_ANDROID, languageCode);
    }

    @Override
    public void addLicense(License license) throws LicenseManagementException {
        licenseManager.addLicense(AndroidDeviceManagementService.DEVICE_TYPE_ANDROID, license);
    }

    /**
     * Merges the location and property updates from a newly-reported device object
     * into the persisted record and stores the result.
     *
     * @param deviceIdentifier identifier of the device being updated
     * @param device the newly-received device data
     * @return true if the update succeeded
     * @throws DeviceManagementException when persistence fails (transaction rolled back)
     */
    @Override
    public boolean updateDeviceInfo(DeviceIdentifier deviceIdentifier, Device device)
            throws DeviceManagementException {
        boolean status;
        Device existingDevice = this.getDevice(deviceIdentifier);
        // This object holds the current persisted device object
        MobileDevice existingMobileDevice =
                MobileDeviceManagementUtil.convertToMobileDevice(existingDevice);
        // This object holds the newly received device object from response
        MobileDevice mobileDevice = MobileDeviceManagementUtil.convertToMobileDevice(device);
        // Updating current object features using newer ones
        existingMobileDevice.setLatitude(mobileDevice.getLatitude());
        existingMobileDevice.setLongitude(mobileDevice.getLongitude());
        existingMobileDevice.setDeviceProperties(mobileDevice.getDeviceProperties());
        try {
            if (log.isDebugEnabled()) {
                log.debug(
                        "updating the details of Android device : " + device.getDeviceIdentifier());
            }
            AndroidDAOFactory.beginTransaction();
            status = daoFactory.getMobileDeviceDAO()
                    .updateMobileDevice(existingMobileDevice);
            AndroidDAOFactory.commitTransaction();
        } catch (MobileDeviceManagementDAOException e) {
            try {
                AndroidDAOFactory.rollbackTransaction();
            } catch (MobileDeviceManagementDAOException e1) {
                log.warn("Error occurred while roll back the update device info transaction : '" +
                        device.toString() + "'", e1);
            }
            throw new DeviceManagementException(
                    "Error occurred while updating the Android device: '" +
                            device.getDeviceIdentifier() + "'", e);
        }
        return status;
    }

    /**
     * Fetches all persisted Android devices.
     *
     * @return the list of devices, or null when the DAO returns no result set
     * @throws DeviceManagementException when the lookup fails
     */
    @Override
    public List<Device> getAllDevices() throws DeviceManagementException {
        List<Device> devices = null;
        try {
            if (log.isDebugEnabled()) {
                log.debug("Fetching the details of all Android devices");
            }
            List<MobileDevice> mobileDevices =
                    daoFactory.getMobileDeviceDAO().getAllMobileDevices();
            if (mobileDevices != null) {
                devices = new ArrayList<>();
                for (MobileDevice mobileDevice : mobileDevices) {
                    devices.add(MobileDeviceManagementUtil.convertToDevice(mobileDevice));
                }
            }
        } catch (MobileDeviceManagementDAOException e) {
            throw new DeviceManagementException("Error occurred while fetching all Android devices",
                    e);
        }
        return devices;
    }
}
| apache-2.0 |
charles-cooper/idylfin | src/com/opengamma/analytics/financial/instrument/FixedReceiveCashFlowVisitor.java | 20143 | /**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.financial.instrument;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import javax.time.calendar.LocalDate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.opengamma.analytics.financial.forex.definition.ForexDefinition;
import com.opengamma.analytics.financial.forex.definition.ForexNonDeliverableForwardDefinition;
import com.opengamma.analytics.financial.instrument.annuity.AnnuityDefinition;
import com.opengamma.analytics.financial.instrument.cash.CashDefinition;
import com.opengamma.analytics.financial.instrument.fra.ForwardRateAgreementDefinition;
import com.opengamma.analytics.financial.instrument.payment.CouponFixedDefinition;
import com.opengamma.analytics.financial.instrument.payment.CouponIborDefinition;
import com.opengamma.analytics.financial.instrument.payment.CouponIborSpreadDefinition;
import com.opengamma.analytics.financial.instrument.payment.PaymentDefinition;
import com.opengamma.analytics.financial.instrument.payment.PaymentFixedDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedIborDefinition;
import com.opengamma.analytics.financial.instrument.swap.SwapFixedIborSpreadDefinition;
import com.opengamma.util.ArgumentChecker;
import com.opengamma.util.money.MultipleCurrencyAmount;
import com.opengamma.util.timeseries.DoubleTimeSeries;
/**
* Returns all of the known receive cash-flows, including floating payments that have fixed.
* The payments are always positive.
*/
public final class FixedReceiveCashFlowVisitor extends InstrumentDefinitionVisitorSameValueAdapter<DoubleTimeSeries<LocalDate>, Map<LocalDate, MultipleCurrencyAmount>> {
private static final Logger s_logger = LoggerFactory.getLogger(FixedReceiveCashFlowVisitor.class);
private static final FixedReceiveCashFlowVisitor INSTANCE = new FixedReceiveCashFlowVisitor();
/**
 * @return the singleton instance of this visitor
 */
public static FixedReceiveCashFlowVisitor getInstance() {
  return INSTANCE;
}
// Private: the visitor is stateless, so a single shared instance suffices.
// The superclass default (an empty map) is what unhandled instrument types return.
private FixedReceiveCashFlowVisitor() {
  super(Collections.<LocalDate, MultipleCurrencyAmount>emptyMap());
}
/**
 * Returns the single interest payment to be received from the deposit, keyed by its
 * payment date. If the notional is negative (i.e. an amount is to be paid), an empty
 * map is returned instead.
 * @param cash The cash definition, not null
 * @return A map containing the (single) payment date and amount, or an empty map, as appropriate
 */
@Override
public Map<LocalDate, MultipleCurrencyAmount> visitCashDefinition(final CashDefinition cash) {
  ArgumentChecker.notNull(cash, "cash");
  if (cash.getNotional() >= 0) {
    final MultipleCurrencyAmount interest = MultipleCurrencyAmount.of(cash.getCurrency(), cash.getInterestAmount());
    return Collections.singletonMap(cash.getEndDate().toLocalDate(), interest);
  }
  return Collections.emptyMap();
}
/**
 * If the notional is negative (i.e. an amount is to be paid), returns an empty map.
 * Otherwise, returns a map containing a single payment date and amount to be received.
 * The interest amount on a cash deposit is already fixed, so the supplied fixing
 * series is ignored and the single-argument overload is used.
 * @param cash The cash definition, not null
 * @param indexFixingTimeSeries Not used
 * @return A map containing the (single) payment date and amount, or an empty map, as appropriate
 */
@Override
public Map<LocalDate, MultipleCurrencyAmount> visitCashDefinition(final CashDefinition cash, final DoubleTimeSeries<LocalDate> indexFixingTimeSeries) {
  s_logger.info("An index fixing time series was supplied, but will not be used");
  return visitCashDefinition(cash);
}
/**
 * Returns the single fixed payment to be received, keyed by its payment date.
 * If the reference amount is negative (i.e. the payment is to be paid), an empty
 * map is returned instead.
 * @param payment The payment, not null
 * @return A map containing the (single) payment date and amount
 */
@Override
public Map<LocalDate, MultipleCurrencyAmount> visitPaymentFixedDefinition(final PaymentFixedDefinition payment) {
  ArgumentChecker.notNull(payment, "payment");
  if (payment.getReferenceAmount() >= 0) {
    final MultipleCurrencyAmount amount = MultipleCurrencyAmount.of(payment.getCurrency(), payment.getReferenceAmount());
    return Collections.singletonMap(payment.getPaymentDate().toLocalDate(), amount);
  }
  return Collections.emptyMap();
}
/**
 * If the notional is negative (i.e. the payment is to be paid), returns an empty map.
 * Otherwise, returns a map containing a single payment date and amount to be received.
 * A fixed payment needs no fixing, so the supplied series is ignored and the
 * single-argument overload is used.
 * @param payment The payment, not null
 * @param indexFixingTimeSeries Not used
 * @return A map containing the (single) payment date and amount
 */
@Override
public Map<LocalDate, MultipleCurrencyAmount> visitPaymentFixedDefinition(final PaymentFixedDefinition payment, final DoubleTimeSeries<LocalDate> indexFixingTimeSeries) {
  s_logger.info("An index fixing time series was supplied, but will not be used");
  return visitPaymentFixedDefinition(payment);
}
/**
 * Returns the single fixed coupon amount to be received, keyed by its payment date.
 * If the notional is negative (i.e. the coupon will be paid), an empty map is
 * returned instead.
 * @param coupon The fixed coupon, not null
 * @return A map containing the (single) payment date and amount
 */
@Override
public Map<LocalDate, MultipleCurrencyAmount> visitCouponFixedDefinition(final CouponFixedDefinition coupon) {
  ArgumentChecker.notNull(coupon, "coupon");
  if (coupon.getNotional() >= 0) {
    final MultipleCurrencyAmount amount = MultipleCurrencyAmount.of(coupon.getCurrency(), coupon.getAmount());
    return Collections.singletonMap(coupon.getPaymentDate().toLocalDate(), amount);
  }
  return Collections.emptyMap();
}
/**
 * If the notional is negative (i.e. the coupon will be paid), returns an empty map.
 * Otherwise, returns a map containing a single payment date and amount to be received.
 * A fixed coupon needs no fixing, so the supplied series is ignored and the
 * single-argument overload is used.
 * @param coupon The fixed coupon, not null
 * @param indexFixingTimeSeries Not used
 * @return A map containing the (single) payment date and amount
 */
@Override
public Map<LocalDate, MultipleCurrencyAmount> visitCouponFixedDefinition(final CouponFixedDefinition coupon, final DoubleTimeSeries<LocalDate> indexFixingTimeSeries) {
  s_logger.info("An index fixing time series was supplied, but will not be used");
  return visitCouponFixedDefinition(coupon);
}
/**
 * Delegates to {@link #visitCouponIborDefinition(CouponIborDefinition, DoubleTimeSeries)}
 * with a null fixing series. A pay-side coupon (negative notional) yields an empty map
 * before the series is needed; a receive-side coupon will fail the null check in the
 * delegate, since its cash flow cannot be known without a fixing.
 * @param coupon The floating coupon, not null
 * @return An empty map for a pay-side coupon
 */
@Override
public Map<LocalDate, MultipleCurrencyAmount> visitCouponIborDefinition(final CouponIborDefinition coupon) {
  return visitCouponIborDefinition(coupon, null);
}
/**
 * If the notional is negative (i.e. the coupon will be paid), returns an empty map.
 * If the fixing date is not after the last date in the index fixing time series (i.e. the
 * fixing has taken place), returns a map containing a single payment date and amount to be
 * received. Otherwise, returns an empty map.
 * @param coupon The floating coupon, not null
 * @param indexFixingTimeSeries The fixing time series, not null if the coupon is to be received
 * @return A map containing the (single) payment date and amount if fixing has taken place, otherwise an empty map
 * @throws IllegalArgumentException if the fixing should have occurred but no value exists in the series
 */
@Override
public Map<LocalDate, MultipleCurrencyAmount> visitCouponIborDefinition(final CouponIborDefinition coupon, final DoubleTimeSeries<LocalDate> indexFixingTimeSeries) {
  ArgumentChecker.notNull(coupon, "coupon");
  if (coupon.getNotional() < 0) {
    // Pay-side coupon: nothing is received, regardless of fixing state.
    return Collections.emptyMap();
  }
  ArgumentChecker.notNull(indexFixingTimeSeries, "index fixing time series");
  final LocalDate fixingDate = coupon.getFixingDate().toLocalDate();
  if (!indexFixingTimeSeries.getLatestTime().isBefore(fixingDate)) {
    // The series reaches the fixing date, so the rate must be present.
    final LocalDate endDate = coupon.getPaymentDate().toLocalDate();
    if (indexFixingTimeSeries.getValue(fixingDate) != null) {
      final double fixedRate = indexFixingTimeSeries.getValue(fixingDate);
      // Cash flow = notional * accrual year fraction * fixed rate.
      final double payment = coupon.getNotional() * coupon.getPaymentYearFraction() * fixedRate;
      return Collections.singletonMap(endDate, MultipleCurrencyAmount.of(coupon.getCurrency(), payment));
    }
    throw new IllegalArgumentException("Could not get fixing value for date " + fixingDate);
  }
  // Fixing has not yet occurred; the future cash-flow amount is unknown.
  return Collections.emptyMap();
}
/**
 * Delegates to {@link #visitCouponIborSpreadDefinition(CouponIborSpreadDefinition, DoubleTimeSeries)}
 * with a null fixing series. A pay-side coupon (negative notional) yields an empty map
 * before the series is needed; a receive-side coupon will fail the null check in the
 * delegate, since its cash flow cannot be known without a fixing.
 * @param coupon The floating coupon, not null
 * @return An empty map for a pay-side coupon
 */
@Override
public Map<LocalDate, MultipleCurrencyAmount> visitCouponIborSpreadDefinition(final CouponIborSpreadDefinition coupon) {
  return visitCouponIborSpreadDefinition(coupon, null);
}
/**
 * If the notional is negative (i.e. the coupon will be paid), returns an empty map.
 * If the fixing date is not after the last date in the index fixing time series (i.e. the
 * fixing has taken place), returns a map containing a single payment date and amount to be
 * received. Otherwise, returns an empty map.
 * @param coupon The floating coupon, not null
 * @param indexFixingTimeSeries The fixing time series, not null if the coupon is to be received
 * @return A map containing the (single) payment date and amount if fixing has taken place, otherwise an empty map
 * @throws IllegalArgumentException if the fixing should have occurred but no value exists in the series
 */
@Override
public Map<LocalDate, MultipleCurrencyAmount> visitCouponIborSpreadDefinition(final CouponIborSpreadDefinition coupon,
    final DoubleTimeSeries<LocalDate> indexFixingTimeSeries) {
  ArgumentChecker.notNull(coupon, "coupon");
  if (coupon.getNotional() < 0) {
    // Pay-side coupon: nothing is received, regardless of fixing state.
    return Collections.emptyMap();
  }
  ArgumentChecker.notNull(indexFixingTimeSeries, "index fixing time series");
  final LocalDate fixingDate = coupon.getFixingDate().toLocalDate();
  if (!indexFixingTimeSeries.getLatestTime().isBefore(fixingDate)) {
    // The series reaches the fixing date, so the rate must be present.
    final LocalDate endDate = coupon.getPaymentDate().toLocalDate();
    if (indexFixingTimeSeries.getValue(fixingDate) != null) {
      final double fixedRate = indexFixingTimeSeries.getValue(fixingDate);
      // Cash flow = notional * accrual year fraction * (fixed rate + spread).
      final double payment = coupon.getNotional() * coupon.getPaymentYearFraction() * (fixedRate + coupon.getSpread());
      return Collections.singletonMap(endDate, MultipleCurrencyAmount.of(coupon.getCurrency(), payment));
    }
    throw new IllegalArgumentException("Could not get fixing value for date " + fixingDate);
  }
  // Fixing has not yet occurred; the future cash-flow amount is unknown.
  return Collections.emptyMap();
}
  /**
   * If the FRA is a receiver (the notional is negative), returns a map containing a single payment.
   * Otherwise, throws an exception, as the index fixing series is needed to find the floating cash flow.
   * @param forwardRateAgreement The FRA, not null
   * @return A map containing the (single) payment date and amount
   */
  @Override
  public Map<LocalDate, MultipleCurrencyAmount> visitForwardRateAgreementDefinition(final ForwardRateAgreementDefinition forwardRateAgreement) {
    ArgumentChecker.notNull(forwardRateAgreement, "FRA");
    // Receive-floating cash flows cannot be computed without the index fixings, so fail fast here.
    ArgumentChecker.isTrue(forwardRateAgreement.getNotional() < 0, "Receive floating FRAs need an index fixing time series to find receive cash flows");
    // The two-argument visitor only uses the fixing series on the receive-floating path, so null is safe here.
    return visitForwardRateAgreementDefinition(forwardRateAgreement, null);
  }
/**
* If the FRA is a receiver, or if the FRA is a payer and the fixing date is before the last date in the index fixing time series (i.e. the fixing has taken place),
* returns a map containing a single payment.
* @param forwardRateAgreement The FRA, not null
* @param indexFixingTimeSeries The fixing time series for the floating index, not null if the FRA is a receiver
* @return A map containing the (single) payment date and amount
*/
@Override
public Map<LocalDate, MultipleCurrencyAmount> visitForwardRateAgreementDefinition(final ForwardRateAgreementDefinition forwardRateAgreement,
final DoubleTimeSeries<LocalDate> indexFixingTimeSeries) {
ArgumentChecker.notNull(forwardRateAgreement, "FRA");
final LocalDate endDate = forwardRateAgreement.getPaymentDate().toLocalDate();
if (forwardRateAgreement.getNotional() > 0) {
ArgumentChecker.notNull(indexFixingTimeSeries, "index fixing time series");
final LocalDate fixingDate = forwardRateAgreement.getFixingDate().toLocalDate();
if (!indexFixingTimeSeries.getLatestTime().isBefore(fixingDate)) {
if (indexFixingTimeSeries.getValue(fixingDate) != null) {
final double fixedRate = indexFixingTimeSeries.getValue(fixingDate);
final double payment = forwardRateAgreement.getPaymentYearFraction() * forwardRateAgreement.getNotional() * fixedRate;
return Collections.singletonMap(endDate, MultipleCurrencyAmount.of(forwardRateAgreement.getCurrency(), payment));
}
throw new IllegalArgumentException("Could not get fixing value for " + fixingDate);
}
return Collections.emptyMap();
}
final double payment = -forwardRateAgreement.getReferenceAmount() * forwardRateAgreement.getRate() * forwardRateAgreement.getFixingPeriodAccrualFactor();
return Collections.singletonMap(endDate, MultipleCurrencyAmount.of(forwardRateAgreement.getCurrency(), payment));
}
  /**
   * Returns a map containing all of the known payments to be received in an annuity, keyed by payment
   * date with amounts aggregated per date. If there are no payments to be received, an empty map is returned.
   * @param annuity The annuity, not null
   * @param indexFixingTimeSeries The fixing time series for the floating index, not null
   * @return A map containing the payment dates and amounts
   */
  @Override
  public Map<LocalDate, MultipleCurrencyAmount> visitAnnuityDefinition(final AnnuityDefinition<? extends PaymentDefinition> annuity,
      final DoubleTimeSeries<LocalDate> indexFixingTimeSeries) {
    ArgumentChecker.notNull(annuity, "annuity");
    // Visits each payment in the annuity in turn and aggregates the results by payment date.
    return getDatesAndPaymentsFromAnnuity(annuity, indexFixingTimeSeries);
  }
  /**
   * If the swap is a receiver (the fixed leg is not the payer), returns a map containing all of the fixed payments.
   * Otherwise, throws an exception, as the index fixing series is needed to find the floating cash flows.
   * @param swap The swap, not null
   * @return A map containing the fixed payment dates and amounts
   */
  @Override
  public Map<LocalDate, MultipleCurrencyAmount> visitSwapFixedIborDefinition(final SwapFixedIborDefinition swap) {
    ArgumentChecker.notNull(swap, "swap");
    // Receive-floating cash flows cannot be computed without the index fixings, so fail fast here.
    ArgumentChecker.isFalse(swap.getFixedLeg().isPayer(), "Payer swaps need an index fixing series to calculate receive cash-flows");
    // The two-argument visitor only uses the fixing series for the ibor leg, so null is safe here.
    return visitSwapFixedIborDefinition(swap, null);
  }
/**
* If the swap is a receiver, returns a map containing all of the fixed payments. If the swap is a payer, returns a map containing
* all of the payment amounts that have been fixed.
* @param swap The swap, not null
* @param indexFixingTimeSeries The fixing time series for the floating index, not null
* @return A map containing the payment dates and amounts
*/
@Override
public Map<LocalDate, MultipleCurrencyAmount> visitSwapFixedIborDefinition(final SwapFixedIborDefinition swap, final DoubleTimeSeries<LocalDate> indexFixingTimeSeries) {
ArgumentChecker.notNull(swap, "swap");
if (swap.getFixedLeg().isPayer()) {
ArgumentChecker.notNull(indexFixingTimeSeries, "index fixing time series");
return swap.getIborLeg().accept(this, indexFixingTimeSeries);
}
return swap.getFixedLeg().accept(this, indexFixingTimeSeries);
}
  /**
   * If the swap is a receiver (the fixed leg is not the payer), returns a map containing all of the fixed payments.
   * Otherwise, throws an exception, as the index fixing series is needed to find the floating cash flows.
   * @param swap The swap, not null
   * @return A map containing the fixed payment dates and amounts
   */
  @Override
  public Map<LocalDate, MultipleCurrencyAmount> visitSwapFixedIborSpreadDefinition(final SwapFixedIborSpreadDefinition swap) {
    ArgumentChecker.notNull(swap, "swap");
    // Receive-floating cash flows cannot be computed without the index fixings, so fail fast here.
    ArgumentChecker.isFalse(swap.getFixedLeg().isPayer(), "Payer swaps need an index fixing series to calculate receive cash-flows");
    // The two-argument visitor only uses the fixing series for the ibor leg, so null is safe here.
    return visitSwapFixedIborSpreadDefinition(swap, null);
  }
/**
* If the swap is a receiver, returns a map containing all of the fixed payments. If the swap is a payer, returns a map containing
* all of the payments amounts that have been fixed.
* @param swap The swap, not null
* @param indexFixingTimeSeries The fixing time series for the floating index, not null
* @return A map containing the payment dates and amounts
*/
@Override
public Map<LocalDate, MultipleCurrencyAmount> visitSwapFixedIborSpreadDefinition(final SwapFixedIborSpreadDefinition swap,
final DoubleTimeSeries<LocalDate> indexFixingTimeSeries) {
ArgumentChecker.notNull(swap, "swap");
if (swap.getFixedLeg().isPayer()) {
ArgumentChecker.notNull(indexFixingTimeSeries, "index fixing time series");
return swap.getIborLeg().accept(this, indexFixingTimeSeries);
}
return swap.getFixedLeg().accept(this, indexFixingTimeSeries);
}
/**
* Returns a map containing a single date and payment.
* @param fx The FX instrument, not null
* @return A map containing the (single) payment date and amount
*/
@Override
public Map<LocalDate, MultipleCurrencyAmount> visitForexDefinition(final ForexDefinition fx) {
ArgumentChecker.notNull(fx, "fx");
if (fx.getPaymentCurrency1().getReferenceAmount() > 0) {
return fx.getPaymentCurrency1().accept(this);
}
return fx.getPaymentCurrency2().accept(this);
}
  /**
   * Returns a map containing a single date and payment. The supplied fixing time series is ignored.
   * @param fx The FX instrument, not null
   * @param indexFixingTimeSeries Not used
   * @return A map containing the (single) payment date and amount
   */
  @Override
  public Map<LocalDate, MultipleCurrencyAmount> visitForexDefinition(final ForexDefinition fx, final DoubleTimeSeries<LocalDate> indexFixingTimeSeries) {
    // The calculation does not depend on index fixings, so only log and delegate to the one-argument visitor.
    s_logger.info("An index fixing time series was supplied, but will not be used");
    return visitForexDefinition(fx);
  }
/**
* If the cash settlement amount is positive (i.e. it will be received), returns a map containing a single date and payment. Otherwise, returns
* an empty map.
* @param ndf The NDF, not null
* @return A map containing the (single) payment date and amount
*/
@Override
public Map<LocalDate, MultipleCurrencyAmount> visitForexNonDeliverableForwardDefinition(final ForexNonDeliverableForwardDefinition ndf) {
ArgumentChecker.notNull(ndf, "ndf");
if (ndf.getNotional() < 0) {
return Collections.emptyMap();
}
final LocalDate endDate = ndf.getPaymentDate().toLocalDate();
return Collections.singletonMap(endDate, MultipleCurrencyAmount.of(ndf.getCurrency2(), Math.abs(ndf.getNotional())));
}
  /**
   * If the notional is negative, returns an empty map; otherwise returns a map containing the
   * single settlement payment. The supplied fixing time series is ignored.
   * @param ndf The NDF, not null
   * @param indexFixingTimeSeries Not used
   * @return A map containing the (single) payment date and amount
   */
  @Override
  public Map<LocalDate, MultipleCurrencyAmount> visitForexNonDeliverableForwardDefinition(final ForexNonDeliverableForwardDefinition ndf,
      final DoubleTimeSeries<LocalDate> indexFixingTimeSeries) {
    // The calculation does not depend on index fixings, so only log and delegate to the one-argument visitor.
    s_logger.info("An index fixing time series was supplied but will not be used");
    return visitForexNonDeliverableForwardDefinition(ndf);
  }
private Map<LocalDate, MultipleCurrencyAmount> getDatesAndPaymentsFromAnnuity(final AnnuityDefinition<? extends PaymentDefinition> annuity,
final DoubleTimeSeries<LocalDate> indexFixingTimeSeries) {
final Map<LocalDate, MultipleCurrencyAmount> result = new HashMap<LocalDate, MultipleCurrencyAmount>();
for (final PaymentDefinition payment : annuity.getPayments()) {
final Map<LocalDate, MultipleCurrencyAmount> payments = payment.accept(this, indexFixingTimeSeries);
for (final Map.Entry<LocalDate, MultipleCurrencyAmount> entry : payments.entrySet()) {
final int scale = entry.getValue().getCurrencyAmounts()[0].getAmount() < 0 ? -1 : 1;
final MultipleCurrencyAmount mca = entry.getValue().multipliedBy(scale);
final LocalDate key = entry.getKey();
if (result.containsKey(key)) {
result.put(key, result.get(key).plus(mca));
} else {
result.put(key, mca);
}
}
}
return result;
}
}
| apache-2.0 |
vikkyrk/incubator-beam | runners/direct-java/src/test/java/org/apache/beam/runners/direct/TestStreamEvaluatorFactoryTest.java | 8480 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.runners.direct;
import static org.hamcrest.Matchers.equalTo;
import static org.junit.Assert.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import com.google.common.collect.Iterables;
import java.util.Collection;
import java.util.Collections;
import org.apache.beam.runners.direct.DirectRunner.CommittedBundle;
import org.apache.beam.runners.direct.TestStreamEvaluatorFactory.DirectTestStreamFactory.DirectTestStream;
import org.apache.beam.runners.direct.TestStreamEvaluatorFactory.TestClock;
import org.apache.beam.runners.direct.TestStreamEvaluatorFactory.TestStreamIndex;
import org.apache.beam.sdk.coders.VarIntCoder;
import org.apache.beam.sdk.testing.TestPipeline;
import org.apache.beam.sdk.testing.TestStream;
import org.apache.beam.sdk.transforms.AppliedPTransform;
import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
import org.apache.beam.sdk.util.WindowedValue;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.TimestampedValue;
import org.hamcrest.Matchers;
import org.joda.time.Duration;
import org.joda.time.Instant;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/** Tests for {@link TestStreamEvaluatorFactory}. */
@RunWith(JUnit4.class)
public class TestStreamEvaluatorFactoryTest {
  // Factory under test; created fresh for each test in setup().
  private TestStreamEvaluatorFactory factory;
  // Real bundle factory used to back the mocked EvaluationContext's bundle creation.
  private BundleFactory bundleFactory;
  // Mocked evaluation context whose clock and bundle-creation calls are stubbed per test.
  private EvaluationContext context;
  @Rule
  public TestPipeline p = TestPipeline.create().enableAbandonedNodeEnforcement(false);
  private DirectRunner runner;
  @Before
  public void setup() {
    context = mock(EvaluationContext.class);
    runner = DirectRunner.fromOptions(TestPipeline.testingPipelineOptions());
    factory = new TestStreamEvaluatorFactory(context);
    bundleFactory = ImmutableListBundleFactory.create();
  }
  /** Demonstrates that returned evaluators produce elements in sequence. */
  @Test
  public void producesElementsInSequence() throws Exception {
    // A five-event stream: elements 1-3, a watermark advance, elements 4-6,
    // a processing-time advance, and a final watermark advance to infinity.
    TestStream<Integer> testStream = TestStream.create(VarIntCoder.of())
        .addElements(1, 2, 3)
        .advanceWatermarkTo(new Instant(0))
        .addElements(TimestampedValue.atMinimumTimestamp(4),
            TimestampedValue.atMinimumTimestamp(5),
            TimestampedValue.atMinimumTimestamp(6))
        .advanceProcessingTime(Duration.standardMinutes(10))
        .advanceWatermarkToInfinity();
    PCollection<Integer> streamVals =
        p.apply(new DirectTestStream<Integer>(runner, testStream));
    // Stub the context: a controllable test clock, a root bundle, and two output bundles
    // (one for each element-emitting event in the stream).
    TestClock clock = new TestClock();
    when(context.getClock()).thenReturn(clock);
    when(context.createRootBundle()).thenReturn(bundleFactory.createRootBundle());
    when(context.createBundle(streamVals))
        .thenReturn(bundleFactory.createBundle(streamVals), bundleFactory.createBundle(streamVals));
    // The initial input is a single bundle holding the stream's starting index.
    AppliedPTransform<?, ?, ?> streamProducer = DirectGraphs.getProducer(streamVals);
    Collection<CommittedBundle<?>> initialInputs =
        new TestStreamEvaluatorFactory.InputProvider(context)
            .getInitialInputs(streamProducer, 1);
    @SuppressWarnings("unchecked")
    CommittedBundle<TestStreamIndex<Integer>> initialBundle =
        (CommittedBundle<TestStreamIndex<Integer>>) Iterables.getOnlyElement(initialInputs);
    // First evaluation: consumes event 0 (elements 1-3) and leaves a residual pointing at index 1.
    TransformEvaluator<TestStreamIndex<Integer>> firstEvaluator =
        factory.forApplication(streamProducer, initialBundle);
    firstEvaluator.processElement(Iterables.getOnlyElement(initialBundle.getElements()));
    TransformResult<TestStreamIndex<Integer>> firstResult = firstEvaluator.finishBundle();
    WindowedValue<TestStreamIndex<Integer>> firstResidual =
        (WindowedValue<TestStreamIndex<Integer>>)
            Iterables.getOnlyElement(firstResult.getUnprocessedElements());
    assertThat(firstResidual.getValue().getIndex(), equalTo(1));
    assertThat(firstResidual.getTimestamp(), equalTo(BoundedWindow.TIMESTAMP_MIN_VALUE));
    // Second evaluation: consumes event 1 (the watermark advance to 0); residual moves to index 2.
    CommittedBundle<TestStreamIndex<Integer>> secondBundle =
        initialBundle.withElements(Collections.singleton(firstResidual));
    TransformEvaluator<TestStreamIndex<Integer>> secondEvaluator =
        factory.forApplication(streamProducer, secondBundle);
    secondEvaluator.processElement(firstResidual);
    TransformResult<TestStreamIndex<Integer>> secondResult = secondEvaluator.finishBundle();
    WindowedValue<TestStreamIndex<Integer>> secondResidual =
        (WindowedValue<TestStreamIndex<Integer>>)
            Iterables.getOnlyElement(secondResult.getUnprocessedElements());
    assertThat(secondResidual.getValue().getIndex(), equalTo(2));
    assertThat(secondResidual.getTimestamp(), equalTo(new Instant(0)));
    // Third evaluation: consumes event 2 (elements 4-6); residual moves to index 3.
    CommittedBundle<TestStreamIndex<Integer>> thirdBundle =
        secondBundle.withElements(Collections.singleton(secondResidual));
    TransformEvaluator<TestStreamIndex<Integer>> thirdEvaluator =
        factory.forApplication(streamProducer, thirdBundle);
    thirdEvaluator.processElement(secondResidual);
    TransformResult<TestStreamIndex<Integer>> thirdResult = thirdEvaluator.finishBundle();
    WindowedValue<TestStreamIndex<Integer>> thirdResidual =
        (WindowedValue<TestStreamIndex<Integer>>)
            Iterables.getOnlyElement(thirdResult.getUnprocessedElements());
    assertThat(thirdResidual.getValue().getIndex(), equalTo(3));
    assertThat(thirdResidual.getTimestamp(), equalTo(new Instant(0)));
    // Fourth evaluation: consumes event 3 (the processing-time advance); the test clock
    // must move forward by exactly ten minutes and the residual moves to index 4.
    Instant start = clock.now();
    CommittedBundle<TestStreamIndex<Integer>> fourthBundle =
        thirdBundle.withElements(Collections.singleton(thirdResidual));
    TransformEvaluator<TestStreamIndex<Integer>> fourthEvaluator =
        factory.forApplication(streamProducer, fourthBundle);
    fourthEvaluator.processElement(thirdResidual);
    TransformResult<TestStreamIndex<Integer>> fourthResult = fourthEvaluator.finishBundle();
    assertThat(clock.now(), equalTo(start.plus(Duration.standardMinutes(10))));
    WindowedValue<TestStreamIndex<Integer>> fourthResidual =
        (WindowedValue<TestStreamIndex<Integer>>)
            Iterables.getOnlyElement(fourthResult.getUnprocessedElements());
    assertThat(fourthResidual.getValue().getIndex(), equalTo(4));
    assertThat(fourthResidual.getTimestamp(), equalTo(new Instant(0)));
    // Fifth evaluation: consumes the final event (watermark to infinity); no residual remains.
    CommittedBundle<TestStreamIndex<Integer>> fifthBundle =
        thirdBundle.withElements(Collections.singleton(fourthResidual));
    TransformEvaluator<TestStreamIndex<Integer>> fifthEvaluator =
        factory.forApplication(streamProducer, fifthBundle);
    fifthEvaluator.processElement(fourthResidual);
    TransformResult<TestStreamIndex<Integer>> fifthResult = fifthEvaluator.finishBundle();
    // The first evaluation emitted 1-3 and the third emitted 4-6, each into its own bundle.
    assertThat(
        Iterables.getOnlyElement(firstResult.getOutputBundles())
            .commit(Instant.now())
            .getElements(),
        Matchers.<WindowedValue<?>>containsInAnyOrder(
            WindowedValue.valueInGlobalWindow(1),
            WindowedValue.valueInGlobalWindow(2),
            WindowedValue.valueInGlobalWindow(3)));
    assertThat(
        Iterables.getOnlyElement(thirdResult.getOutputBundles())
            .commit(Instant.now())
            .getElements(),
        Matchers.<WindowedValue<?>>containsInAnyOrder(
            WindowedValue.valueInGlobalWindow(4),
            WindowedValue.valueInGlobalWindow(5),
            WindowedValue.valueInGlobalWindow(6)));
    // The final evaluation produces no output, releases the watermark hold, and leaves nothing unprocessed.
    assertThat(fifthResult.getOutputBundles(), Matchers.emptyIterable());
    assertThat(fifthResult.getWatermarkHold(), equalTo(BoundedWindow.TIMESTAMP_MAX_VALUE));
    assertThat(fifthResult.getUnprocessedElements(), Matchers.emptyIterable());
  }
}
| apache-2.0 |
zkidkid/elasticsearch | core/src/main/java/org/elasticsearch/search/SearchModule.java | 60021 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search;
import org.apache.lucene.search.BooleanQuery;
import org.elasticsearch.common.NamedRegistry;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.geo.ShapesAvailability;
import org.elasticsearch.common.geo.builders.ShapeBuilders;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ParseFieldRegistry;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.BoostingQueryBuilder;
import org.elasticsearch.index.query.CommonTermsQueryBuilder;
import org.elasticsearch.index.query.ConstantScoreQueryBuilder;
import org.elasticsearch.index.query.DisMaxQueryBuilder;
import org.elasticsearch.index.query.ExistsQueryBuilder;
import org.elasticsearch.index.query.FieldMaskingSpanQueryBuilder;
import org.elasticsearch.index.query.FuzzyQueryBuilder;
import org.elasticsearch.index.query.GeoBoundingBoxQueryBuilder;
import org.elasticsearch.index.query.GeoDistanceQueryBuilder;
import org.elasticsearch.index.query.GeoDistanceRangeQueryBuilder;
import org.elasticsearch.index.query.GeoPolygonQueryBuilder;
import org.elasticsearch.index.query.GeoShapeQueryBuilder;
import org.elasticsearch.index.query.GeohashCellQuery;
import org.elasticsearch.index.query.HasChildQueryBuilder;
import org.elasticsearch.index.query.HasParentQueryBuilder;
import org.elasticsearch.index.query.IdsQueryBuilder;
import org.elasticsearch.index.query.IndicesQueryBuilder;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.MatchNoneQueryBuilder;
import org.elasticsearch.index.query.MatchPhrasePrefixQueryBuilder;
import org.elasticsearch.index.query.MatchPhraseQueryBuilder;
import org.elasticsearch.index.query.MatchQueryBuilder;
import org.elasticsearch.index.query.MoreLikeThisQueryBuilder;
import org.elasticsearch.index.query.MultiMatchQueryBuilder;
import org.elasticsearch.index.query.NestedQueryBuilder;
import org.elasticsearch.index.query.ParentIdQueryBuilder;
import org.elasticsearch.index.query.PrefixQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryStringQueryBuilder;
import org.elasticsearch.index.query.RangeQueryBuilder;
import org.elasticsearch.index.query.RegexpQueryBuilder;
import org.elasticsearch.index.query.ScriptQueryBuilder;
import org.elasticsearch.index.query.SimpleQueryStringBuilder;
import org.elasticsearch.index.query.SpanContainingQueryBuilder;
import org.elasticsearch.index.query.SpanFirstQueryBuilder;
import org.elasticsearch.index.query.SpanMultiTermQueryBuilder;
import org.elasticsearch.index.query.SpanNearQueryBuilder;
import org.elasticsearch.index.query.SpanNotQueryBuilder;
import org.elasticsearch.index.query.SpanOrQueryBuilder;
import org.elasticsearch.index.query.SpanTermQueryBuilder;
import org.elasticsearch.index.query.SpanWithinQueryBuilder;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.index.query.TermsQueryBuilder;
import org.elasticsearch.index.query.TypeQueryBuilder;
import org.elasticsearch.index.query.WildcardQueryBuilder;
import org.elasticsearch.index.query.WrapperQueryBuilder;
import org.elasticsearch.index.query.functionscore.ExponentialDecayFunctionBuilder;
import org.elasticsearch.index.query.functionscore.FieldValueFactorFunctionBuilder;
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
import org.elasticsearch.index.query.functionscore.GaussDecayFunctionBuilder;
import org.elasticsearch.index.query.functionscore.LinearDecayFunctionBuilder;
import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionParser;
import org.elasticsearch.index.query.functionscore.ScriptScoreFunctionBuilder;
import org.elasticsearch.index.query.functionscore.WeightBuilder;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.plugins.SearchPlugin;
import org.elasticsearch.plugins.SearchPlugin.FetchPhaseConstructionContext;
import org.elasticsearch.plugins.SearchPlugin.QuerySpec;
import org.elasticsearch.plugins.SearchPlugin.ScoreFunctionSpec;
import org.elasticsearch.plugins.SearchPlugin.SearchExtensionSpec;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorParsers;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.children.ChildrenAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.children.InternalChildren;
import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter;
import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.filters.InternalFilters;
import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridParser;
import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid;
import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.global.InternalGlobal;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramParser;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramParser;
import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram;
import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;
import org.elasticsearch.search.aggregations.bucket.missing.InternalMissing;
import org.elasticsearch.search.aggregations.bucket.missing.MissingAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.missing.MissingParser;
import org.elasticsearch.search.aggregations.bucket.nested.InternalNested;
import org.elasticsearch.search.aggregations.bucket.nested.InternalReverseNested;
import org.elasticsearch.search.aggregations.bucket.nested.NestedAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.nested.ReverseNestedAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.range.InternalBinaryRange;
import org.elasticsearch.search.aggregations.bucket.range.InternalRange;
import org.elasticsearch.search.aggregations.bucket.range.RangeAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.range.RangeParser;
import org.elasticsearch.search.aggregations.bucket.range.date.DateRangeAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.range.date.DateRangeParser;
import org.elasticsearch.search.aggregations.bucket.range.date.InternalDateRange;
import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistanceAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistanceParser;
import org.elasticsearch.search.aggregations.bucket.range.geodistance.InternalGeoDistance;
import org.elasticsearch.search.aggregations.bucket.range.ip.IpRangeAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.range.ip.IpRangeParser;
import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedSamplerParser;
import org.elasticsearch.search.aggregations.bucket.sampler.InternalSampler;
import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.sampler.UnmappedSampler;
import org.elasticsearch.search.aggregations.bucket.significant.SignificantLongTerms;
import org.elasticsearch.search.aggregations.bucket.significant.SignificantStringTerms;
import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsParser;
import org.elasticsearch.search.aggregations.bucket.significant.UnmappedSignificantTerms;
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ChiSquare;
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.GND;
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.JLHScore;
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.MutualInformation;
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.PercentageScore;
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ScriptHeuristic;
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic;
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicParser;
import org.elasticsearch.search.aggregations.bucket.terms.DoubleTerms;
import org.elasticsearch.search.aggregations.bucket.terms.LongTerms;
import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.terms.TermsParser;
import org.elasticsearch.search.aggregations.bucket.terms.UnmappedTerms;
import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.avg.AvgParser;
import org.elasticsearch.search.aggregations.metrics.avg.InternalAvg;
import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityParser;
import org.elasticsearch.search.aggregations.metrics.cardinality.InternalCardinality;
import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBoundsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBoundsParser;
import org.elasticsearch.search.aggregations.metrics.geobounds.InternalGeoBounds;
import org.elasticsearch.search.aggregations.metrics.geocentroid.GeoCentroidAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.geocentroid.GeoCentroidParser;
import org.elasticsearch.search.aggregations.metrics.geocentroid.InternalGeoCentroid;
import org.elasticsearch.search.aggregations.metrics.max.InternalMax;
import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.max.MaxParser;
import org.elasticsearch.search.aggregations.metrics.min.InternalMin;
import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.min.MinParser;
import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanksAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanksParser;
import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesParser;
import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.InternalHDRPercentileRanks;
import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.InternalHDRPercentiles;
import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.InternalTDigestPercentileRanks;
import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.InternalTDigestPercentiles;
import org.elasticsearch.search.aggregations.metrics.scripted.InternalScriptedMetric;
import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetricAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.stats.InternalStats;
import org.elasticsearch.search.aggregations.metrics.stats.StatsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.stats.StatsParser;
import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsParser;
import org.elasticsearch.search.aggregations.metrics.stats.extended.InternalExtendedStats;
import org.elasticsearch.search.aggregations.metrics.sum.InternalSum;
import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.sum.SumParser;
import org.elasticsearch.search.aggregations.metrics.tophits.InternalTopHits;
import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.valuecount.InternalValueCount;
import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountParser;
import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.avg.AvgBucketPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.avg.AvgBucketPipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.max.MaxBucketPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.max.MaxBucketPipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.min.MinBucketPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.min.MinBucketPipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.InternalPercentilesBucket;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.PercentilesBucketPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.PercentilesBucketPipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.InternalStatsBucket;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.StatsBucketPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.StatsBucketPipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.ExtendedStatsBucketParser;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.ExtendedStatsBucketPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.ExtendedStatsBucketPipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.InternalExtendedStatsBucket;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.sum.SumBucketPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.sum.SumBucketPipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.bucketscript.BucketScriptPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.bucketscript.BucketScriptPipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.bucketselector.BucketSelectorPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.bucketselector.BucketSelectorPipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.cumulativesum.CumulativeSumPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.cumulativesum.CumulativeSumPipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.derivative.DerivativePipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.derivative.DerivativePipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.derivative.InternalDerivative;
import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.movavg.MovAvgPipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.movavg.models.EwmaModel;
import org.elasticsearch.search.aggregations.pipeline.movavg.models.HoltLinearModel;
import org.elasticsearch.search.aggregations.pipeline.movavg.models.HoltWintersModel;
import org.elasticsearch.search.aggregations.pipeline.movavg.models.LinearModel;
import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModel;
import org.elasticsearch.search.aggregations.pipeline.movavg.models.SimpleModel;
import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregator;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.subphase.DocValueFieldsFetchSubPhase;
import org.elasticsearch.search.fetch.subphase.ExplainFetchSubPhase;
import org.elasticsearch.search.fetch.subphase.FetchSourceSubPhase;
import org.elasticsearch.search.fetch.subphase.MatchedQueriesFetchSubPhase;
import org.elasticsearch.search.fetch.subphase.ParentFieldSubFetchPhase;
import org.elasticsearch.search.fetch.subphase.ScriptFieldsFetchSubPhase;
import org.elasticsearch.search.fetch.subphase.VersionFetchSubPhase;
import org.elasticsearch.search.fetch.subphase.highlight.FastVectorHighlighter;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightPhase;
import org.elasticsearch.search.fetch.subphase.highlight.Highlighter;
import org.elasticsearch.search.fetch.subphase.highlight.PlainHighlighter;
import org.elasticsearch.search.fetch.subphase.highlight.PostingsHighlighter;
import org.elasticsearch.search.rescore.QueryRescorerBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.search.sort.FieldSortBuilder;
import org.elasticsearch.search.sort.GeoDistanceSortBuilder;
import org.elasticsearch.search.sort.ScoreSortBuilder;
import org.elasticsearch.search.sort.ScriptSortBuilder;
import org.elasticsearch.search.sort.SortBuilder;
import org.elasticsearch.search.suggest.Suggester;
import org.elasticsearch.search.suggest.Suggesters;
import org.elasticsearch.search.suggest.SuggestionBuilder;
import org.elasticsearch.search.suggest.completion.CompletionSuggester;
import org.elasticsearch.search.suggest.phrase.Laplace;
import org.elasticsearch.search.suggest.phrase.LinearInterpolation;
import org.elasticsearch.search.suggest.phrase.PhraseSuggester;
import org.elasticsearch.search.suggest.phrase.SmoothingModel;
import org.elasticsearch.search.suggest.phrase.StupidBackoff;
import org.elasticsearch.search.suggest.term.TermSuggester;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.function.Consumer;
import java.util.function.Function;
import static java.util.Collections.unmodifiableMap;
import static java.util.Objects.requireNonNull;
/**
* Sets up things that can be done at search time like queries, aggregations, and suggesters.
*/
public class SearchModule extends AbstractModule {
// True when running inside the transport client: XContent parsers are not registered and
// nothing is bound in configure(), but named writeables are still collected.
private final boolean transportClient;
private final Map<String, Highlighter> highlighters;
private final Map<String, Suggester<?>> suggesters;
private final ParseFieldRegistry<ScoreFunctionParser<?>> scoreFunctionParserRegistry = new ParseFieldRegistry<>("score_function");
private final IndicesQueriesRegistry queryParserRegistry = new IndicesQueriesRegistry();
private final ParseFieldRegistry<Aggregator.Parser> aggregationParserRegistry = new ParseFieldRegistry<>("aggregation");
// Registry label fixed from the misspelled "pipline_aggregation"; it is a human-readable
// name kept consistent with the sibling registries above.
private final ParseFieldRegistry<PipelineAggregator.Parser> pipelineAggregationParserRegistry = new ParseFieldRegistry<>(
        "pipeline_aggregation");
private final AggregatorParsers aggregatorParsers = new AggregatorParsers(aggregationParserRegistry, pipelineAggregationParserRegistry);
private final ParseFieldRegistry<SignificanceHeuristicParser> significanceHeuristicParserRegistry = new ParseFieldRegistry<>(
        "significance_heuristic");
private final ParseFieldRegistry<MovAvgModel.AbstractModelParser> movingAverageModelParserRegistry = new ParseFieldRegistry<>(
        "moving_avg_model");
private final List<FetchSubPhase> fetchSubPhases = new ArrayList<>();
private final Settings settings;
// Accumulates every NamedWriteable registration made while this module is constructed.
private final List<Entry> namedWriteables = new ArrayList<>();
public static final Setting<Integer> INDICES_MAX_CLAUSE_COUNT_SETTING = Setting.intSetting("indices.query.bool.max_clause_count",
        1024, 1, Integer.MAX_VALUE, Setting.Property.NodeScope);
// pkg private so tests can mock
Class<? extends SearchService> searchServiceImpl = SearchService.class;
/**
 * Builds the search module, eagerly registering every built-in and plugin-provided search
 * extension (suggesters, highlighters, score functions, queries, rescorers, sorts, value
 * formats, significance heuristics, moving-average models, aggregations, fetch sub-phases,
 * and shapes).
 *
 * @param settings        node settings, passed to components that need them (e.g. highlighters)
 * @param transportClient true when running inside the transport client, which only needs the
 *                        named writeables and skips parser registration and Guice bindings
 * @param plugins         plugins that may contribute additional search extensions
 */
public SearchModule(Settings settings, boolean transportClient, List<SearchPlugin> plugins) {
    this.settings = settings;
    this.transportClient = transportClient;
    suggesters = setupSuggesters(plugins);
    // Highlighters must be built before registerFetchSubPhases, which constructs the
    // HighlightPhase from them.
    highlighters = setupHighlighters(settings, plugins);
    registerScoreFunctions(plugins);
    registerQueryParsers(plugins);
    registerRescorers();
    registerSorts();
    registerValueFormats();
    registerSignificanceHeuristics(plugins);
    registerMovingAverageModels(plugins);
    registerBuiltinAggregations();
    registerFetchSubPhases(plugins);
    registerShapes();
}
/**
 * All {@link NamedWriteable} registrations collected while setting up this module.
 */
public List<Entry> getNamedWriteables() {
    return this.namedWriteables;
}
/**
 * A fresh {@link Suggesters} view over the suggesters registered with this module.
 */
public Suggesters getSuggesters() {
    Suggesters wrapped = new Suggesters(this.suggesters);
    return wrapped;
}
/**
 * The registry holding every known query parser.
 */
public IndicesQueriesRegistry getQueryParserRegistry() {
    return this.queryParserRegistry;
}
/**
 * Returns the {@link Highlighter} registry (name to implementation).
 */
public Map<String, Highlighter> getHighlighters() {
    return this.highlighters;
}
/**
 * The registry of {@link SignificanceHeuristic} parsers.
 */
public ParseFieldRegistry<SignificanceHeuristicParser> getSignificanceHeuristicParserRegistry() {
    return this.significanceHeuristicParserRegistry;
}
/**
 * The registry of {@link MovAvgModel} parsers.
 */
public ParseFieldRegistry<MovAvgModel.AbstractModelParser> getMovingAverageModelParserRegistry() {
    return this.movingAverageModelParserRegistry;
}
/**
 * Register an aggregation: its XContent parser (skipped on the transport client, which never
 * parses), the stream reader for its builder, and a stream reader for each shard-level
 * result type it can produce.
 */
public void registerAggregation(AggregationSpec spec) {
    if (!transportClient) {
        aggregationParserRegistry.register(spec.parser, spec.name);
    }
    namedWriteables.add(new Entry(AggregationBuilder.class, spec.name.getPreferredName(), spec.builderReader));
    // TreeMap iterates in key order, so registrations are deterministic.
    spec.resultReaders.forEach((writeableName, internalReader) ->
            namedWriteables.add(new Entry(InternalAggregation.class, writeableName, internalReader)));
}
/**
 * Specification of an aggregation: how to read its {@link AggregationBuilder} from a stream,
 * how to parse it from XContent, the names it is registered under, and stream readers for the
 * shard-level (internal) results it can produce.
 */
public static class AggregationSpec {
    // Writeable name -> reader for each shard-level result type this aggregation produces.
    // TreeMap keeps the registration order deterministic.
    private final Map<String, Writeable.Reader<? extends InternalAggregation>> resultReaders = new TreeMap<>();
    private final Writeable.Reader<? extends AggregationBuilder> builderReader;
    private final Aggregator.Parser parser;
    private final ParseField name;
    /**
     * Register an aggregation.
     *
     * @param builderReader reads the {@link AggregationBuilder} from a stream
     * @param parser reads the aggregation builder from XContent
     * @param name names by which the aggregation may be parsed. The first name is special because it is the name that the reader is
     *        registered under.
     */
    public AggregationSpec(Reader<? extends AggregationBuilder> builderReader, Aggregator.Parser parser,
            ParseField name) {
        this.builderReader = builderReader;
        this.parser = parser;
        this.name = name;
    }
    /**
     * Add a reader for the shard level results of the aggregation with {@linkplain #name}'s {@link ParseField#getPreferredName()} as
     * the {@link NamedWriteable#getWriteableName()}.
     */
    public AggregationSpec addResultReader(Writeable.Reader<? extends InternalAggregation> resultReader) {
        return addResultReader(name.getPreferredName(), resultReader);
    }
    /**
     * Add a reader for the shard level results of the aggregation, keyed by an explicit
     * writeable name. Returns {@code this} so calls can be chained.
     */
    public AggregationSpec addResultReader(String writeableName, Writeable.Reader<? extends InternalAggregation> resultReader) {
        resultReaders.put(writeableName, resultReader);
        return this;
    }
}
/**
 * Register a pipeline aggregation: its XContent parser (skipped on the transport client),
 * the stream readers for its builder and its aggregator, and a stream reader for each
 * shard-level result type it can produce.
 */
public void registerPipelineAggregation(PipelineAggregationSpec spec) {
    if (!transportClient) {
        pipelineAggregationParserRegistry.register(spec.parser, spec.name);
    }
    String preferredName = spec.name.getPreferredName();
    namedWriteables.add(new Entry(PipelineAggregationBuilder.class, preferredName, spec.builderReader));
    namedWriteables.add(new Entry(PipelineAggregator.class, preferredName, spec.aggregatorReader));
    // TreeMap iterates in key order, so registrations are deterministic.
    spec.resultReaders.forEach((writeableName, resultReader) ->
            namedWriteables.add(new Entry(InternalAggregation.class, writeableName, resultReader)));
}
/**
 * Specification of a pipeline aggregation: stream readers for its builder and aggregator,
 * an XContent parser, the names it is registered under, and stream readers for the
 * shard-level (internal) results it can produce.
 */
public static class PipelineAggregationSpec {
    // Writeable name -> reader for each shard-level result type this aggregation produces.
    // TreeMap keeps the registration order deterministic.
    private final Map<String, Writeable.Reader<? extends InternalAggregation>> resultReaders = new TreeMap<>();
    private final Writeable.Reader<? extends PipelineAggregationBuilder> builderReader;
    private final Writeable.Reader<? extends PipelineAggregator> aggregatorReader;
    private final PipelineAggregator.Parser parser;
    private final ParseField name;
    /**
     * Register a pipeline aggregation.
     *
     * @param builderReader reads the {@link PipelineAggregationBuilder} from a stream
     * @param aggregatorReader reads the {@link PipelineAggregator} from a stream
     * @param parser reads the aggregation builder from XContent
     * @param name names by which the aggregation may be parsed. The first name is special because it is the name that the reader is
     *        registered under.
     */
    public PipelineAggregationSpec(Reader<? extends PipelineAggregationBuilder> builderReader,
            Writeable.Reader<? extends PipelineAggregator> aggregatorReader,
            PipelineAggregator.Parser parser, ParseField name) {
        this.builderReader = builderReader;
        this.aggregatorReader = aggregatorReader;
        this.parser = parser;
        this.name = name;
    }
    /**
     * Add a reader for the shard level results of the aggregation with {@linkplain #name}'s {@link ParseField#getPreferredName()} as
     * the {@link NamedWriteable#getWriteableName()}.
     */
    public PipelineAggregationSpec addResultReader(Writeable.Reader<? extends InternalAggregation> resultReader) {
        return addResultReader(name.getPreferredName(), resultReader);
    }
    /**
     * Add a reader for the shard level results of the aggregation, keyed by an explicit
     * writeable name. Returns {@code this} so calls can be chained.
     */
    public PipelineAggregationSpec addResultReader(String writeableName, Writeable.Reader<? extends InternalAggregation> resultReader) {
        resultReaders.put(writeableName, resultReader);
        return this;
    }
}
@Override
protected void configure() {
    if (transportClient) {
        // Nothing is bound for the transport client, but SearchModule is still responsible
        // for setting up things like the NamedWriteableRegistry.
        return;
    }
    bind(IndicesQueriesRegistry.class).toInstance(queryParserRegistry);
    bind(Suggesters.class).toInstance(getSuggesters());
    configureSearch();
    bind(AggregatorParsers.class).toInstance(aggregatorParsers);
}
/**
 * Registers every aggregation and pipeline aggregation that ships with core: the XContent
 * parser, the builder's stream reader, and stream readers for the shard-level result types
 * each aggregation can produce.
 */
private void registerBuiltinAggregations() {
    // Metrics aggregations.
    registerAggregation(new AggregationSpec(AvgAggregationBuilder::new, new AvgParser(), AvgAggregationBuilder.AGGREGATION_NAME_FIELD)
            .addResultReader(InternalAvg::new));
    registerAggregation(new AggregationSpec(SumAggregationBuilder::new, new SumParser(), SumAggregationBuilder.AGGREGATION_NAME_FIELD)
            .addResultReader(InternalSum::new));
    registerAggregation(new AggregationSpec(MinAggregationBuilder::new, new MinParser(), MinAggregationBuilder.AGGREGATION_NAME_FIELD)
            .addResultReader(InternalMin::new));
    registerAggregation(new AggregationSpec(MaxAggregationBuilder::new, new MaxParser(), MaxAggregationBuilder.AGGREGATION_NAME_FIELD)
            .addResultReader(InternalMax::new));
    registerAggregation(new AggregationSpec(StatsAggregationBuilder::new, new StatsParser(),
            StatsAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalStats::new));
    registerAggregation(new AggregationSpec(ExtendedStatsAggregationBuilder::new, new ExtendedStatsParser(),
            ExtendedStatsAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalExtendedStats::new));
    registerAggregation(new AggregationSpec(ValueCountAggregationBuilder::new, new ValueCountParser(),
            ValueCountAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalValueCount::new));
    // Percentiles and percentile ranks have two result implementations (t-digest and HDR),
    // each registered under its own writeable name.
    registerAggregation(new AggregationSpec(PercentilesAggregationBuilder::new, new PercentilesParser(),
            PercentilesAggregationBuilder.AGGREGATION_NAME_FIELD)
                .addResultReader(InternalTDigestPercentiles.NAME, InternalTDigestPercentiles::new)
                .addResultReader(InternalHDRPercentiles.NAME, InternalHDRPercentiles::new));
    registerAggregation(new AggregationSpec(PercentileRanksAggregationBuilder::new, new PercentileRanksParser(),
            PercentileRanksAggregationBuilder.AGGREGATION_NAME_FIELD)
                .addResultReader(InternalTDigestPercentileRanks.NAME, InternalTDigestPercentileRanks::new)
                .addResultReader(InternalHDRPercentileRanks.NAME, InternalHDRPercentileRanks::new));
    registerAggregation(new AggregationSpec(CardinalityAggregationBuilder::new, new CardinalityParser(),
            CardinalityAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalCardinality::new));
    // Bucket aggregations.
    registerAggregation(new AggregationSpec(GlobalAggregationBuilder::new, GlobalAggregationBuilder::parse,
            GlobalAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalGlobal::new));
    registerAggregation(
            new AggregationSpec(MissingAggregationBuilder::new, new MissingParser(), MissingAggregationBuilder.AGGREGATION_NAME_FIELD)
                .addResultReader(InternalMissing::new));
    registerAggregation(new AggregationSpec(FilterAggregationBuilder::new, FilterAggregationBuilder::parse,
            FilterAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalFilter::new));
    registerAggregation(new AggregationSpec(FiltersAggregationBuilder::new, FiltersAggregationBuilder::parse,
            FiltersAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalFilters::new));
    registerAggregation(new AggregationSpec(SamplerAggregationBuilder::new, SamplerAggregationBuilder::parse,
            SamplerAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalSampler.NAME, InternalSampler::new)
                .addResultReader(UnmappedSampler.NAME, UnmappedSampler::new));
    // NOTE(review): no result reader is registered here — presumably the diversified sampler
    // reuses the InternalSampler/UnmappedSampler readers registered just above; confirm.
    registerAggregation(new AggregationSpec(DiversifiedAggregationBuilder::new, new DiversifiedSamplerParser(),
            DiversifiedAggregationBuilder.AGGREGATION_NAME_FIELD));
    registerAggregation(
            new AggregationSpec(TermsAggregationBuilder::new, new TermsParser(), TermsAggregationBuilder.AGGREGATION_NAME_FIELD)
                .addResultReader(StringTerms.NAME, StringTerms::new)
                .addResultReader(UnmappedTerms.NAME, UnmappedTerms::new)
                .addResultReader(LongTerms.NAME, LongTerms::new)
                .addResultReader(DoubleTerms.NAME, DoubleTerms::new));
    registerAggregation(new AggregationSpec(SignificantTermsAggregationBuilder::new,
            new SignificantTermsParser(significanceHeuristicParserRegistry, queryParserRegistry),
            SignificantTermsAggregationBuilder.AGGREGATION_NAME_FIELD)
                .addResultReader(SignificantStringTerms.NAME, SignificantStringTerms::new)
                .addResultReader(SignificantLongTerms.NAME, SignificantLongTerms::new)
                .addResultReader(UnmappedSignificantTerms.NAME, UnmappedSignificantTerms::new));
    registerAggregation(new AggregationSpec(RangeAggregationBuilder::new, new RangeParser(),
            RangeAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalRange::new));
    registerAggregation(new AggregationSpec(DateRangeAggregationBuilder::new, new DateRangeParser(),
            DateRangeAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalDateRange::new));
    registerAggregation(
            new AggregationSpec(IpRangeAggregationBuilder::new, new IpRangeParser(), IpRangeAggregationBuilder.AGGREGATION_NAME_FIELD)
                .addResultReader(InternalBinaryRange::new));
    registerAggregation(new AggregationSpec(HistogramAggregationBuilder::new, new HistogramParser(),
            HistogramAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalHistogram::new));
    registerAggregation(new AggregationSpec(DateHistogramAggregationBuilder::new, new DateHistogramParser(),
            DateHistogramAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalDateHistogram::new));
    registerAggregation(new AggregationSpec(GeoDistanceAggregationBuilder::new, new GeoDistanceParser(),
            GeoDistanceAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalGeoDistance::new));
    registerAggregation(new AggregationSpec(GeoGridAggregationBuilder::new, new GeoHashGridParser(),
            GeoGridAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalGeoHashGrid::new));
    // NOTE(review): this constant is named AGGREGATION_FIELD_NAME, unlike the
    // AGGREGATION_NAME_FIELD used elsewhere — the inconsistency lives in the declaring class.
    registerAggregation(new AggregationSpec(NestedAggregationBuilder::new, NestedAggregationBuilder::parse,
            NestedAggregationBuilder.AGGREGATION_FIELD_NAME).addResultReader(InternalNested::new));
    registerAggregation(new AggregationSpec(ReverseNestedAggregationBuilder::new, ReverseNestedAggregationBuilder::parse,
            ReverseNestedAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalReverseNested::new));
    registerAggregation(new AggregationSpec(TopHitsAggregationBuilder::new, TopHitsAggregationBuilder::parse,
            TopHitsAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalTopHits::new));
    // NOTE(review): "AGGREGATION_NAME_FIED" looks misspelled; it matches the declaration in
    // GeoBoundsAggregationBuilder — if that constant is ever renamed, update this use too.
    registerAggregation(new AggregationSpec(GeoBoundsAggregationBuilder::new, new GeoBoundsParser(),
            GeoBoundsAggregationBuilder.AGGREGATION_NAME_FIED).addResultReader(InternalGeoBounds::new));
    registerAggregation(new AggregationSpec(GeoCentroidAggregationBuilder::new, new GeoCentroidParser(),
            GeoCentroidAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalGeoCentroid::new));
    registerAggregation(new AggregationSpec(ScriptedMetricAggregationBuilder::new, ScriptedMetricAggregationBuilder::parse,
            ScriptedMetricAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalScriptedMetric::new));
    registerAggregation(new AggregationSpec(ChildrenAggregationBuilder::new, ChildrenAggregationBuilder::parse,
            ChildrenAggregationBuilder.AGGREGATION_NAME_FIELD).addResultReader(InternalChildren::new));
    // Pipeline aggregations.
    registerPipelineAggregation(new PipelineAggregationSpec(
            DerivativePipelineAggregationBuilder::new,
            DerivativePipelineAggregator::new,
            DerivativePipelineAggregationBuilder::parse,
            DerivativePipelineAggregationBuilder.AGGREGATION_NAME_FIELD)
                .addResultReader(InternalDerivative::new));
    registerPipelineAggregation(new PipelineAggregationSpec(
            MaxBucketPipelineAggregationBuilder::new,
            MaxBucketPipelineAggregator::new,
            MaxBucketPipelineAggregationBuilder.PARSER,
            MaxBucketPipelineAggregationBuilder.AGGREGATION_NAME_FIELD)
                // This bucket is used by many pipeline aggregations.
                .addResultReader(InternalBucketMetricValue.NAME, InternalBucketMetricValue::new));
    registerPipelineAggregation(new PipelineAggregationSpec(
            MinBucketPipelineAggregationBuilder::new,
            MinBucketPipelineAggregator::new,
            MinBucketPipelineAggregationBuilder.PARSER,
            MinBucketPipelineAggregationBuilder.AGGREGATION_FIELD_NAME)
                /* Uses InternalBucketMetricValue */);
    registerPipelineAggregation(new PipelineAggregationSpec(
            AvgBucketPipelineAggregationBuilder::new,
            AvgBucketPipelineAggregator::new,
            AvgBucketPipelineAggregationBuilder.PARSER,
            AvgBucketPipelineAggregationBuilder.AGGREGATION_NAME_FIELD)
                // This bucket is used by many pipeline aggregations.
                .addResultReader(InternalSimpleValue.NAME, InternalSimpleValue::new));
    registerPipelineAggregation(new PipelineAggregationSpec(
            SumBucketPipelineAggregationBuilder::new,
            SumBucketPipelineAggregator::new,
            SumBucketPipelineAggregationBuilder.PARSER,
            SumBucketPipelineAggregationBuilder.AGGREGATION_NAME_FIELD)
                /* Uses InternalSimpleValue */);
    registerPipelineAggregation(new PipelineAggregationSpec(
            StatsBucketPipelineAggregationBuilder::new,
            StatsBucketPipelineAggregator::new,
            StatsBucketPipelineAggregationBuilder.PARSER,
            StatsBucketPipelineAggregationBuilder.AGGREGATION_NAME_FIELD)
                .addResultReader(InternalStatsBucket::new));
    registerPipelineAggregation(new PipelineAggregationSpec(
            ExtendedStatsBucketPipelineAggregationBuilder::new,
            ExtendedStatsBucketPipelineAggregator::new,
            new ExtendedStatsBucketParser(),
            ExtendedStatsBucketPipelineAggregationBuilder.AGGREGATION_NAME_FIELD)
                .addResultReader(InternalExtendedStatsBucket::new));
    registerPipelineAggregation(new PipelineAggregationSpec(
            PercentilesBucketPipelineAggregationBuilder::new,
            PercentilesBucketPipelineAggregator::new,
            PercentilesBucketPipelineAggregationBuilder.PARSER,
            PercentilesBucketPipelineAggregationBuilder.AGGREGATION_NAME_FIELD)
                .addResultReader(InternalPercentilesBucket::new));
    // The moving-average parser needs the model registry so plugins can contribute models.
    registerPipelineAggregation(new PipelineAggregationSpec(
            MovAvgPipelineAggregationBuilder::new,
            MovAvgPipelineAggregator::new,
            (n, c) -> MovAvgPipelineAggregationBuilder.parse(movingAverageModelParserRegistry, n, c),
            MovAvgPipelineAggregationBuilder.AGGREGATION_FIELD_NAME)
                /* Uses InternalHistogram for buckets */);
    registerPipelineAggregation(new PipelineAggregationSpec(
            CumulativeSumPipelineAggregationBuilder::new,
            CumulativeSumPipelineAggregator::new,
            CumulativeSumPipelineAggregationBuilder::parse,
            CumulativeSumPipelineAggregationBuilder.AGGREGATION_NAME_FIELD));
    registerPipelineAggregation(new PipelineAggregationSpec(
            BucketScriptPipelineAggregationBuilder::new,
            BucketScriptPipelineAggregator::new,
            BucketScriptPipelineAggregationBuilder::parse,
            BucketScriptPipelineAggregationBuilder.AGGREGATION_NAME_FIELD));
    registerPipelineAggregation(new PipelineAggregationSpec(
            BucketSelectorPipelineAggregationBuilder::new,
            BucketSelectorPipelineAggregator::new,
            BucketSelectorPipelineAggregationBuilder::parse,
            BucketSelectorPipelineAggregationBuilder.AGGREGATION_NAME_FIELD));
    registerPipelineAggregation(new PipelineAggregationSpec(
            SerialDiffPipelineAggregationBuilder::new,
            SerialDiffPipelineAggregator::new,
            SerialDiffPipelineAggregationBuilder::parse,
            SerialDiffPipelineAggregationBuilder.AGGREGATION_NAME_FIELD));
}
/**
 * Binds the search-internal classes: the phase controller, the fetch phase (built from the
 * collected fetch sub-phases), the transport service, and the {@link SearchService} itself.
 */
protected void configureSearch() {
    bind(SearchPhaseController.class).asEagerSingleton();
    bind(FetchPhase.class).toInstance(new FetchPhase(fetchSubPhases));
    bind(SearchTransportService.class).asEagerSingleton();
    if (searchServiceImpl != SearchService.class) {
        // A test (or subclass) swapped in its own SearchService implementation.
        bind(SearchService.class).to(searchServiceImpl).asEagerSingleton();
    } else {
        bind(SearchService.class).asEagerSingleton();
    }
}
/**
 * Registers geo-shape writeables, but only when the optional spatial libraries (JTS and
 * Spatial4J) are both on the classpath.
 */
private void registerShapes() {
    boolean shapesAvailable = ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE;
    if (shapesAvailable) {
        ShapeBuilders.register(namedWriteables);
    }
}
/**
 * Registers the single built-in rescorer implementation.
 */
private void registerRescorers() {
    Entry queryRescorer = new Entry(RescoreBuilder.class, QueryRescorerBuilder.NAME, QueryRescorerBuilder::new);
    namedWriteables.add(queryRescorer);
}
/**
 * Registers every core {@link SortBuilder} implementation for stream serialization.
 */
private void registerSorts() {
    Class<SortBuilder> sortCategory = SortBuilder.class;
    namedWriteables.add(new Entry(sortCategory, GeoDistanceSortBuilder.NAME, GeoDistanceSortBuilder::new));
    namedWriteables.add(new Entry(sortCategory, ScoreSortBuilder.NAME, ScoreSortBuilder::new));
    namedWriteables.add(new Entry(sortCategory, ScriptSortBuilder.NAME, ScriptSortBuilder::new));
    namedWriteables.add(new Entry(sortCategory, FieldSortBuilder.NAME, FieldSortBuilder::new));
}
/**
 * Feeds every extension produced by every plugin into the given registration callback,
 * preserving plugin order.
 */
private <T> void registerFromPlugin(List<SearchPlugin> plugins, Function<SearchPlugin, List<T>> producer, Consumer<T> consumer) {
    plugins.stream()
            .map(producer)
            .flatMap(List::stream)
            .forEachOrdered(consumer);
}
/**
 * Registers the three phrase-suggester smoothing models shipped with core.
 */
public static void registerSmoothingModels(List<Entry> namedWriteables) {
    namedWriteables.add(new Entry(SmoothingModel.class, StupidBackoff.NAME, StupidBackoff::new));
    namedWriteables.add(new Entry(SmoothingModel.class, LinearInterpolation.NAME, LinearInterpolation::new));
    namedWriteables.add(new Entry(SmoothingModel.class, Laplace.NAME, Laplace::new));
}
/**
 * Builds the immutable name -> {@link Suggester} map from the built-in suggesters plus any
 * contributed by plugins, and registers the phrase-suggester smoothing models. Registering a
 * suggester also records it as the stream reader for its {@link SuggestionBuilder} (see the
 * anonymous registry below).
 */
private Map<String, Suggester<?>> setupSuggesters(List<SearchPlugin> plugins) {
    registerSmoothingModels(namedWriteables);
    // Suggester<?> is weird - it is both a parser and a writeable reader, so registering one
    // here also adds it to namedWriteables as the reader for its SuggestionBuilder.
    NamedRegistry<Suggester<?>> suggesters = new NamedRegistry<Suggester<?>>("suggester") {
        @Override
        public void register(String name, Suggester<?> t) {
            super.register(name, t);
            namedWriteables.add(new Entry(SuggestionBuilder.class, name, t));
        }
    };
    suggesters.register("phrase", PhraseSuggester.INSTANCE);
    suggesters.register("term", TermSuggester.INSTANCE);
    suggesters.register("completion", CompletionSuggester.INSTANCE);
    suggesters.extractAndRegister(plugins, SearchPlugin::getSuggesters);
    return unmodifiableMap(suggesters.getRegistry());
}
/**
 * Builds the immutable name -> {@link Highlighter} map: the built-in highlighters first,
 * then anything contributed by plugins.
 */
private Map<String, Highlighter> setupHighlighters(Settings settings, List<SearchPlugin> plugins) {
    NamedRegistry<Highlighter> registry = new NamedRegistry<>("highlighter");
    registry.register("fvh", new FastVectorHighlighter(settings));
    registry.register("plain", new PlainHighlighter());
    registry.register("postings", new PostingsHighlighter());
    registry.extractAndRegister(plugins, SearchPlugin::getHighlighters);
    return unmodifiableMap(registry.getRegistry());
}
/**
 * Registers the built-in score functions (used by function_score) and any provided by
 * plugins, plus the standalone weight function.
 */
private void registerScoreFunctions(List<SearchPlugin> plugins) {
    registerScoreFunction(new ScoreFunctionSpec<>(ScriptScoreFunctionBuilder.NAME, ScriptScoreFunctionBuilder::new,
            ScriptScoreFunctionBuilder::fromXContent));
    registerScoreFunction(
            new ScoreFunctionSpec<>(GaussDecayFunctionBuilder.NAME, GaussDecayFunctionBuilder::new, GaussDecayFunctionBuilder.PARSER));
    registerScoreFunction(new ScoreFunctionSpec<>(LinearDecayFunctionBuilder.NAME, LinearDecayFunctionBuilder::new,
            LinearDecayFunctionBuilder.PARSER));
    registerScoreFunction(new ScoreFunctionSpec<>(ExponentialDecayFunctionBuilder.NAME, ExponentialDecayFunctionBuilder::new,
            ExponentialDecayFunctionBuilder.PARSER));
    registerScoreFunction(new ScoreFunctionSpec<>(RandomScoreFunctionBuilder.NAME, RandomScoreFunctionBuilder::new,
            RandomScoreFunctionBuilder::fromXContent));
    registerScoreFunction(new ScoreFunctionSpec<>(FieldValueFactorFunctionBuilder.NAME, FieldValueFactorFunctionBuilder::new,
            FieldValueFactorFunctionBuilder::fromXContent));
    // weight doesn't have its own parser, so every function supports it out of the box.
    // Can be a single function too when not associated to any other function, which is why it needs to be registered manually here.
    namedWriteables.add(new Entry(ScoreFunctionBuilder.class, WeightBuilder.NAME, WeightBuilder::new));
    registerFromPlugin(plugins, SearchPlugin::getScoreFunctions, this::registerScoreFunction);
}
/**
 * Registers one score function: its XContent parser and its builder's stream reader.
 */
private void registerScoreFunction(ScoreFunctionSpec<?> scoreFunction) {
    ParseField name = scoreFunction.getName();
    scoreFunctionParserRegistry.register(scoreFunction.getParser(), name);
    namedWriteables.add(new Entry(ScoreFunctionBuilder.class, name.getPreferredName(), scoreFunction.getReader()));
}
/**
 * Registers the built-in {@link DocValueFormat}s. Stateless formats are singletons, so their
 * readers ignore the stream and return the shared instance; DateTime and Decimal carry
 * per-instance state and are read back via their stream constructors.
 */
private void registerValueFormats() {
    registerValueFormat(DocValueFormat.BOOLEAN.getWriteableName(), in -> DocValueFormat.BOOLEAN);
    registerValueFormat(DocValueFormat.DateTime.NAME, DocValueFormat.DateTime::new);
    registerValueFormat(DocValueFormat.Decimal.NAME, DocValueFormat.Decimal::new);
    registerValueFormat(DocValueFormat.GEOHASH.getWriteableName(), in -> DocValueFormat.GEOHASH);
    registerValueFormat(DocValueFormat.IP.getWriteableName(), in -> DocValueFormat.IP);
    registerValueFormat(DocValueFormat.RAW.getWriteableName(), in -> DocValueFormat.RAW);
}
/**
 * Register a new {@link DocValueFormat} so it can be serialized by name over the wire.
 */
private void registerValueFormat(String name, Writeable.Reader<? extends DocValueFormat> reader) {
    Entry entry = new Entry(DocValueFormat.class, name, reader);
    namedWriteables.add(entry);
}
/**
 * Registers the built-in significance heuristics (used by the significant_terms aggregation)
 * and any provided by plugins.
 */
private void registerSignificanceHeuristics(List<SearchPlugin> plugins) {
    registerSignificanceHeuristic(new SearchExtensionSpec<>(ChiSquare.NAME, ChiSquare::new, ChiSquare.PARSER));
    registerSignificanceHeuristic(new SearchExtensionSpec<>(GND.NAME, GND::new, GND.PARSER));
    registerSignificanceHeuristic(new SearchExtensionSpec<>(JLHScore.NAME, JLHScore::new, JLHScore::parse));
    registerSignificanceHeuristic(new SearchExtensionSpec<>(MutualInformation.NAME, MutualInformation::new, MutualInformation.PARSER));
    registerSignificanceHeuristic(new SearchExtensionSpec<>(PercentageScore.NAME, PercentageScore::new, PercentageScore::parse));
    registerSignificanceHeuristic(new SearchExtensionSpec<>(ScriptHeuristic.NAME, ScriptHeuristic::new, ScriptHeuristic::parse));
    registerFromPlugin(plugins, SearchPlugin::getSignificanceHeuristics, this::registerSignificanceHeuristic);
}
/**
 * Registers one significance heuristic: its parser for the aggregation DSL and its
 * stream reader as a named writeable under the preferred name.
 *
 * @param spec the heuristic specification to register
 */
private void registerSignificanceHeuristic(SearchExtensionSpec<SignificanceHeuristic, SignificanceHeuristicParser> spec) {
    significanceHeuristicParserRegistry.register(spec.getParser(), spec.getName());
    namedWriteables.add(new Entry(SignificanceHeuristic.class, spec.getName().getPreferredName(), spec.getReader()));
}
/**
 * Registers the built-in moving-average models (used by the moving-average pipeline
 * aggregation) and then any models contributed by plugins.
 *
 * @param plugins installed search plugins that may contribute additional models
 */
private void registerMovingAverageModels(List<SearchPlugin> plugins) {
registerMovingAverageModel(new SearchExtensionSpec<>(SimpleModel.NAME, SimpleModel::new, SimpleModel.PARSER));
registerMovingAverageModel(new SearchExtensionSpec<>(LinearModel.NAME, LinearModel::new, LinearModel.PARSER));
registerMovingAverageModel(new SearchExtensionSpec<>(EwmaModel.NAME, EwmaModel::new, EwmaModel.PARSER));
registerMovingAverageModel(new SearchExtensionSpec<>(HoltLinearModel.NAME, HoltLinearModel::new, HoltLinearModel.PARSER));
registerMovingAverageModel(new SearchExtensionSpec<>(HoltWintersModel.NAME, HoltWintersModel::new, HoltWintersModel.PARSER));
registerFromPlugin(plugins, SearchPlugin::getMovingAverageModels, this::registerMovingAverageModel);
}
/**
 * Registers one moving-average model: its parser for the aggregation DSL and its
 * stream reader as a named writeable under the preferred name.
 *
 * @param spec the model specification to register
 */
private void registerMovingAverageModel(SearchExtensionSpec<MovAvgModel, MovAvgModel.AbstractModelParser> spec) {
    movingAverageModelParserRegistry.register(spec.getParser(), spec.getName());
    namedWriteables.add(new Entry(MovAvgModel.class, spec.getName().getPreferredName(), spec.getReader()));
}
/**
 * Registers the built-in fetch sub-phases (explain, doc values, script fields,
 * source filtering, version, matched queries, highlighting, parent field) and then
 * any sub-phases contributed by plugins.
 *
 * @param plugins installed search plugins that may contribute additional sub-phases
 */
private void registerFetchSubPhases(List<SearchPlugin> plugins) {
registerFetchSubPhase(new ExplainFetchSubPhase());
registerFetchSubPhase(new DocValueFieldsFetchSubPhase());
registerFetchSubPhase(new ScriptFieldsFetchSubPhase());
registerFetchSubPhase(new FetchSourceSubPhase());
registerFetchSubPhase(new VersionFetchSubPhase());
registerFetchSubPhase(new MatchedQueriesFetchSubPhase());
registerFetchSubPhase(new HighlightPhase(settings, highlighters));
registerFetchSubPhase(new ParentFieldSubFetchPhase());
// Plugins get a construction context exposing the highlighter registry.
FetchPhaseConstructionContext context = new FetchPhaseConstructionContext(highlighters);
registerFromPlugin(plugins, p -> p.getFetchSubPhases(context), this::registerFetchSubPhase);
}
/**
 * Adds a fetch sub-phase, rejecting a second registration of the same concrete class.
 *
 * @param subPhase the sub-phase to add; must not be {@code null}
 * @throws IllegalArgumentException if a sub-phase of the same class is already registered
 */
private void registerFetchSubPhase(FetchSubPhase subPhase) {
    final Class<?> newPhaseClass = subPhase.getClass();
    // Duplicate detection is by exact class, not by instance equality.
    for (FetchSubPhase registered : fetchSubPhases) {
        if (registered.getClass().equals(newPhaseClass)) {
            throw new IllegalArgumentException("FetchSubPhase [" + newPhaseClass + "] already registered");
        }
    }
    fetchSubPhases.add(requireNonNull(subPhase, "FetchSubPhase must not be null"));
}
/**
 * Registers every built-in query type (match, bool, term, span, geo, ...) and then
 * any queries contributed by plugins. Each registration wires both the query DSL
 * parser and the stream reader for the corresponding builder.
 *
 * @param plugins installed search plugins that may contribute additional queries
 */
private void registerQueryParsers(List<SearchPlugin> plugins) {
registerQuery(new QuerySpec<>(MatchQueryBuilder.QUERY_NAME_FIELD, MatchQueryBuilder::new, MatchQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(MatchPhraseQueryBuilder.NAME, MatchPhraseQueryBuilder::new, MatchPhraseQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(MatchPhrasePrefixQueryBuilder.NAME, MatchPhrasePrefixQueryBuilder::new,
MatchPhrasePrefixQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(MultiMatchQueryBuilder.NAME, MultiMatchQueryBuilder::new, MultiMatchQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(NestedQueryBuilder.NAME, NestedQueryBuilder::new, NestedQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(HasChildQueryBuilder.NAME, HasChildQueryBuilder::new, HasChildQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(HasParentQueryBuilder.NAME, HasParentQueryBuilder::new, HasParentQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(DisMaxQueryBuilder.NAME, DisMaxQueryBuilder::new, DisMaxQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(IdsQueryBuilder.NAME, IdsQueryBuilder::new, IdsQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(MatchAllQueryBuilder.NAME, MatchAllQueryBuilder::new, MatchAllQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(QueryStringQueryBuilder.NAME, QueryStringQueryBuilder::new, QueryStringQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(BoostingQueryBuilder.NAME, BoostingQueryBuilder::new, BoostingQueryBuilder::fromXContent));
// NOTE(review): global Lucene side effect configured here, in the middle of the
// registration list — applies the node setting for the max bool-clause count.
BooleanQuery.setMaxClauseCount(INDICES_MAX_CLAUSE_COUNT_SETTING.get(settings));
registerQuery(new QuerySpec<>(BoolQueryBuilder.NAME, BoolQueryBuilder::new, BoolQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(TermQueryBuilder.NAME, TermQueryBuilder::new, TermQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(TermsQueryBuilder.QUERY_NAME_FIELD, TermsQueryBuilder::new, TermsQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(FuzzyQueryBuilder.NAME, FuzzyQueryBuilder::new, FuzzyQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(RegexpQueryBuilder.NAME, RegexpQueryBuilder::new, RegexpQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(RangeQueryBuilder.NAME, RangeQueryBuilder::new, RangeQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(PrefixQueryBuilder.NAME, PrefixQueryBuilder::new, PrefixQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(WildcardQueryBuilder.NAME, WildcardQueryBuilder::new, WildcardQueryBuilder::fromXContent));
registerQuery(
new QuerySpec<>(ConstantScoreQueryBuilder.NAME, ConstantScoreQueryBuilder::new, ConstantScoreQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(SpanTermQueryBuilder.NAME, SpanTermQueryBuilder::new, SpanTermQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(SpanNotQueryBuilder.NAME, SpanNotQueryBuilder::new, SpanNotQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(SpanWithinQueryBuilder.NAME, SpanWithinQueryBuilder::new, SpanWithinQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(SpanContainingQueryBuilder.NAME, SpanContainingQueryBuilder::new,
SpanContainingQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(FieldMaskingSpanQueryBuilder.NAME, FieldMaskingSpanQueryBuilder::new,
FieldMaskingSpanQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(SpanFirstQueryBuilder.NAME, SpanFirstQueryBuilder::new, SpanFirstQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(SpanNearQueryBuilder.NAME, SpanNearQueryBuilder::new, SpanNearQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(SpanOrQueryBuilder.NAME, SpanOrQueryBuilder::new, SpanOrQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(MoreLikeThisQueryBuilder.QUERY_NAME_FIELD, MoreLikeThisQueryBuilder::new,
MoreLikeThisQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(WrapperQueryBuilder.NAME, WrapperQueryBuilder::new, WrapperQueryBuilder::fromXContent));
// TODO Remove IndicesQuery in 6.0
registerQuery(new QuerySpec<>(IndicesQueryBuilder.NAME, IndicesQueryBuilder::new, IndicesQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(CommonTermsQueryBuilder.NAME, CommonTermsQueryBuilder::new, CommonTermsQueryBuilder::fromXContent));
registerQuery(
new QuerySpec<>(SpanMultiTermQueryBuilder.NAME, SpanMultiTermQueryBuilder::new, SpanMultiTermQueryBuilder::fromXContent));
// function_score parsing needs the score-function registry built earlier.
registerQuery(new QuerySpec<>(FunctionScoreQueryBuilder.NAME, FunctionScoreQueryBuilder::new,
c -> FunctionScoreQueryBuilder.fromXContent(scoreFunctionParserRegistry, c)));
registerQuery(
new QuerySpec<>(SimpleQueryStringBuilder.NAME, SimpleQueryStringBuilder::new, SimpleQueryStringBuilder::fromXContent));
registerQuery(new QuerySpec<>(TypeQueryBuilder.NAME, TypeQueryBuilder::new, TypeQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(ScriptQueryBuilder.NAME, ScriptQueryBuilder::new, ScriptQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(GeoDistanceQueryBuilder.NAME, GeoDistanceQueryBuilder::new, GeoDistanceQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(GeoDistanceRangeQueryBuilder.NAME, GeoDistanceRangeQueryBuilder::new,
GeoDistanceRangeQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(GeoBoundingBoxQueryBuilder.QUERY_NAME_FIELD, GeoBoundingBoxQueryBuilder::new,
GeoBoundingBoxQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(GeohashCellQuery.NAME, GeohashCellQuery.Builder::new, GeohashCellQuery.Builder::fromXContent));
registerQuery(new QuerySpec<>(GeoPolygonQueryBuilder.NAME, GeoPolygonQueryBuilder::new, GeoPolygonQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(ExistsQueryBuilder.NAME, ExistsQueryBuilder::new, ExistsQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(MatchNoneQueryBuilder.NAME, MatchNoneQueryBuilder::new, MatchNoneQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(ParentIdQueryBuilder.NAME, ParentIdQueryBuilder::new, ParentIdQueryBuilder::fromXContent));
// geo_shape is only available when the optional JTS/Spatial4J jars are on the classpath.
if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
registerQuery(new QuerySpec<>(GeoShapeQueryBuilder.NAME, GeoShapeQueryBuilder::new, GeoShapeQueryBuilder::fromXContent));
}
registerFromPlugin(plugins, SearchPlugin::getQueries, this::registerQuery);
}
/**
 * Registers one query type: its parser (so it is recognizable in the query DSL)
 * and its stream reader as a named writeable under the preferred name.
 *
 * @param querySpec the query specification to register
 */
private void registerQuery(QuerySpec<?> querySpec) {
    queryParserRegistry.register(querySpec.getParser(), querySpec.getName());
    namedWriteables.add(new Entry(QueryBuilder.class, querySpec.getName().getPreferredName(), querySpec.getReader()));
}
}
| apache-2.0 |
marcelkoopman/rest.camel | project/rest.camel/src/main/java/nl/mkoopman/micro/services/rest/camel/model/impl/User.java | 460 | package nl.mkoopman.micro.services.rest.camel.model.impl;
import nl.mkoopman.micro.services.rest.camel.model.Model;
/**
 * Simple mutable model carrying a user's e-mail address and password.
 */
public class User implements Model {

    // E-mail address identifying the user; null until set.
    private String email;

    // Password supplied for the user; null until set.
    private String password;

    /**
     * Returns the user's e-mail address.
     *
     * @return the e-mail address, or {@code null} if not set
     */
    public String getEmail() {
        return this.email;
    }

    /**
     * Sets the user's e-mail address.
     *
     * @param email the e-mail address to store
     */
    public void setEmail(final String email) {
        this.email = email;
    }

    /**
     * Returns the user's password.
     *
     * @return the password, or {@code null} if not set
     */
    public String getPassword() {
        return this.password;
    }

    /**
     * Sets the user's password.
     *
     * @param password the password to store
     */
    public void setPassword(final String password) {
        this.password = password;
    }
}
| apache-2.0 |
dbpedia-spotlight/dbpedia-spotlight-model | rest/src/main/java/org/dbpedia/spotlight/web/rest/formats/NIFWrapper.java | 5140 | package org.dbpedia.spotlight.web.rest.formats;
import org.dbpedia.spotlight.model.DBpediaResourceOccurrence;
import org.dbpedia.spotlight.model.OntologyType;
import org.dbpedia.spotlight.model.SurfaceFormOccurrence;
import org.nlp2rdf.NIF;
import org.nlp2rdf.bean.NIFBean;
import org.nlp2rdf.bean.NIFType;
import org.nlp2rdf.nif21.impl.NIF21;
import java.util.ArrayList;
import java.util.List;
import static org.dbpedia.spotlight.web.rest.common.Constants.SLASH;
/**
 * Collects DBpedia Spotlight annotation results as NIF beans and serializes them
 * to a requested RDF format (Turtle, JSON-LD, N-Triples, or RDF/XML) via NIF 2.1.
 */
public class NIFWrapper {
// Spotlight configuration; supplies the annotator URL and JSON-LD context.
private SpotlightConfiguration configuration;
// Accumulated entity beans, in insertion order.
private List<NIFBean> entities = new ArrayList<>();
// The single context bean; overwritten each time context(...) is called.
private NIFBean beanContext;
// Base URI for all NIF resource identifiers; normalized to end with '/'.
private String baseURI;
/**
 * Creates a wrapper whose base URI is taken from the Spotlight configuration.
 */
public NIFWrapper(SpotlightConfiguration configuration) {
this.configuration = configuration;
this.baseURI = configuration.getSpotlightURL();
formatBaseURI();
}
/**
 * Creates a wrapper with an explicit base URI (trailing slash added if missing).
 */
public NIFWrapper(SpotlightConfiguration configuration, String baseURI) {
this.configuration = configuration;
this.baseURI = baseURI;
formatBaseURI();
}
/**
 * Builds the NIF context bean covering the whole mention text.
 * Replaces any previously built context bean.
 */
public void context(String mention) {
int beginIndex = 0;
int endIndex = mention.length();
NIFBean.NIFBeanBuilder contextBuilder = new NIFBean.NIFBeanBuilder();
contextBuilder.context(baseURI, beginIndex, endIndex).mention(mention).nifType(NIFType.CONTEXT);
beanContext = new NIFBean(contextBuilder);
}
// Ensures the base URI ends with a slash so offsets can be appended cleanly.
private void formatBaseURI() {
if (baseURI != null && !baseURI.isEmpty() &&
!SLASH.equals(baseURI.substring(baseURI.length() - 1))) {
baseURI = baseURI.concat(SLASH);
}
}
/**
 * Converts disambiguated resource occurrences into entity beans.
 * With an empty occurrence list only a context bean over {@code text} is built.
 * NOTE(review): a single NIFBeanBuilder is reused across all iterations; this is
 * only safe if new NIFBean(entity) snapshots the builder state — confirm.
 * NOTE(review): context(...) is re-invoked per occurrence, so the final context
 * bean reflects the last occurrence's context text.
 */
public void entityFromResource ( List<DBpediaResourceOccurrence> occs, String text) {
NIFBean.NIFBeanBuilder entity = new NIFBean.NIFBeanBuilder();
if (occs != null) {
if(occs.size() == 0){
this.context(text);
}else {
occs.forEach(resourceItem -> {
this.context(resourceItem.context().text());
entity.mention(resourceItem.surfaceForm().name());
entity.beginIndex(resourceItem.textOffset());
entity.endIndex(resourceItem.textOffset() + resourceItem.surfaceForm().name().length());
entity.annotator(configuration.getSpotlightURL());
entity.taIdentRef(resourceItem.resource().getFullUri());
// Collect the full URIs of all ontology types attached to the resource.
List<String> listTypes = new ArrayList<String>();
for (OntologyType otype : resourceItem.resource().getTypes()) {
listTypes.add(otype.getFullUri());
}
entity.types(listTypes);
entity.score(resourceItem.similarityScore());
entity.context(baseURI, resourceItem.textOffset(), resourceItem.textOffset() +
resourceItem.surfaceForm().name().length());
entities.add(new NIFBean(entity));
});
}
}
}
/**
 * Converts spotted (not yet disambiguated) surface forms into entity beans.
 * Unlike entityFromResource, no identifier/type/score is attached (see the
 * commented-out lines). Same builder-reuse caveat as entityFromResource.
 */
public void entity( List<SurfaceFormOccurrence> occs) {
NIFBean.NIFBeanBuilder entity = new NIFBean.NIFBeanBuilder();
if (occs != null) {
occs.forEach(resourceItem -> {
this.context(resourceItem.context().text());
entity.mention(resourceItem.surfaceForm().name());
entity.beginIndex(resourceItem.textOffset());
entity.endIndex(resourceItem.textOffset() + resourceItem.surfaceForm().name().length());
entity.annotator(configuration.getSpotlightURL());
//entity.taIdentRef(resourceItem.getUri());
// entity.types(resourceItem.typesList());
//entity.score(resourceItem.score());
entity.context(baseURI, resourceItem.textOffset(), resourceItem.textOffset() +
resourceItem.surfaceForm().name().length());
entities.add(new NIFBean(entity));
});
}
}
/**
 * Serializes the collected beans in the requested format.
 * NOTE(review): entity beans are only included when a context bean exists;
 * without a prior context(...) call the output is built from an empty list.
 */
public String getNIF(String outputFormat) {
List<NIFBean> entitiesToProcess = new ArrayList<>(entities.size());
if(beanContext != null) {
entitiesToProcess.add(beanContext);
entitiesToProcess.addAll(entities);
}
NIF nif = new NIF21(entitiesToProcess);
return process(nif, outputFormat);
}
// Maps the requested media type to a NIF serialization; defaults to Turtle.
private String process(NIF nif, String outputFormat) {
if (outputFormat != null && SemanticMediaType.TEXT_TURTLE.equalsIgnoreCase(outputFormat)) {
return nif.getTurtle();
} else if (outputFormat != null && SemanticMediaType.APPLICATION_LD_JSON.equalsIgnoreCase(outputFormat)) {
return nif.getJSONLD(configuration.getJsonContext());
} else if (outputFormat != null && SemanticMediaType.APPLICATION_N_TRIPLES.equalsIgnoreCase(outputFormat)) {
return nif.getNTriples();
} else if (outputFormat != null && SemanticMediaType.APPLICATION_XML_RDF.equalsIgnoreCase(outputFormat)){
return nif.getRDFxml();
}
return nif.getTurtle();
}
}
| apache-2.0 |
diffplug/spotless | plugin-gradle/src/test/java/com/diffplug/gradle/spotless/PalantirJavaFormatIntegrationTest.java | 1501 | /*
* Copyright 2022 DiffPlug
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.diffplug.gradle.spotless;
import java.io.IOException;
import org.junit.jupiter.api.Test;
/**
 * End-to-end check that the Gradle plugin applies palantir-java-format and stays
 * up-to-date, including after switching the formatter version.
 */
class PalantirJavaFormatIntegrationTest extends GradleIntegrationHarness {
@Test
void integration() throws IOException {
// Minimal build script targeting a single file with palantirJavaFormat 1.1.0.
setFile("build.gradle").toLines(
"plugins {",
" id 'com.diffplug.spotless'",
"}",
"repositories { mavenCentral() }",
"",
"spotless {",
" java {",
" target file('test.java')",
" palantirJavaFormat('1.1.0')",
" }",
"}");
setFile("test.java").toResource("java/palantirjavaformat/JavaCodeUnformatted.test");
// Running spotlessApply must rewrite the file to the expected formatted output.
gradleRunner().withArguments("spotlessApply").build();
assertFile("test.java").sameAsResource("java/palantirjavaformat/JavaCodeFormatted.test");
// A second run must be UP-TO-DATE (incremental-build contract).
checkRunsThenUpToDate();
// Changing the formatter version must invalidate the task, then go up-to-date again.
replace("build.gradle",
"palantirJavaFormat('1.1.0')",
"palantirJavaFormat('1.0.1')");
checkRunsThenUpToDate();
}
}
| apache-2.0 |
aws/aws-sdk-java | aws-java-sdk-iotanalytics/src/main/java/com/amazonaws/services/iotanalytics/model/DatastoreIotSiteWiseMultiLayerStorageSummary.java | 5250 | /*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.iotanalytics.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
* <p>
* Contains information about the data store that you manage, which stores data used by IoT SiteWise.
* </p>
*
* @see <a
* href="http://docs.aws.amazon.com/goto/WebAPI/iotanalytics-2017-11-27/DatastoreIotSiteWiseMultiLayerStorageSummary"
* target="_top">AWS API Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DatastoreIotSiteWiseMultiLayerStorageSummary implements Serializable, Cloneable, StructuredPojo {
/**
 * <p>
 * Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage.
 * </p>
 */
private IotSiteWiseCustomerManagedDatastoreS3StorageSummary customerManagedS3Storage;
/**
 * <p>
 * Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage.
 * </p>
 *
 * @param customerManagedS3Storage
 *        Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage.
 */
public void setCustomerManagedS3Storage(IotSiteWiseCustomerManagedDatastoreS3StorageSummary customerManagedS3Storage) {
this.customerManagedS3Storage = customerManagedS3Storage;
}
/**
 * <p>
 * Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage.
 * </p>
 *
 * @return Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage.
 */
public IotSiteWiseCustomerManagedDatastoreS3StorageSummary getCustomerManagedS3Storage() {
return this.customerManagedS3Storage;
}
/**
 * <p>
 * Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage.
 * </p>
 *
 * @param customerManagedS3Storage
 *        Used to store data used by IoT SiteWise in an Amazon S3 bucket that you manage.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public DatastoreIotSiteWiseMultiLayerStorageSummary withCustomerManagedS3Storage(
IotSiteWiseCustomerManagedDatastoreS3StorageSummary customerManagedS3Storage) {
setCustomerManagedS3Storage(customerManagedS3Storage);
return this;
}
/**
 * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
 * redacted from this string using a placeholder value.
 *
 * @return A string representation of this object.
 *
 * @see java.lang.Object#toString()
 */
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getCustomerManagedS3Storage() != null)
sb.append("CustomerManagedS3Storage: ").append(getCustomerManagedS3Storage());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof DatastoreIotSiteWiseMultiLayerStorageSummary == false)
return false;
DatastoreIotSiteWiseMultiLayerStorageSummary other = (DatastoreIotSiteWiseMultiLayerStorageSummary) obj;
// XOR: exactly one of the two references is null -> not equal.
if (other.getCustomerManagedS3Storage() == null ^ this.getCustomerManagedS3Storage() == null)
return false;
if (other.getCustomerManagedS3Storage() != null && other.getCustomerManagedS3Storage().equals(this.getCustomerManagedS3Storage()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getCustomerManagedS3Storage() == null) ? 0 : getCustomerManagedS3Storage().hashCode());
return hashCode;
}
@Override
public DatastoreIotSiteWiseMultiLayerStorageSummary clone() {
try {
return (DatastoreIotSiteWiseMultiLayerStorageSummary) super.clone();
} catch (CloneNotSupportedException e) {
// Cannot happen: this class implements Cloneable; wrap to keep the signature clean.
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
com.amazonaws.services.iotanalytics.model.transform.DatastoreIotSiteWiseMultiLayerStorageSummaryMarshaller.getInstance().marshall(this,
protocolMarshaller);
}
}
| apache-2.0 |
nickman/HeliosStreams | sql-compiler/src/main/java/com/heliosapm/streams/sqlbinder/TagPredicateCache.java | 10318 | /**
* Helios, OpenSource Monitoring
* Brought to you by the Helios Development Group
*
* Copyright 2007, Helios Development Group and individual contributors
* as indicated by the @author tags. See the copyright.txt file in the
* distribution for a full listing of individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*
*/
package com.heliosapm.streams.sqlbinder;
import java.sql.ResultSet;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.Callable;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.management.ObjectName;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.heliosapm.streams.sqlbinder.SQLWorker.ResultSetHandler;
import com.heliosapm.utils.config.ConfigurationHelper;
import com.heliosapm.utils.jmx.JMXHelper;
/**
* <p>Title: TagPredicateCache</p>
* <p>Description: Cache for tag query predicates keyed by the deep hash code of the raw tag predicates</p>
* <p>Company: Helios Development Group LLC</p>
* @author Whitehead (nwhitehead AT heliosdev DOT org)
* <p><code>com.heliosapm.streams.sqlbinder.TagPredicateCache</code></p>
*/
public class TagPredicateCache {
    /** Instance Logger */
    protected final Logger log = LogManager.getLogger(getClass());
    /** A SQLWorker to execute lookups */
    protected final SQLWorker sqlWorker;
    /** The underlying guava cache of UIDs keyed by the deep hash code of the raw predicates */
    protected final Cache<Integer, String[]> cache;
    /** The cache stats ObjectName if stats are enabled */
    protected final ObjectName objectName;

    /** The configuration property name for the maximum size of the cache */
    public static final String MAX_SIZE_PROP = "helios.search.catalog.predicatecache.maxsize";
    /** The configuration property name for the concurrency of the cache */
    public static final String CONCURRENCY_PROP = "helios.search.catalog.predicatecache.concurrency";
    /** The configuration property name for the stats enablement of the cache */
    public static final String STATS_ENABLED_PROP = "helios.search.catalog.predicatecache.stats";
    /** The default maximum size of the cache */
    public static final long DEFAULT_MAX_SIZE = 1000;
    /** The default concurrency of the cache */
    public static final int DEFAULT_CONCURRENCY = 4;
    /** The default stats enablement of the cache */
    public static final boolean DEFAULT_STATS_ENABLED = true;

    /** The guava CacheBuilder spec template used when stats recording is enabled */
    public static final String SPEC_TEMPLATE_WSTATS = "concurrencyLevel=%s,initialCapacity=%s,maximumSize=%s,recordStats";
    /** The guava CacheBuilder spec template used when stats recording is disabled */
    public static final String SPEC_TEMPLATE_NOSTATS = "concurrencyLevel=%s,initialCapacity=%s,maximumSize=%s";

    /** The load sql fragment to be UNION ALLed */
    public static final String LOAD_SQL = "SELECT DISTINCT P.XUID FROM TSD_TAGPAIR P, TSD_TAGK K, TSD_TAGV V WHERE P.TAGK = K.XUID AND P.TAGV = V.XUID AND ";
    /** The dynamic binding SQL block for tag keys */
    public static final String TAGK_SQL_BLOCK = "K.NAME %s ?";
    /** The dynamic binding SQL block for tag values */
    public static final String TAGV_SQL_BLOCK = "V.NAME %s ?";

    /**
     * Creates a new TagPredicateCache.
     * The cache size, concurrency and stats flags are read from system/env properties
     * (see {@link #MAX_SIZE_PROP}, {@link #CONCURRENCY_PROP}, {@link #STATS_ENABLED_PROP}).
     * When stats are enabled a {@link CacheStatistics} MBean is registered.
     * @param sqlWorker A SQLWorker to execute lookups and inserts
     */
    public TagPredicateCache(SQLWorker sqlWorker) {
        this.sqlWorker = sqlWorker;
        final long maxSize = ConfigurationHelper.getLongSystemThenEnvProperty(MAX_SIZE_PROP, DEFAULT_MAX_SIZE);
        final int concurrency = ConfigurationHelper.getIntSystemThenEnvProperty(CONCURRENCY_PROP, DEFAULT_CONCURRENCY);
        final boolean stats = ConfigurationHelper.getBooleanSystemThenEnvProperty(STATS_ENABLED_PROP, DEFAULT_STATS_ENABLED);
        final String spec = String.format(stats ? SPEC_TEMPLATE_WSTATS : SPEC_TEMPLATE_NOSTATS, concurrency, 100, maxSize);
        cache = CacheBuilder.from(spec).build();
        if(stats) {
            objectName = JMXHelper.objectName(new StringBuilder(getClass().getPackage().getName()).append(":service=TagPredicateCache"));
            JMXHelper.registerMBean(objectName, new CacheStatistics(cache, objectName));
        } else {
            objectName = null;
        }
    }

    /**
     * Creates a new, empty {@link PredicateBuilder} bound to this cache.
     * @return a new predicate builder
     */
    public PredicateBuilder newPredicateBuilder() {
        return new PredicateBuilder();
    }

    /**
     * Cache loader that builds and executes the INTERSECTed tag-pair query for a
     * {@link PredicateBuilder}, accumulating the distinct XUIDs from the result set.
     */
    private class PredicateRetriever implements Callable<String[]>, ResultSetHandler {
        /** The builder whose pair predicates drive the generated SQL */
        final PredicateBuilder pb;
        /** Distinct XUIDs in result-set order */
        final Set<String> results = new LinkedHashSet<String>();

        public PredicateRetriever(PredicateBuilder pb) {
            this.pb = pb;
        }

        /**
         * Builds one LOAD_SQL block per tag pair, INTERSECTs them, executes the
         * query and returns the collected XUIDs.
         * @return the distinct XUIDs matching all tag pair predicates
         */
        @Override
        public String[] call() throws Exception {
            StringBuilder b = new StringBuilder();
            boolean first = true;
            List<Object> binds = new ArrayList<Object>();
            for(Map.Entry<String, String> entry: pb.pairPredicates.entrySet()) {
                if(first) {
                    first = false;
                } else {
                    b.append(" INTERSECT ");
                }
                b.append(LOAD_SQL);
                b.append("(")
                    .append("(").append(expandPredicate(entry.getKey(), TAGK_SQL_BLOCK, binds)).append(")")
                    .append(" AND ")
                    .append("(").append(expandPredicate(entry.getValue(), TAGV_SQL_BLOCK, binds)).append(")")
                    .append(") ");
            }
            // fillInSQL is for logging only; the real statement uses bind variables.
            log.info("Executing SQL [{}]", fillInSQL(b.toString(), binds));
            sqlWorker.executeQuery(b.toString(), this, binds.toArray(new Object[0]));
            return results.toArray(new String[results.size()]);
        }

        /**
         * {@inheritDoc}
         * @see com.heliosapm.streams.sqlbinder.SQLWorker.ResultSetHandler#onRow(int, int, java.lang.Object[])
         */
        @Override
        public boolean onRow(final int rowId, final int columnCount, final ResultSet rset) {
            try {
                results.add(rset.getString(1));
            } catch (Exception ex) {
                ex.printStackTrace(System.err);
            }
            return true;
        }
    }

    /** Matches a single JDBC bind placeholder */
    private static final Pattern Q_PATTERN = Pattern.compile("\\?");

    /**
     * Substitutes bind values into a SQL string for display/logging purposes,
     * quoting character sequences. Not intended for execution.
     * @param sql the SQL containing {@code ?} placeholders
     * @param binds the bind values, in placeholder order
     * @return the SQL with placeholders replaced by rendered literals
     */
    public static String fillInSQL(String sql, List<Object> binds) {
        for (final Object bind : binds) {
            final String literal = (bind instanceof CharSequence)
                    ? "'" + bind.toString() + "'"
                    : String.valueOf(bind);
            // quoteReplacement: a bind containing '\' or '$' must not be interpreted
            // as a regex group reference by replaceFirst.
            sql = Q_PATTERN.matcher(sql).replaceFirst(Matcher.quoteReplacement(literal));
        }
        return sql;
    }

    /**
     * Accumulates tag predicates and resolves them to cached XUID arrays.
     * Note: only {@link #appendTags(Map) pair predicates} drive the generated
     * query (see {@link PredicateRetriever#call()}); the key/value-only sets are
     * retained for API compatibility but do not participate in the lookup.
     */
    public class PredicateBuilder {
        /** Standalone key predicates (retained for compatibility; unused by the query) */
        final TreeSet<String> keyPredicates = new TreeSet<String>();
        /** Standalone value predicates (retained for compatibility; unused by the query) */
        final TreeSet<String> valuePredicates = new TreeSet<String>();
        /** key -> value predicate pairs; these drive the generated SQL */
        final TreeMap<String, String> pairPredicates = new TreeMap<String, String>();
        /** Flattened predicate snapshot backing the hash; null means "recompute" */
        private volatile String[] preds = {};

        public PredicateBuilder appendKeys(String...keys) {
            for(String s: keys) {
                if(s!=null && !s.trim().isEmpty()) {
                    if(keyPredicates.add(s.trim())) preds = null;
                }
            }
            return this;
        }

        public PredicateBuilder appendValues(String...values) {
            for(String s: values) {
                if(s!=null && !s.trim().isEmpty()) {
                    if(valuePredicates.add(s.trim())) preds = null;
                }
            }
            return this;
        }

        public PredicateBuilder appendTags(Map<String, String> tags) {
            if(tags==null || tags.isEmpty()) return this;
            for(Map.Entry<String, String> e: tags.entrySet()) {
                pairPredicates.put(e.getKey().trim(), e.getValue().trim());
            }
            preds = null;
            return this;
        }

        /**
         * Computes the cache key from the tag pairs that actually drive the query.
         * FIX: the previous implementation hashed only the standalone value set,
         * so two builders with identical values but different key-to-value pairings
         * collided on the same cache entry and returned each other's XUIDs.
         */
        @Override
        public int hashCode() {
            if(preds==null) {
                final String[] flattened = new String[pairPredicates.size() * 2];
                int idx = 0;
                for(Map.Entry<String, String> entry: pairPredicates.entrySet()) {
                    flattened[idx++] = entry.getKey();
                    flattened[idx++] = entry.getValue();
                }
                preds = flattened;
            }
            return preds.length==0 ? 0 : Arrays.deepHashCode(preds);
        }

        /**
         * Resolves the accumulated predicates, loading through the cache on a miss.
         * @return the distinct XUIDs matching all tag pair predicates
         * @throws Exception if the underlying query fails
         */
        public String[] get() throws Exception {
            return cache.get(hashCode(), new PredicateRetriever(this));
        }

        private PredicateBuilder() {}
    }

    /**
     * Expands a SQL predicate for wildcards and multis
     * @param value The value expression
     * @param predicateBase The constant predicate base format
     * @param binds The bind variable accumulator
     * @return the expanded predicate
     */
    public static final String expandPredicate(final String value, final String predicateBase, final List<Object> binds) {
        final StringTokenizer st = new StringTokenizer(value.replace(" ", ""), "|", false);
        final int segmentCount = st.countTokens();
        if(segmentCount<1) throw new RuntimeException("Failed to parse expression [" + value + "]. Segment count was 0");
        if(segmentCount==1) {
            String val = st.nextToken();
            binds.add(val.replace('*', '%'));
            return predicateBase.replace("%s", val.indexOf('*')==-1 ? "=" : "LIKE");
        }
        StringBuilder b = new StringBuilder();
        for(int i = 0; i < segmentCount; i++) {
            if(i!=0) b.append(" OR ");
            String val = st.nextToken();
            binds.add(val.replace('*', '%'));
            b.append(predicateBase.replace("%s", val.indexOf('*')==-1 ? "=" : "LIKE"));
        }
        return b.toString();
    }
}
| apache-2.0 |
habuma/spring-boot | spring-boot-project/spring-boot-test/src/main/java/org/springframework/boot/test/web/client/TestRestTemplate.java | 48129 | /*
* Copyright 2012-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.test.web.client;
import java.io.IOException;
import java.lang.reflect.Field;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.http.client.HttpClient;
import org.apache.http.client.config.CookieSpecs;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.config.RequestConfig.Builder;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.conn.ssl.TrustSelfSignedStrategy;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.protocol.HttpContext;
import org.apache.http.ssl.SSLContextBuilder;
import org.springframework.boot.web.client.RestTemplateBuilder;
import org.springframework.boot.web.client.RootUriTemplateHandler;
import org.springframework.core.ParameterizedTypeReference;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpMethod;
import org.springframework.http.RequestEntity;
import org.springframework.http.ResponseEntity;
import org.springframework.http.client.ClientHttpRequestFactory;
import org.springframework.http.client.ClientHttpRequestInterceptor;
import org.springframework.http.client.ClientHttpResponse;
import org.springframework.http.client.HttpComponentsClientHttpRequestFactory;
import org.springframework.http.client.InterceptingClientHttpRequestFactory;
import org.springframework.http.client.support.BasicAuthorizationInterceptor;
import org.springframework.util.Assert;
import org.springframework.util.ReflectionUtils;
import org.springframework.web.client.DefaultResponseErrorHandler;
import org.springframework.web.client.RequestCallback;
import org.springframework.web.client.ResponseExtractor;
import org.springframework.web.client.RestClientException;
import org.springframework.web.client.RestTemplate;
import org.springframework.web.util.DefaultUriBuilderFactory;
import org.springframework.web.util.UriTemplateHandler;
/**
* Convenient alternative of {@link RestTemplate} that is suitable for integration tests.
* They are fault tolerant, and optionally can carry Basic authentication headers. If
* Apache Http Client 4.3.2 or better is available (recommended) it will be used as the
* client, and by default configured to ignore cookies and redirects.
* <p>
* Note: To prevent injection problems this class intentionally does not extend
* {@link RestTemplate}. If you need access to the underlying {@link RestTemplate} use
* {@link #getRestTemplate()}.
* <p>
* If you are using the
* {@link org.springframework.boot.test.context.SpringBootTest @SpringBootTest}
* annotation, a {@link TestRestTemplate} is automatically available and can be
* {@code @Autowired} into your test. If you need customizations (for example to adding
* additional message converters) use a {@link RestTemplateBuilder} {@code @Bean}.
*
* @author Dave Syer
* @author Phillip Webb
* @author Andy Wilkinson
* @author Kristine Jetzke
* @since 1.4.0
*/
public class TestRestTemplate {
	// The wrapped RestTemplate that performs all actual HTTP exchanges.
	private final RestTemplate restTemplate;
	// Options to apply when the Apache HTTP Client based request factory is used.
	private final HttpClientOption[] httpClientOptions;
	/**
	 * Create a new {@link TestRestTemplate} instance.
	 * <p>
	 * Equivalent to calling the four-argument constructor with no credentials and no
	 * additional HTTP client options.
	 * @param restTemplateBuilder builder used to configure underlying
	 * {@link RestTemplate}
	 * @since 1.4.1
	 */
	public TestRestTemplate(RestTemplateBuilder restTemplateBuilder) {
		this(restTemplateBuilder, null, null);
	}
	/**
	 * Create a new {@link TestRestTemplate} instance without Basic authentication.
	 * <p>
	 * A default {@link RestTemplateBuilder} is used to build the underlying
	 * {@link RestTemplate}.
	 * @param httpClientOptions client options to use if the Apache HTTP Client is used
	 */
	public TestRestTemplate(HttpClientOption... httpClientOptions) {
		this(null, null, httpClientOptions);
	}
	/**
	 * Create a new {@link TestRestTemplate} instance with the specified credentials.
	 * <p>
	 * A default {@link RestTemplateBuilder} is used to build the underlying
	 * {@link RestTemplate}.
	 * @param username the username to use (or {@code null})
	 * @param password the password (or {@code null})
	 * @param httpClientOptions client options to use if the Apache HTTP Client is used
	 */
	public TestRestTemplate(String username, String password,
			HttpClientOption... httpClientOptions) {
		this(new RestTemplateBuilder(), username, password, httpClientOptions);
	}
/**
* Create a new {@link TestRestTemplate} instance with the specified credentials.
* @param restTemplateBuilder builder used to configure underlying
* {@link RestTemplate}
* @param username the username to use (or {@code null})
* @param password the password (or {@code null})
* @param httpClientOptions client options to use if the Apache HTTP Client is used
* @since 2.0.0
*/
public TestRestTemplate(RestTemplateBuilder restTemplateBuilder, String username,
String password, HttpClientOption... httpClientOptions) {
this(restTemplateBuilder == null ? null : restTemplateBuilder.build(), username,
password, httpClientOptions);
}
private TestRestTemplate(RestTemplate restTemplate, String username, String password,
HttpClientOption... httpClientOptions) {
Assert.notNull(restTemplate, "RestTemplate must not be null");
this.httpClientOptions = httpClientOptions;
if (getRequestFactoryClass(restTemplate).isAssignableFrom(
HttpComponentsClientHttpRequestFactory.class)) {
restTemplate.setRequestFactory(
new CustomHttpComponentsClientHttpRequestFactory(httpClientOptions));
}
addAuthentication(restTemplate, username, password);
restTemplate.setErrorHandler(new NoOpResponseErrorHandler());
this.restTemplate = restTemplate;
}
private Class<? extends ClientHttpRequestFactory> getRequestFactoryClass(
RestTemplate restTemplate) {
ClientHttpRequestFactory requestFactory = restTemplate.getRequestFactory();
if (InterceptingClientHttpRequestFactory.class
.isAssignableFrom(requestFactory.getClass())) {
Field requestFactoryField = ReflectionUtils.findField(RestTemplate.class,
"requestFactory");
ReflectionUtils.makeAccessible(requestFactoryField);
requestFactory = (ClientHttpRequestFactory) ReflectionUtils
.getField(requestFactoryField, restTemplate);
}
return requestFactory.getClass();
}
private void addAuthentication(RestTemplate restTemplate, String username,
String password) {
if (username == null) {
return;
}
List<ClientHttpRequestInterceptor> interceptors = restTemplate.getInterceptors();
if (interceptors == null) {
interceptors = Collections.emptyList();
}
interceptors = new ArrayList<>(interceptors);
interceptors.removeIf(BasicAuthorizationInterceptor.class::isInstance);
interceptors.add(new BasicAuthorizationInterceptor(username, password));
restTemplate.setInterceptors(interceptors);
}
	/**
	 * Configure the {@link UriTemplateHandler} to use to expand URI templates. By default
	 * the {@link DefaultUriBuilderFactory} is used which relies on Spring's URI template
	 * support and exposes several useful properties that customize its behavior for
	 * encoding and for prepending a common base URL. An alternative implementation may be
	 * used to plug an external URI template library.
	 * <p>
	 * Delegates directly to the underlying {@link RestTemplate}.
	 * @param handler the URI template handler to use
	 */
	public void setUriTemplateHandler(UriTemplateHandler handler) {
		this.restTemplate.setUriTemplateHandler(handler);
	}
/**
* Returns the root URI applied by a {@link RootUriTemplateHandler} or {@code ""} if
* the root URI is not available.
* @return the root URI
*/
public String getRootUri() {
UriTemplateHandler uriTemplateHandler = this.restTemplate.getUriTemplateHandler();
if (uriTemplateHandler instanceof RootUriTemplateHandler) {
return ((RootUriTemplateHandler) uriTemplateHandler).getRootUri();
}
return "";
}
	/**
	 * Retrieve a representation by doing a GET on the specified URL. The response (if
	 * any) is converted and returned.
	 * <p>
	 * URI Template variables are expanded using the given URI variables, if any.
	 * @param url the URL
	 * @param responseType the type of the return value
	 * @param urlVariables the variables to expand the template
	 * @param <T> the type of the return value
	 * @return the converted object
	 * @throws RestClientException on client-side HTTP error
	 * @see RestTemplate#getForObject(String, Class, Object...)
	 */
	public <T> T getForObject(String url, Class<T> responseType, Object... urlVariables)
			throws RestClientException {
		return this.restTemplate.getForObject(url, responseType, urlVariables);
	}
	/**
	 * Retrieve a representation by doing a GET on the URI template. The response (if any)
	 * is converted and returned.
	 * <p>
	 * URI Template variables are expanded using the given map.
	 * @param url the URL
	 * @param responseType the type of the return value
	 * @param urlVariables the map containing variables for the URI template
	 * @param <T> the type of the return value
	 * @return the converted object
	 * @throws RestClientException on client-side HTTP error
	 * @see RestTemplate#getForObject(String, Class, java.util.Map)
	 */
	public <T> T getForObject(String url, Class<T> responseType,
			Map<String, ?> urlVariables) throws RestClientException {
		return this.restTemplate.getForObject(url, responseType, urlVariables);
	}
	/**
	 * Retrieve a representation by doing a GET on the URL. The response (if any) is
	 * converted and returned.
	 * <p>
	 * The configured root URI (if any) is applied to the given URL.
	 * @param url the URL
	 * @param responseType the type of the return value
	 * @param <T> the type of the return value
	 * @return the converted object
	 * @throws RestClientException on client-side HTTP error
	 * @see RestTemplate#getForObject(java.net.URI, java.lang.Class)
	 */
	public <T> T getForObject(URI url, Class<T> responseType) throws RestClientException {
		return this.restTemplate.getForObject(applyRootUriIfNecessary(url), responseType);
	}
	/**
	 * Retrieve an entity by doing a GET on the specified URL. The response is converted
	 * and stored in an {@link ResponseEntity}.
	 * <p>
	 * URI Template variables are expanded using the given URI variables, if any.
	 * @param url the URL
	 * @param responseType the type of the return value
	 * @param urlVariables the variables to expand the template
	 * @param <T> the type of the return value
	 * @return the entity
	 * @throws RestClientException on client-side HTTP error
	 * @see RestTemplate#getForEntity(java.lang.String, java.lang.Class,
	 * java.lang.Object[])
	 */
	public <T> ResponseEntity<T> getForEntity(String url, Class<T> responseType,
			Object... urlVariables) throws RestClientException {
		return this.restTemplate.getForEntity(url, responseType, urlVariables);
	}
	/**
	 * Retrieve a representation by doing a GET on the URI template. The response is
	 * converted and stored in an {@link ResponseEntity}.
	 * <p>
	 * URI Template variables are expanded using the given map.
	 * @param url the URL
	 * @param responseType the type of the return value
	 * @param urlVariables the map containing variables for the URI template
	 * @param <T> the type of the return value
	 * @return the converted object
	 * @throws RestClientException on client-side HTTP error
	 * @see RestTemplate#getForEntity(java.lang.String, java.lang.Class, java.util.Map)
	 */
	public <T> ResponseEntity<T> getForEntity(String url, Class<T> responseType,
			Map<String, ?> urlVariables) throws RestClientException {
		return this.restTemplate.getForEntity(url, responseType, urlVariables);
	}
	/**
	 * Retrieve a representation by doing a GET on the URL. The response is converted and
	 * stored in an {@link ResponseEntity}.
	 * <p>
	 * The configured root URI (if any) is applied to the given URL.
	 * @param url the URL
	 * @param responseType the type of the return value
	 * @param <T> the type of the return value
	 * @return the converted object
	 * @throws RestClientException on client-side HTTP error
	 * @see RestTemplate#getForEntity(java.net.URI, java.lang.Class)
	 */
	public <T> ResponseEntity<T> getForEntity(URI url, Class<T> responseType)
			throws RestClientException {
		return this.restTemplate.getForEntity(applyRootUriIfNecessary(url), responseType);
	}
	/**
	 * Retrieve all headers of the resource specified by the URI template.
	 * <p>
	 * URI Template variables are expanded using the given URI variables, if any.
	 * @param url the URL
	 * @param urlVariables the variables to expand the template
	 * @return all HTTP headers of that resource
	 * @throws RestClientException on client-side HTTP error
	 * @see RestTemplate#headForHeaders(java.lang.String, java.lang.Object[])
	 */
	public HttpHeaders headForHeaders(String url, Object... urlVariables)
			throws RestClientException {
		return this.restTemplate.headForHeaders(url, urlVariables);
	}
	/**
	 * Retrieve all headers of the resource specified by the URI template.
	 * <p>
	 * URI Template variables are expanded using the given map.
	 * @param url the URL
	 * @param urlVariables the map containing variables for the URI template
	 * @return all HTTP headers of that resource
	 * @throws RestClientException on client-side HTTP error
	 * @see RestTemplate#headForHeaders(java.lang.String, java.util.Map)
	 */
	public HttpHeaders headForHeaders(String url, Map<String, ?> urlVariables)
			throws RestClientException {
		return this.restTemplate.headForHeaders(url, urlVariables);
	}
	/**
	 * Retrieve all headers of the resource specified by the URL.
	 * <p>
	 * The configured root URI (if any) is applied to the given URL.
	 * @param url the URL
	 * @return all HTTP headers of that resource
	 * @throws RestClientException on client-side HTTP error
	 * @see RestTemplate#headForHeaders(java.net.URI)
	 */
	public HttpHeaders headForHeaders(URI url) throws RestClientException {
		return this.restTemplate.headForHeaders(applyRootUriIfNecessary(url));
	}
	/**
	 * Create a new resource by POSTing the given object to the URI template, and returns
	 * the value of the {@code Location} header. This header typically indicates where the
	 * new resource is stored.
	 * <p>
	 * URI Template variables are expanded using the given URI variables, if any.
	 * <p>
	 * The {@code request} parameter can be a {@link HttpEntity} in order to add
	 * additional HTTP headers to the request.
	 * @param url the URL
	 * @param request the Object to be POSTed, may be {@code null}
	 * @param urlVariables the variables to expand the template
	 * @return the value for the {@code Location} header
	 * @throws RestClientException on client-side HTTP error
	 * @see HttpEntity
	 * @see RestTemplate#postForLocation(java.lang.String, java.lang.Object,
	 * java.lang.Object[])
	 */
	public URI postForLocation(String url, Object request, Object... urlVariables)
			throws RestClientException {
		return this.restTemplate.postForLocation(url, request, urlVariables);
	}
	/**
	 * Create a new resource by POSTing the given object to the URI template, and returns
	 * the value of the {@code Location} header. This header typically indicates where the
	 * new resource is stored.
	 * <p>
	 * URI Template variables are expanded using the given map.
	 * <p>
	 * The {@code request} parameter can be a {@link HttpEntity} in order to add
	 * additional HTTP headers to the request.
	 * @param url the URL
	 * @param request the Object to be POSTed, may be {@code null}
	 * @param urlVariables the map containing variables for the URI template
	 * @return the value for the {@code Location} header
	 * @throws RestClientException on client-side HTTP error
	 * @see HttpEntity
	 * @see RestTemplate#postForLocation(java.lang.String, java.lang.Object,
	 * java.util.Map)
	 */
	public URI postForLocation(String url, Object request, Map<String, ?> urlVariables)
			throws RestClientException {
		return this.restTemplate.postForLocation(url, request, urlVariables);
	}
	/**
	 * Create a new resource by POSTing the given object to the URL, and returns the value
	 * of the {@code Location} header. This header typically indicates where the new
	 * resource is stored.
	 * <p>
	 * The {@code request} parameter can be a {@link HttpEntity} in order to add
	 * additional HTTP headers to the request. The configured root URI (if any) is
	 * applied to the given URL.
	 * @param url the URL
	 * @param request the Object to be POSTed, may be {@code null}
	 * @return the value for the {@code Location} header
	 * @throws RestClientException on client-side HTTP error
	 * @see HttpEntity
	 * @see RestTemplate#postForLocation(java.net.URI, java.lang.Object)
	 */
	public URI postForLocation(URI url, Object request) throws RestClientException {
		return this.restTemplate.postForLocation(applyRootUriIfNecessary(url), request);
	}
	/**
	 * Create a new resource by POSTing the given object to the URI template, and returns
	 * the representation found in the response.
	 * <p>
	 * URI Template variables are expanded using the given URI variables, if any.
	 * <p>
	 * The {@code request} parameter can be a {@link HttpEntity} in order to add
	 * additional HTTP headers to the request.
	 * @param url the URL
	 * @param request the Object to be POSTed, may be {@code null}
	 * @param responseType the type of the return value
	 * @param urlVariables the variables to expand the template
	 * @param <T> the type of the return value
	 * @return the converted object
	 * @throws RestClientException on client-side HTTP error
	 * @see HttpEntity
	 * @see RestTemplate#postForObject(java.lang.String, java.lang.Object,
	 * java.lang.Class, java.lang.Object[])
	 */
	public <T> T postForObject(String url, Object request, Class<T> responseType,
			Object... urlVariables) throws RestClientException {
		return this.restTemplate.postForObject(url, request, responseType, urlVariables);
	}
	/**
	 * Create a new resource by POSTing the given object to the URI template, and returns
	 * the representation found in the response.
	 * <p>
	 * URI Template variables are expanded using the given map.
	 * <p>
	 * The {@code request} parameter can be a {@link HttpEntity} in order to add
	 * additional HTTP headers to the request.
	 * @param url the URL
	 * @param request the Object to be POSTed, may be {@code null}
	 * @param responseType the type of the return value
	 * @param urlVariables the map containing variables for the URI template
	 * @param <T> the type of the return value
	 * @return the converted object
	 * @throws RestClientException on client-side HTTP error
	 * @see HttpEntity
	 * @see RestTemplate#postForObject(java.lang.String, java.lang.Object,
	 * java.lang.Class, java.util.Map)
	 */
	public <T> T postForObject(String url, Object request, Class<T> responseType,
			Map<String, ?> urlVariables) throws RestClientException {
		return this.restTemplate.postForObject(url, request, responseType, urlVariables);
	}
	/**
	 * Create a new resource by POSTing the given object to the URL, and returns the
	 * representation found in the response.
	 * <p>
	 * The {@code request} parameter can be a {@link HttpEntity} in order to add
	 * additional HTTP headers to the request. The configured root URI (if any) is
	 * applied to the given URL.
	 * @param url the URL
	 * @param request the Object to be POSTed, may be {@code null}
	 * @param responseType the type of the return value
	 * @param <T> the type of the return value
	 * @return the converted object
	 * @throws RestClientException on client-side HTTP error
	 * @see HttpEntity
	 * @see RestTemplate#postForObject(java.net.URI, java.lang.Object, java.lang.Class)
	 */
	public <T> T postForObject(URI url, Object request, Class<T> responseType)
			throws RestClientException {
		return this.restTemplate.postForObject(applyRootUriIfNecessary(url), request,
				responseType);
	}
	/**
	 * Create a new resource by POSTing the given object to the URI template, and returns
	 * the response as {@link ResponseEntity}.
	 * <p>
	 * URI Template variables are expanded using the given URI variables, if any.
	 * <p>
	 * The {@code request} parameter can be a {@link HttpEntity} in order to add
	 * additional HTTP headers to the request.
	 * @param url the URL
	 * @param request the Object to be POSTed, may be {@code null}
	 * @param responseType the response type to return
	 * @param urlVariables the variables to expand the template
	 * @param <T> the type of the return value
	 * @return the converted object
	 * @throws RestClientException on client-side HTTP error
	 * @see HttpEntity
	 * @see RestTemplate#postForEntity(java.lang.String, java.lang.Object,
	 * java.lang.Class, java.lang.Object[])
	 */
	public <T> ResponseEntity<T> postForEntity(String url, Object request,
			Class<T> responseType, Object... urlVariables) throws RestClientException {
		return this.restTemplate.postForEntity(url, request, responseType, urlVariables);
	}
	/**
	 * Create a new resource by POSTing the given object to the URI template, and returns
	 * the response as {@link ResponseEntity}.
	 * <p>
	 * URI Template variables are expanded using the given map.
	 * <p>
	 * The {@code request} parameter can be a {@link HttpEntity} in order to add
	 * additional HTTP headers to the request.
	 * @param url the URL
	 * @param request the Object to be POSTed, may be {@code null}
	 * @param responseType the response type to return
	 * @param urlVariables the map containing variables for the URI template
	 * @param <T> the type of the return value
	 * @return the converted object
	 * @throws RestClientException on client-side HTTP error
	 * @see HttpEntity
	 * @see RestTemplate#postForEntity(java.lang.String, java.lang.Object,
	 * java.lang.Class, java.util.Map)
	 */
	public <T> ResponseEntity<T> postForEntity(String url, Object request,
			Class<T> responseType, Map<String, ?> urlVariables)
			throws RestClientException {
		return this.restTemplate.postForEntity(url, request, responseType, urlVariables);
	}
	/**
	 * Create a new resource by POSTing the given object to the URL, and returns the
	 * response as {@link ResponseEntity}.
	 * <p>
	 * The {@code request} parameter can be a {@link HttpEntity} in order to add
	 * additional HTTP headers to the request. The configured root URI (if any) is
	 * applied to the given URL.
	 * @param url the URL
	 * @param request the Object to be POSTed, may be {@code null}
	 * @param responseType the response type to return
	 * @param <T> the type of the return value
	 * @return the converted object
	 * @throws RestClientException on client-side HTTP error
	 * @see HttpEntity
	 * @see RestTemplate#postForEntity(java.net.URI, java.lang.Object, java.lang.Class)
	 */
	public <T> ResponseEntity<T> postForEntity(URI url, Object request,
			Class<T> responseType) throws RestClientException {
		return this.restTemplate.postForEntity(applyRootUriIfNecessary(url), request,
				responseType);
	}
	/**
	 * Create or update a resource by PUTting the given object to the URI.
	 * <p>
	 * URI Template variables are expanded using the given URI variables, if any.
	 * <p>
	 * The {@code request} parameter can be a {@link HttpEntity} in order to add
	 * additional HTTP headers to the request.
	 * @param url the URL
	 * @param request the Object to be PUT, may be {@code null}
	 * @param urlVariables the variables to expand the template
	 * @throws RestClientException on client-side HTTP error
	 * @see HttpEntity
	 * @see RestTemplate#put(java.lang.String, java.lang.Object, java.lang.Object[])
	 */
	public void put(String url, Object request, Object... urlVariables)
			throws RestClientException {
		this.restTemplate.put(url, request, urlVariables);
	}
	/**
	 * Create or update a resource by PUTting the given object to the URI template.
	 * <p>
	 * URI Template variables are expanded using the given map.
	 * <p>
	 * The {@code request} parameter can be a {@link HttpEntity} in order to add
	 * additional HTTP headers to the request.
	 * @param url the URL
	 * @param request the Object to be PUT, may be {@code null}
	 * @param urlVariables the map containing variables for the URI template
	 * @throws RestClientException on client-side HTTP error
	 * @see HttpEntity
	 * @see RestTemplate#put(java.lang.String, java.lang.Object, java.util.Map)
	 */
	public void put(String url, Object request, Map<String, ?> urlVariables)
			throws RestClientException {
		this.restTemplate.put(url, request, urlVariables);
	}
	/**
	 * Create or update a resource by PUTting the given object to the URL.
	 * <p>
	 * The {@code request} parameter can be a {@link HttpEntity} in order to add
	 * additional HTTP headers to the request. The configured root URI (if any) is
	 * applied to the given URL.
	 * @param url the URL
	 * @param request the Object to be PUT, may be {@code null}
	 * @throws RestClientException on client-side HTTP error
	 * @see HttpEntity
	 * @see RestTemplate#put(java.net.URI, java.lang.Object)
	 */
	public void put(URI url, Object request) throws RestClientException {
		this.restTemplate.put(applyRootUriIfNecessary(url), request);
	}
	/**
	 * Update a resource by PATCHing the given object to the URI template, and returns the
	 * representation found in the response.
	 * <p>
	 * URI Template variables are expanded using the given URI variables, if any.
	 * <p>
	 * The {@code request} parameter can be a {@link HttpEntity} in order to add
	 * additional HTTP headers to the request.
	 * @param url the URL
	 * @param request the Object to be PATCHed, may be {@code null}
	 * @param responseType the type of the return value
	 * @param uriVariables the variables to expand the template
	 * @param <T> the type of the return value
	 * @return the converted object
	 * @throws RestClientException on client-side HTTP error
	 * @since 1.4.4
	 * @see HttpEntity
	 */
	public <T> T patchForObject(String url, Object request, Class<T> responseType,
			Object... uriVariables) throws RestClientException {
		return this.restTemplate.patchForObject(url, request, responseType, uriVariables);
	}
	/**
	 * Update a resource by PATCHing the given object to the URI template, and returns the
	 * representation found in the response.
	 * <p>
	 * URI Template variables are expanded using the given map.
	 * <p>
	 * The {@code request} parameter can be a {@link HttpEntity} in order to add
	 * additional HTTP headers to the request.
	 * @param url the URL
	 * @param request the Object to be PATCHed, may be {@code null}
	 * @param responseType the type of the return value
	 * @param uriVariables the map containing variables for the URI template
	 * @param <T> the type of the return value
	 * @return the converted object
	 * @throws RestClientException on client-side HTTP error
	 * @since 1.4.4
	 * @see HttpEntity
	 */
	public <T> T patchForObject(String url, Object request, Class<T> responseType,
			Map<String, ?> uriVariables) throws RestClientException {
		return this.restTemplate.patchForObject(url, request, responseType, uriVariables);
	}
	/**
	 * Update a resource by PATCHing the given object to the URL, and returns the
	 * representation found in the response.
	 * <p>
	 * The {@code request} parameter can be a {@link HttpEntity} in order to add
	 * additional HTTP headers to the request. The configured root URI (if any) is
	 * applied to the given URL.
	 * @param url the URL
	 * @param request the Object to be PATCHed, may be {@code null}
	 * @param responseType the type of the return value
	 * @param <T> the type of the return value
	 * @return the converted object
	 * @throws RestClientException on client-side HTTP error
	 * @since 1.4.4
	 * @see HttpEntity
	 */
	public <T> T patchForObject(URI url, Object request, Class<T> responseType)
			throws RestClientException {
		return this.restTemplate.patchForObject(applyRootUriIfNecessary(url), request,
				responseType);
	}
	/**
	 * Delete the resources at the specified URI.
	 * <p>
	 * URI Template variables are expanded using the given URI variables, if any.
	 * @param url the URL
	 * @param urlVariables the variables to expand in the template
	 * @throws RestClientException on client-side HTTP error
	 * @see RestTemplate#delete(java.lang.String, java.lang.Object[])
	 */
	public void delete(String url, Object... urlVariables) throws RestClientException {
		this.restTemplate.delete(url, urlVariables);
	}
	/**
	 * Delete the resources at the specified URI.
	 * <p>
	 * URI Template variables are expanded using the given map.
	 * @param url the URL
	 * @param urlVariables the map containing variables for the URI template
	 * @throws RestClientException on client-side HTTP error
	 * @see RestTemplate#delete(java.lang.String, java.util.Map)
	 */
	public void delete(String url, Map<String, ?> urlVariables)
			throws RestClientException {
		this.restTemplate.delete(url, urlVariables);
	}
	/**
	 * Delete the resources at the specified URL.
	 * <p>
	 * The configured root URI (if any) is applied to the given URL.
	 * @param url the URL
	 * @throws RestClientException on client-side HTTP error
	 * @see RestTemplate#delete(java.net.URI)
	 */
	public void delete(URI url) throws RestClientException {
		this.restTemplate.delete(applyRootUriIfNecessary(url));
	}
	/**
	 * Return the value of the Allow header for the given URI.
	 * <p>
	 * URI Template variables are expanded using the given URI variables, if any.
	 * @param url the URL
	 * @param urlVariables the variables to expand in the template
	 * @return the value of the allow header
	 * @throws RestClientException on client-side HTTP error
	 * @see RestTemplate#optionsForAllow(java.lang.String, java.lang.Object[])
	 */
	public Set<HttpMethod> optionsForAllow(String url, Object... urlVariables)
			throws RestClientException {
		return this.restTemplate.optionsForAllow(url, urlVariables);
	}
	/**
	 * Return the value of the Allow header for the given URI.
	 * <p>
	 * URI Template variables are expanded using the given map.
	 * @param url the URL
	 * @param urlVariables the map containing variables for the URI template
	 * @return the value of the allow header
	 * @throws RestClientException on client-side HTTP error
	 * @see RestTemplate#optionsForAllow(java.lang.String, java.util.Map)
	 */
	public Set<HttpMethod> optionsForAllow(String url, Map<String, ?> urlVariables)
			throws RestClientException {
		return this.restTemplate.optionsForAllow(url, urlVariables);
	}
	/**
	 * Return the value of the Allow header for the given URL.
	 * <p>
	 * The configured root URI (if any) is applied to the given URL.
	 * @param url the URL
	 * @return the value of the allow header
	 * @throws RestClientException on client-side HTTP error
	 * @see RestTemplate#optionsForAllow(java.net.URI)
	 */
	public Set<HttpMethod> optionsForAllow(URI url) throws RestClientException {
		return this.restTemplate.optionsForAllow(applyRootUriIfNecessary(url));
	}
	/**
	 * Execute the HTTP method to the given URI template, writing the given request entity
	 * to the request, and returns the response as {@link ResponseEntity}.
	 * <p>
	 * URI Template variables are expanded using the given URI variables, if any.
	 * @param url the URL
	 * @param method the HTTP method (GET, POST, etc)
	 * @param requestEntity the entity (headers and/or body) to write to the request, may
	 * be {@code null}
	 * @param responseType the type of the return value
	 * @param urlVariables the variables to expand in the template
	 * @param <T> the type of the return value
	 * @return the response as entity
	 * @throws RestClientException on client-side HTTP error
	 * @see RestTemplate#exchange(java.lang.String, org.springframework.http.HttpMethod,
	 * org.springframework.http.HttpEntity, java.lang.Class, java.lang.Object[])
	 */
	public <T> ResponseEntity<T> exchange(String url, HttpMethod method,
			HttpEntity<?> requestEntity, Class<T> responseType, Object... urlVariables)
			throws RestClientException {
		return this.restTemplate.exchange(url, method, requestEntity, responseType,
				urlVariables);
	}
	/**
	 * Execute the HTTP method to the given URI template, writing the given request entity
	 * to the request, and returns the response as {@link ResponseEntity}.
	 * <p>
	 * URI Template variables are expanded using the given map.
	 * @param url the URL
	 * @param method the HTTP method (GET, POST, etc)
	 * @param requestEntity the entity (headers and/or body) to write to the request, may
	 * be {@code null}
	 * @param responseType the type of the return value
	 * @param urlVariables the map containing variables for the URI template
	 * @param <T> the type of the return value
	 * @return the response as entity
	 * @throws RestClientException on client-side HTTP error
	 * @see RestTemplate#exchange(java.lang.String, org.springframework.http.HttpMethod,
	 * org.springframework.http.HttpEntity, java.lang.Class, java.util.Map)
	 */
	public <T> ResponseEntity<T> exchange(String url, HttpMethod method,
			HttpEntity<?> requestEntity, Class<T> responseType,
			Map<String, ?> urlVariables) throws RestClientException {
		return this.restTemplate.exchange(url, method, requestEntity, responseType,
				urlVariables);
	}
	/**
	 * Execute the HTTP method to the given URI template, writing the given request entity
	 * to the request, and returns the response as {@link ResponseEntity}.
	 * <p>
	 * The configured root URI (if any) is applied to the given URL.
	 * @param url the URL
	 * @param method the HTTP method (GET, POST, etc)
	 * @param requestEntity the entity (headers and/or body) to write to the request, may
	 * be {@code null}
	 * @param responseType the type of the return value
	 * @param <T> the type of the return value
	 * @return the response as entity
	 * @throws RestClientException on client-side HTTP error
	 * @see RestTemplate#exchange(java.net.URI, org.springframework.http.HttpMethod,
	 * org.springframework.http.HttpEntity, java.lang.Class)
	 */
	public <T> ResponseEntity<T> exchange(URI url, HttpMethod method,
			HttpEntity<?> requestEntity, Class<T> responseType)
			throws RestClientException {
		return this.restTemplate.exchange(applyRootUriIfNecessary(url), method,
				requestEntity, responseType);
	}
/**
 * Executes {@code method} against the given URI template, expanding it with the
 * supplied positional variables; the {@link ParameterizedTypeReference} carries
 * generic type information for the response body.
 * @param url the URL template
 * @param method the HTTP method (GET, POST, etc)
 * @param requestEntity the headers and/or body to send, may be {@code null}
 * @param responseType the (possibly generic) type of the return value
 * @param urlVariables values used to expand the template
 * @param <T> the type of the return value
 * @return the response as entity
 * @throws RestClientException on client-side HTTP error
 */
public <T> ResponseEntity<T> exchange(String url, HttpMethod method,
        HttpEntity<?> requestEntity, ParameterizedTypeReference<T> responseType,
        Object... urlVariables) throws RestClientException {
    // Plain delegation: String templates are expanded by the underlying
    // RestTemplate's UriTemplateHandler.
    return this.restTemplate.exchange(url, method, requestEntity, responseType, urlVariables);
}
/**
 * Executes {@code method} against the given URI template, expanding it with the
 * supplied named variables; the {@link ParameterizedTypeReference} carries
 * generic type information for the response body.
 * @param url the URL template
 * @param method the HTTP method (GET, POST, etc)
 * @param requestEntity the headers and/or body to send, may be {@code null}
 * @param responseType the (possibly generic) type of the return value
 * @param urlVariables map of values used to expand the template
 * @param <T> the type of the return value
 * @return the response as entity
 * @throws RestClientException on client-side HTTP error
 */
public <T> ResponseEntity<T> exchange(String url, HttpMethod method,
        HttpEntity<?> requestEntity, ParameterizedTypeReference<T> responseType,
        Map<String, ?> urlVariables) throws RestClientException {
    // Plain delegation: String templates are expanded by the underlying
    // RestTemplate's UriTemplateHandler.
    return this.restTemplate.exchange(url, method, requestEntity, responseType, urlVariables);
}
/**
 * Executes {@code method} against the given URI, converting the response body
 * via the given {@link ParameterizedTypeReference}. A URI starting with
 * {@code /} is first resolved against the configured root URI.
 * @param url the URL
 * @param method the HTTP method (GET, POST, etc)
 * @param requestEntity the headers and/or body to send, may be {@code null}
 * @param responseType the (possibly generic) type of the return value
 * @param <T> the type of the return value
 * @return the response as entity
 * @throws RestClientException on client-side HTTP error
 */
public <T> ResponseEntity<T> exchange(URI url, HttpMethod method,
        HttpEntity<?> requestEntity, ParameterizedTypeReference<T> responseType)
        throws RestClientException {
    URI resolvedUrl = applyRootUriIfNecessary(url);
    return this.restTemplate.exchange(resolvedUrl, method, requestEntity, responseType);
}
/**
 * Executes the request described by the given {@link RequestEntity} and returns
 * the response as an entity. The request URI is resolved against the configured
 * root URI when it starts with {@code /}.
 * @param requestEntity the entity to write to the request
 * @param responseType the type of the return value
 * @param <T> the type of the return value
 * @return the response as entity
 * @throws RestClientException on client-side HTTP error
 */
public <T> ResponseEntity<T> exchange(RequestEntity<?> requestEntity,
        Class<T> responseType) throws RestClientException {
    RequestEntity<?> resolved = createRequestEntityWithRootAppliedUri(requestEntity);
    return this.restTemplate.exchange(resolved, responseType);
}
/**
 * Executes the request described by the given {@link RequestEntity}; the
 * {@link ParameterizedTypeReference} carries generic type information for the
 * response body. The request URI is resolved against the configured root URI
 * when it starts with {@code /}.
 * @param requestEntity the entity to write to the request
 * @param responseType the (possibly generic) type of the return value
 * @param <T> the type of the return value
 * @return the response as entity
 * @throws RestClientException on client-side HTTP error
 */
public <T> ResponseEntity<T> exchange(RequestEntity<?> requestEntity,
        ParameterizedTypeReference<T> responseType) throws RestClientException {
    RequestEntity<?> resolved = createRequestEntityWithRootAppliedUri(requestEntity);
    return this.restTemplate.exchange(resolved, responseType);
}
/**
 * Low-level variant: prepares the request with the given {@link RequestCallback}
 * and reads the response with the given {@link ResponseExtractor}, expanding the
 * URI template with positional variables.
 * @param url the URL template
 * @param method the HTTP method (GET, POST, etc)
 * @param requestCallback object that prepares the request
 * @param responseExtractor object that extracts the return value from the response
 * @param urlVariables values used to expand the template
 * @param <T> the type of the return value
 * @return an arbitrary object, as returned by the {@link ResponseExtractor}
 * @throws RestClientException on client-side HTTP error
 */
public <T> T execute(String url, HttpMethod method, RequestCallback requestCallback,
        ResponseExtractor<T> responseExtractor, Object... urlVariables)
        throws RestClientException {
    return this.restTemplate.execute(url, method, requestCallback, responseExtractor, urlVariables);
}
/**
 * Low-level variant: prepares the request with the given {@link RequestCallback}
 * and reads the response with the given {@link ResponseExtractor}, expanding the
 * URI template with named variables.
 * @param url the URL template
 * @param method the HTTP method (GET, POST, etc)
 * @param requestCallback object that prepares the request
 * @param responseExtractor object that extracts the return value from the response
 * @param urlVariables map of values used to expand the template
 * @param <T> the type of the return value
 * @return an arbitrary object, as returned by the {@link ResponseExtractor}
 * @throws RestClientException on client-side HTTP error
 */
public <T> T execute(String url, HttpMethod method, RequestCallback requestCallback,
        ResponseExtractor<T> responseExtractor, Map<String, ?> urlVariables)
        throws RestClientException {
    return this.restTemplate.execute(url, method, requestCallback, responseExtractor, urlVariables);
}
/**
 * Low-level variant for a concrete URI: prepares the request with the given
 * {@link RequestCallback} and reads the response with the given
 * {@link ResponseExtractor}. A URI starting with {@code /} is first resolved
 * against the configured root URI.
 * @param url the URL
 * @param method the HTTP method (GET, POST, etc)
 * @param requestCallback object that prepares the request
 * @param responseExtractor object that extracts the return value from the response
 * @param <T> the type of the return value
 * @return an arbitrary object, as returned by the {@link ResponseExtractor}
 * @throws RestClientException on client-side HTTP error
 */
public <T> T execute(URI url, HttpMethod method, RequestCallback requestCallback,
        ResponseExtractor<T> responseExtractor) throws RestClientException {
    URI resolvedUrl = applyRootUriIfNecessary(url);
    return this.restTemplate.execute(resolvedUrl, method, requestCallback, responseExtractor);
}
/**
 * Returns the underlying {@link RestTemplate} that is actually used to perform the
 * REST operations. Exposed so tests can inspect or further customize it.
 * @return the restTemplate
 */
public RestTemplate getRestTemplate() {
return this.restTemplate;
}
/**
 * Creates a new {@code TestRestTemplate} with the same configuration as this one,
 * except that it will send basic authorization headers using the given
 * {@code username} and {@code password}.
 * <p>
 * The returned template reuses this instance's message converter, interceptor
 * and URI-template-handler objects (same instances, not deep copies), plus the
 * same error handler and http client options.
 * @param username the username
 * @param password the password
 * @return the new template
 * @since 1.4.1
 */
public TestRestTemplate withBasicAuth(String username, String password) {
RestTemplate restTemplate = new RestTemplateBuilder()
.messageConverters(getRestTemplate().getMessageConverters())
.interceptors(getRestTemplate().getInterceptors())
.uriTemplateHandler(getRestTemplate().getUriTemplateHandler()).build();
// The TestRestTemplate constructor installs the basic-auth credentials.
TestRestTemplate testRestTemplate = new TestRestTemplate(restTemplate, username,
password, this.httpClientOptions);
// Preserve any custom error handler installed on this instance.
testRestTemplate.getRestTemplate()
.setErrorHandler(getRestTemplate().getErrorHandler());
return testRestTemplate;
}
// Rebuilds the given RequestEntity with its URI resolved against the root URI.
// Raw types are needed because the entity's generic payload type cannot be
// re-stated here; body, headers, method and declared type are carried over.
@SuppressWarnings({ "rawtypes", "unchecked" })
private RequestEntity<?> createRequestEntityWithRootAppliedUri(
RequestEntity<?> requestEntity) {
return new RequestEntity(requestEntity.getBody(), requestEntity.getHeaders(),
requestEntity.getMethod(),
applyRootUriIfNecessary(requestEntity.getUrl()), requestEntity.getType());
}
// Prefixes the configured root URI onto a relative URI (one starting with "/"),
// but only when the template's handler is a RootUriTemplateHandler; any other
// URI is returned unchanged.
private URI applyRootUriIfNecessary(URI uri) {
    UriTemplateHandler handler = this.restTemplate.getUriTemplateHandler();
    if (!(handler instanceof RootUriTemplateHandler) || !uri.toString().startsWith("/")) {
        return uri;
    }
    String rootUri = ((RootUriTemplateHandler) handler).getRootUri();
    return URI.create(rootUri + uri.toString());
}
/**
 * Options used to customize the Apache Http Client if it is used.
 */
public enum HttpClientOption {

/**
 * Enable cookies (the STANDARD cookie spec is used instead of IGNORE_COOKIES).
 */
ENABLE_COOKIES,

/**
 * Enable redirects (following of 3xx responses).
 */
ENABLE_REDIRECTS,

/**
 * Use a {@link SSLConnectionSocketFactory} with {@link TrustSelfSignedStrategy},
 * i.e. accept self-signed certificates.
 */
SSL

}
/**
 * {@link HttpComponentsClientHttpRequestFactory} to apply customizations
 * expressed as {@link HttpClientOption}s (cookies, redirects, SSL).
 */
protected static class CustomHttpComponentsClientHttpRequestFactory
extends HttpComponentsClientHttpRequestFactory {

// STANDARD when ENABLE_COOKIES is requested, otherwise cookies are ignored.
private final String cookieSpec;

private final boolean enableRedirects;

public CustomHttpComponentsClientHttpRequestFactory(
HttpClientOption[] httpClientOptions) {
Set<HttpClientOption> options = new HashSet<>(
Arrays.asList(httpClientOptions));
this.cookieSpec = (options.contains(HttpClientOption.ENABLE_COOKIES)
? CookieSpecs.STANDARD : CookieSpecs.IGNORE_COOKIES);
this.enableRedirects = options.contains(HttpClientOption.ENABLE_REDIRECTS);
if (options.contains(HttpClientOption.SSL)) {
setHttpClient(createSslHttpClient());
}
}

// Builds an HttpClient that accepts self-signed certificates (test use only).
private HttpClient createSslHttpClient() {
try {
SSLConnectionSocketFactory socketFactory = new SSLConnectionSocketFactory(
new SSLContextBuilder()
.loadTrustMaterial(null, new TrustSelfSignedStrategy())
.build());
return HttpClients.custom().setSSLSocketFactory(socketFactory).build();
}
catch (Exception ex) {
throw new IllegalStateException("Unable to create SSL HttpClient", ex);
}
}

@Override
protected HttpContext createHttpContext(HttpMethod httpMethod, URI uri) {
// Attach the per-request config (cookie spec / redirect policy) to every request.
HttpClientContext context = HttpClientContext.create();
context.setRequestConfig(getRequestConfig());
return context;
}

protected RequestConfig getRequestConfig() {
// NOTE(review): authentication is disabled at the HttpClient level here;
// presumably credentials are added at the RestTemplate level - confirm
// against the TestRestTemplate constructor.
Builder builder = RequestConfig.custom().setCookieSpec(this.cookieSpec)
.setAuthenticationEnabled(false)
.setRedirectsEnabled(this.enableRedirects);
return builder.build();
}

}
/**
 * {@link DefaultResponseErrorHandler} that never raises errors: 4xx/5xx
 * responses are returned to the caller as-is so tests can assert on the
 * status code and body instead of catching exceptions.
 */
private static class NoOpResponseErrorHandler extends DefaultResponseErrorHandler {

@Override
public void handleError(ClientHttpResponse response) throws IOException {
// Intentionally empty: suppress the default exception-throwing behavior.
}

}
}
| apache-2.0 |
willnorris/java-openid | src/main/java/edu/internet2/middleware/openid/extensions/ax/impl/FetchResponseMarshaller.java | 3041 | /*
* Copyright [2009] [University Corporation for Advanced Internet Development, Inc.]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.internet2.middleware.openid.extensions.ax.impl;
import java.util.List;
import java.util.Map;
import javax.xml.namespace.QName;
import edu.internet2.middleware.openid.common.NamespaceMap;
import edu.internet2.middleware.openid.common.ParameterMap;
import edu.internet2.middleware.openid.extensions.ax.AttributeExchange;
import edu.internet2.middleware.openid.extensions.ax.AttributeExchangeMarshaller;
import edu.internet2.middleware.openid.extensions.ax.FetchResponse;
import edu.internet2.middleware.openid.extensions.ax.AttributeExchange.Parameter;
/**
* FetchRequestMarshaller.
*/
public class FetchResponseMarshaller implements AttributeExchangeMarshaller<FetchResponse> {
/** {@inheritDoc} */
public void marshall(FetchResponse response, ParameterMap parameters) {
    // Assigns short aliases to attribute type URIs.
    NamespaceMap aliasMap = new NamespaceMap();
    aliasMap.setAliasPrefix(AttributeExchange.ALIAS_PREFIX);

    // Optional update URL.
    String updateUrl = response.getUpdateURL();
    if (updateUrl != null) {
        parameters.put(Parameter.update_url.QNAME, updateUrl);
    }

    // Emit one "type.<alias>" entry per attribute, a "count.<alias>" entry
    // whenever the value count is not exactly one, then the value entries.
    for (Map.Entry<String, List<String>> attribute : response.getAttributes().entrySet()) {
        String typeURI = attribute.getKey();
        List<String> valueList = attribute.getValue();
        String alias = aliasMap.add(typeURI);

        parameters.put(new QName(AttributeExchange.AX_10_NS,
                Parameter.type.toString() + "." + alias), typeURI);

        int valueCount = valueList.size();
        if (valueCount != 1) {
            parameters.put(new QName(AttributeExchange.AX_10_NS,
                    Parameter.count.toString() + "." + alias),
                    Integer.toString(valueCount));
        }

        if (valueCount == 1) {
            // Single value: no index suffix.
            parameters.put(new QName(AttributeExchange.AX_10_NS,
                    Parameter.value.toString() + "." + alias), valueList.get(0));
        } else {
            // Multiple values: 1-based index suffix. Runs zero times when empty.
            for (int i = 0; i < valueCount; i++) {
                parameters.put(new QName(AttributeExchange.AX_10_NS,
                        Parameter.value.toString() + "." + alias + "." + (i + 1)),
                        valueList.get(i));
            }
        }
    }
}
} | apache-2.0 |
lchli/ListItemAsyncDataLoader | LoaderLibrary/app/src/main/java/com/lchli/loaderlibrary/base/AbsViewHolder.java | 384 | package com.lchli.loaderlibrary.base;
import android.view.View;
public abstract class AbsViewHolder {
public final View itemView;
/**
 * Binds the holder to its root view.
 *
 * @param itemView the item's root view; must not be null
 * @throws IllegalArgumentException if {@code itemView} is null
 */
public AbsViewHolder(View itemView) {
if (itemView == null) {
throw new IllegalArgumentException("itemView may not be null");
}
this.itemView = itemView;
}
} | apache-2.0 |
twitter/distributedlog | distributedlog-core/src/main/java/com/twitter/distributedlog/util/Transaction.java | 2921 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.distributedlog.util;
import com.google.common.annotations.Beta;
import com.twitter.util.Future;
/**
* Util class represents a transaction
*/
@Beta
public interface Transaction<OpResult> {
/**
 * An operation executed in a transaction.
 *
 * @param <OpResult> type of the value produced by this operation
 */
interface Op<OpResult> {

/**
 * Execute after the transaction succeeds
 *
 * @param r result of this operation to commit
 */
void commit(OpResult r);

/**
 * Execute after the transaction fails
 *
 * @param t reason the transaction failed
 * @param r result of this operation to abort
 */
void abort(Throwable t, OpResult r);
}
/**
* Listener on the result of an {@link com.twitter.distributedlog.util.Transaction.Op}.
*
* @param <OpResult>
*/
interface OpListener<OpResult> {
/**
* Trigger on operation committed.
*
* @param r
* result to return
*/
void onCommit(OpResult r);
/**
* Trigger on operation aborted.
*
* @param t
* reason to abort
*/
void onAbort(Throwable t);
}
/**
* Add the operation to current transaction.
*
* @param operation
* operation to execute under current transaction
*/
void addOp(Op<OpResult> operation);
/**
* Execute the current transaction. If the transaction succeed, all operations will be
* committed (via {@link com.twitter.distributedlog.util.Transaction.Op#commit(Object)}.
* Otherwise, all operations will be aborted (via {@link Op#abort(Throwable, Object)}).
*
* @return future representing the result of transaction execution.
*/
Future<Void> execute();
/**
* Abort current transaction. If this is called and the transaction haven't been executed by
* {@link #execute()}, it would abort all operations. If the transaction has been executed,
* the behavior is left up to implementation - if transaction is cancellable, the {@link #abort(Throwable)}
* could attempt to cancel it.
*
* @param reason reason to abort the transaction
*/
void abort(Throwable reason);
}
| apache-2.0 |
mhus/mhus-inka | de.mhus.hair/mhu-hair2-ext/src/main/java/de/mhu/hair/sf/scripts/ChangeObjectType.java | 6205 | /*
* Hair2 License
*
* Copyright (C) 2008 Mike Hummel
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package de.mhu.hair.sf.scripts;
import com.documentum.fc.client.IDfCollection;
import com.documentum.fc.client.IDfPersistentObject;
import com.documentum.fc.client.IDfQuery;
import com.documentum.fc.client.IDfSession;
import com.documentum.fc.client.IDfSessionManager;
import com.documentum.fc.client.IDfSysObject;
import com.documentum.fc.common.DfException;
import com.documentum.fc.common.DfId;
import de.mhu.hair.dctm.DMConnection;
import de.mhu.hair.gui.ALogger;
import de.mhu.hair.plugin.PluginNode;
import de.mhu.hair.sf.ScriptIfc;
import de.mhu.hair.tools.ObjectTool;
public class ChangeObjectType implements ScriptIfc {
private String superType;
private String newType;
private ALogger logger;
private DMConnection con;
private boolean cancelCheckout;
private boolean unImmutable;
private boolean recoverImmutable;
private boolean recover;
private boolean deleteRecovered;
private boolean useTransaction;
public void initialize(PluginNode pNode, DMConnection pCon, ALogger pLogger)
throws Exception {
}
/**
 * Runs the type change for each selected object. Failures are logged per
 * object and do not stop processing of the remaining targets.
 */
public void execute(PluginNode pNode, DMConnection pCon,
IDfPersistentObject[] pTargets, ALogger pLogger) throws Exception {
logger = pLogger;
con = pCon;
for (int i = 0; i < pTargets.length; i++) {
logger.out.println(">>> " + ObjectTool.getPath(pTargets[i]));
try {
changeType((IDfSysObject) pTargets[i]);
} catch (Exception e) {
// Best-effort: report and continue with the next object.
logger.out.println("*** ERROR: " + e);
e.printStackTrace();
}
}
}
/**
 * Changes the type of a single object in two DQL steps: first to the common
 * {@code superType}, then from the super type to {@code newType}. Depending on
 * the configured flags it may run in its own session/transaction, restore a
 * deleted object, cancel a checkout, and temporarily lift the immutable flag.
 *
 * NOTE(review): the local {@code deleted} is initialized to {@code false} and
 * never assigned, so the {@code deleteRecovered} block near the end is
 * unreachable; moreover that block's SQL sets {@code i_is_deleted=0} (a
 * restore, not a delete) and looks copy-pasted from the restore branch above.
 * Confirm the intended behavior before relying on {@code setDelete(true)}.
 */
private void changeType(IDfSysObject object) throws Exception {
String strObjId = object.getObjectId().getId();
IDfSessionManager sesMan = con.getSession().getSessionManager();
IDfSession transSession = null;
boolean error = false;
if (useTransaction) {
if (!sesMan.isTransactionActive()) {
// Dedicated session so the transaction can be committed/aborted below.
sesMan.beginTransaction();
transSession = sesMan.newSession(con.getSession()
.getDocbaseName());
} else {
throw new Exception("ERROR: Cannot start Transaction.");
}
} else {
transSession = con.getSession();
}
try {
boolean immutable = false;
boolean deleted = false;
IDfSysObject obj = (IDfSysObject) transSession.getObject(new DfId(
strObjId));
// restore if needed
if (recover && obj.isDeleted()) {
if (!transSession
.apiExec(
"execsql",
"update dm_sysobject_s set i_is_deleted=0, i_vstamp=i_vstamp+1 where r_object_id='"
+ obj.getObjectId() + "'")) {
throw new Exception("Can't restore: " + obj.getObjectId());
}
obj.fetch(null);
}
// disable immutable flag
// otherwise no change will effected
if (unImmutable && obj.isImmutable()) {
immutable = true;
obj.setBoolean("r_immutable_flag", false);
obj.save();
}
if (cancelCheckout && obj.isCheckedOut()) {
transSession.apiExec("unlock", obj.getString("r_object_id"));
obj.fetch(null);
}
// change to type to super type
String dqlChangeQuery = "CHANGE " + obj.getTypeName()
+ " (all) OBJECT TO " + superType;
dqlChangeQuery += " WHERE r_object_id='" + strObjId + "'";
IDfQuery dql = con.createQuery(dqlChangeQuery);
// if test mode activated nothing happens
IDfCollection collaction = dql.execute(transSession,
IDfQuery.EXEC_QUERY);
collaction.close();
// change super type to needed type
dqlChangeQuery = "CHANGE " + superType + " (all) OBJECT TO "
+ newType;
dqlChangeQuery += " WHERE r_object_id='" + strObjId + "'";
dql.setDQL(dqlChangeQuery);
// if test mode activated nothing happens
collaction = dql.execute(transSession, IDfQuery.EXEC_QUERY);
collaction.close();
// put the immutable flag back if it was lifted above
if (recoverImmutable && immutable) {
obj.setBoolean("r_immutable_flag", true);
immutable = false;
obj.save();
}
// NOTE(review): dead code - 'deleted' can never be true here (see javadoc).
if (deleteRecovered && deleted) {
transSession
.apiExec(
"execsql",
"update dm_sysobject_s set i_is_deleted=0, i_vstamp=i_vstamp+1 where r_object_id='"
+ obj.getObjectId() + "'");
obj.fetch(null);
}
} catch (Exception e) {
// Remember the failure so the transaction below is aborted, not committed.
logger.out.println("*** ERROR: " + e);
e.printStackTrace();
error = true;
}
// close session
try {
if (useTransaction) {
if (error)
sesMan.abortTransaction();
else
sesMan.commitTransaction();
if (transSession != null)
sesMan.release(transSession);
}
transSession = null;
} catch (DfException e) {
if (useTransaction) {
sesMan.abortTransaction();
if (transSession != null)
sesMan.release(transSession);
}
transSession = null;
throw new DfException(
"ERROR: Commit failed. Last ObjectID procceded: "
+ strObjId);
}
}
public void setSuperType(String in) {
superType = in;
}
public void setNewType(String in) {
newType = in;
}
public void setCancelcheckout(boolean in) {
cancelCheckout = in;
}
public void setUnimmutable(boolean in) {
unImmutable = in;
}
public void setRecoverimmutable(boolean in) {
recoverImmutable = in;
}
public void setRecover(boolean in) {
recover = in;
}
public void setDelete(boolean in) {
deleteRecovered = in;
}
public void setTransaction(boolean in) {
useTransaction = in;
}
public void destroy(PluginNode node, DMConnection con, ALogger logger) {
// TODO Auto-generated method stub
}
}
| apache-2.0 |
alexeev/jboss-fuse-mirror | fabric/fabric-core/src/main/java/io/fabric8/api/jmx/MetaTypeAttributeDTO.java | 4508 | /**
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.fabric8.api.jmx;
import org.osgi.service.metatype.AttributeDefinition;
/**
*/
public class MetaTypeAttributeDTO {
private String id;
private String name;
private String description;
private boolean required;
private int cardinality;
private String typeName;
private String[] defaultValue;
private String[] optionLabels;
private String[] optionValues;
/**
 * Maps an OSGi {@link AttributeDefinition} type constant to the short string
 * name used by this DTO ({@code "boolean"}, {@code "int"}, ...).
 *
 * @param type one of the {@link AttributeDefinition} type constants
 * @return the type name, or {@code null} for an unrecognized constant
 */
public static String typeName(int type) {
switch (type) {
case AttributeDefinition.BOOLEAN:
return "boolean";
case AttributeDefinition.BYTE:
return "byte";
case AttributeDefinition.CHARACTER:
return "char";
case AttributeDefinition.DOUBLE:
return "double";
case AttributeDefinition.FLOAT:
return "float";
case AttributeDefinition.INTEGER:
return "int";
case AttributeDefinition.LONG:
return "long";
case AttributeDefinition.PASSWORD:
return "password";
case AttributeDefinition.SHORT:
return "short";
case AttributeDefinition.STRING:
return "string";
case AttributeDefinition.BIGDECIMAL:
return "bigdecimal";
case AttributeDefinition.BIGINTEGER:
return "bigint";
default:
return null;
}
}
public MetaTypeAttributeDTO() {
}
public MetaTypeAttributeDTO(AttributeDefinition definition, boolean required) {
this.required = required;
this.id = definition.getID();
this.name = definition.getName();
this.description = definition.getDescription();
this.cardinality = definition.getCardinality();
this.typeName = typeName(definition.getType());
this.defaultValue = definition.getDefaultValue();
this.optionLabels = definition.getOptionLabels();
this.optionValues = definition.getOptionValues();
}
// Short diagnostic representation; deliberately limited to id and name.
@Override
public String toString() {
    return String.format("MetaTypeAttributeDTO{id='%s', name='%s'}", this.id, this.name);
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public int getCardinality() {
return cardinality;
}
public void setCardinality(int cardinality) {
this.cardinality = cardinality;
}
public String getTypeName() {
return typeName;
}
public void setTypeName(String typeName) {
this.typeName = typeName;
}
public String[] getDefaultValue() {
return defaultValue;
}
public void setDefaultValue(String[] defaultValue) {
this.defaultValue = defaultValue;
}
public String[] getOptionLabels() {
return optionLabels;
}
public void setOptionLabels(String[] optionLabels) {
this.optionLabels = optionLabels;
}
public String[] getOptionValues() {
return optionValues;
}
public void setOptionValues(String[] optionValues) {
this.optionValues = optionValues;
}
public boolean isRequired() {
return required;
}
public void setRequired(boolean required) {
this.required = required;
}
}
| apache-2.0 |
health-and-care-developer-network/health-and-care-developer-network | library/hazelcast/2.5/hazelcast-2.5-source/hazelcast/src/main/java/com/hazelcast/impl/TransactionImpl.java | 18140 | /*
* Copyright (c) 2008-2013, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.impl;
import com.hazelcast.core.Instance;
import com.hazelcast.core.Instance.InstanceType;
import com.hazelcast.core.Prefix;
import com.hazelcast.core.Transaction;
import com.hazelcast.logging.ILogger;
import com.hazelcast.nio.Data;
import com.hazelcast.util.Clock;
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.logging.Level;
import static com.hazelcast.nio.IOUtil.toObject;
public class TransactionImpl implements Transaction {
public static final long DEFAULT_TXN_TIMEOUT = 30 * 1000;
private final long id;
private final FactoryImpl factory;
private final List<TransactionRecord> transactionRecords = new CopyOnWriteArrayList<TransactionRecord>();
private int status = TXN_STATUS_NO_TXN;
private final ILogger logger;
public TransactionImpl(FactoryImpl factory, long txnId) {
this.id = txnId;
this.factory = factory;
this.logger = factory.getLoggingService().getLogger(this.getClass().getName());
}
public Data attachPutOp(String name, Object key, Data value, boolean newRecord) {
return attachPutOp(name, key, value, 0, -1, newRecord, -1);
}
public void attachPutMultiOp(String name, Object key, Data value) {
transactionRecords.add(new TransactionRecord(name, key, value, true));
}
public Data attachPutOp(String name, Object key, Data value, int timeout, long ttl, boolean newRecord) {
return attachPutOp(name, key, value, timeout, ttl, newRecord, -1);
}
public Data attachPutOp(String name, Object key, Data value, long timeout, boolean newRecord, int index) {
return attachPutOp(name, key, value, timeout, -1, newRecord, index);
}
/**
 * Attaches (or merges into) a put operation for the given entry.
 *
 * For multimaps the pending record is matched by (name, key, value); for all
 * other structures by (name, key) only. If no matching record exists a new one
 * is added and {@code null} is returned; otherwise the pending value is
 * overwritten, any pending removal is cleared, and the previously attached
 * value is returned.
 *
 * NOTE(review): timeout/ttl are only set when a NEW record is created - a later
 * put on the same key does not refresh them. Confirm this is intended.
 */
public Data attachPutOp(String name, Object key, Data value, long timeout, long ttl, boolean newRecord, int index) {
Instance.InstanceType instanceType = ConcurrentMapManager.getInstanceType(name);
Object matchValue = (instanceType.isMultiMap()) ? toObject(value) : null;
TransactionRecord rec = findTransactionRecord(name, key, matchValue);
if (rec == null) {
rec = new TransactionRecord(name, key, value, newRecord);
rec.timeout = timeout;
rec.ttl = ttl;
rec.index = index;
transactionRecords.add(rec);
return null;
} else {
Data old = rec.value;
rec.value = value;
rec.removed = false;
rec.index = index;
return old;
}
}
public Data attachRemoveOp(String name, Object key, Data value, boolean newRecord) {
return attachRemoveOp(name, key, value, newRecord, 1);
}
public Data attachRemoveOp(String name, Object key, Data value, boolean newRecord, int valueCount) {
Instance.InstanceType instanceType = ConcurrentMapManager.getInstanceType(name);
Object matchValue = (instanceType.isMultiMap()) ? toObject(value) : null;
TransactionRecord rec = findTransactionRecord(name, key, matchValue);
Data oldValue = null;
if (rec == null) {
rec = new TransactionRecord(name, key, value, newRecord);
transactionRecords.add(rec);
} else {
oldValue = rec.value;
rec.value = value;
}
rec.valueCount = valueCount;
rec.removed = true;
return oldValue;
}
public void begin() throws IllegalStateException {
if (status == TXN_STATUS_ACTIVE) {
throw new IllegalStateException("Transaction is already active");
}
status = TXN_STATUS_ACTIVE;
}
/**
 * Commits the transaction by invoking {@code commit()} on every attached
 * record in attach order, then finalizing the transaction.
 *
 * NOTE(review): {@code status} is set to TXN_STATUS_COMMITTED in the finally
 * block, so even when a record's commit throws (and the exception propagates
 * to the caller) the transaction still reports itself as committed - confirm
 * this is intended.
 *
 * @throws IllegalStateException if the transaction is not active
 */
public void commit() throws IllegalStateException {
if (status != TXN_STATUS_ACTIVE) {
throw new IllegalStateException("Transaction is not active");
}
status = TXN_STATUS_COMMITTING;
try {
// Bind this factory to the calling thread before touching the records.
ThreadContext.get().setCurrentFactory(factory);
for (TransactionRecord record : transactionRecords) {
record.commit();
}
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
finalizeTxn();
status = TXN_STATUS_COMMITTED;
}
}
/**
 * Rolls back all attached records in reverse attachment order, then finalizes the transaction.
 * Rollback is best-effort: exceptions are logged, not propagated.
 *
 * @throws IllegalStateException if the transaction is in a state that cannot be rolled back
 */
public void rollback() throws IllegalStateException {
    if (status == TXN_STATUS_NO_TXN || status == TXN_STATUS_UNKNOWN
            || status == TXN_STATUS_COMMITTED || status == TXN_STATUS_ROLLED_BACK) {
        throw new IllegalStateException("Transaction is not ready to rollback. Status= "
                + status);
    }
    status = TXN_STATUS_ROLLING_BACK;
    try {
        ThreadContext.get().setCurrentFactory(factory);
        final int size = transactionRecords.size();
        // Iterate backwards so later operations are undone before the ones they followed.
        ListIterator<TransactionRecord> iter = transactionRecords.listIterator(size);
        while (iter.hasPrevious()) {
            TransactionRecord record = iter.previous();
            if (record.instanceType.isQueue()) {
                // Queue operations carry a companion map record that must be rolled back first.
                rollbackMapTransactionRecordOfQueue(iter, record);
            }
            record.rollback();
        }
    } catch (Exception e) {
        logger.log(Level.WARNING, e.getMessage(), e);
    } finally {
        finalizeTxn();
        status = TXN_STATUS_ROLLED_BACK;
    }
}
// Queues have two transaction records per operation;
// one for queue key, one for map item. During rollback
// we should rollback map record first.
// See github issue#99 and TransactionTest.issue99TestQueueTakeAndDuringRollback
private void rollbackMapTransactionRecordOfQueue(final ListIterator<TransactionRecord> iter,
                                                 final TransactionRecord queueTxRecord) {
    if (iter.hasPrevious()) {
        TransactionRecord prevRecord = iter.previous();
        // Sanity checks: the preceding record should be the backing map record for this queue.
        if (prevRecord.instanceType != InstanceType.MAP) {
            logger.log(Level.WARNING, "Map#TransactionRecord is expected before a " +
                    "Queue#TransactionRecord, but got " + prevRecord.instanceType);
        } else if (!prevRecord.name.equals(Prefix.MAP + queueTxRecord.name)) {
            logger.log(Level.WARNING, "Expecting a record of " + Prefix.MAP + queueTxRecord.name
                    + " but got " + prevRecord.name);
        }
        // Rollback previous transaction record, even if it is not expected record.
        prevRecord.rollback();
    }
}
/**
 * @return true if this transaction holds a pending (non-removed) record for the given
 *         structure whose deserialized value equals {@code value}
 */
public boolean containsValue(String name, Object value) {
    for (TransactionRecord record : transactionRecords) {
        if (record.name.equals(name)
                && !record.removed
                && value.equals(toObject(record.value))) {
            return true;
        }
    }
    return false;
}
/**
 * @return true if this transaction holds a pending (non-removed) record for the given
 *         name/key/value combination
 */
public boolean containsEntry(String name, Object key, Object value) {
    TransactionRecord transactionRecord = findTransactionRecord(name, key, value);
    return transactionRecord != null && !transactionRecord.removed;
}
/**
 * Finds the first attached record matching the given structure name and key,
 * or null when none exists.
 */
private TransactionRecord findTransactionRecord(String name, Object key) {
    for (TransactionRecord record : transactionRecords) {
        if (record.name.equals(name)
                && record.key != null
                && record.key.equals(key)) {
            return record;
        }
    }
    return null;
}
/**
 * Finds the first attached record matching name and key, additionally matching the
 * deserialized value. For multi-maps both value and stored value must agree (including
 * the both-null case); for other structures a null {@code value} matches any record.
 */
private TransactionRecord findTransactionRecord(String name, Object key, Object value) {
    for (TransactionRecord record : transactionRecords) {
        if (!record.name.equals(name) || record.key == null || !record.key.equals(key)) {
            continue;
        }
        final Object txValue = toObject(record.value);
        final boolean matches;
        if (record.instanceType.isMultiMap()) {
            matches = (value == null && txValue == null)
                    || (value != null && value.equals(txValue));
        } else {
            matches = value == null || value.equals(txValue);
        }
        if (matches) {
            return record;
        }
    }
    return null;
}
/**
 * Returns the transaction-local value attached for the given key, or null when no
 * record exists or the key is pending removal. Updates the record's last-access time.
 */
public Data get(String name, Object key) {
    TransactionRecord record = findTransactionRecord(name, key);
    if (record == null || record.removed) {
        return null;
    }
    record.lastAccess = Clock.currentTimeMillis();
    return record.value;
}
/** @return the id of this transaction */
public long getId() {
    return id;
}
/** @return the current status code (one of the TXN_STATUS_* constants) */
public int getStatus() {
    return status;
}
/** @return true if this transaction has any record (put or remove) for the given key */
public boolean has(String name, Object key) {
    return findTransactionRecord(name, key) != null;
}
/** @return true if this transaction has any record matching the given key and value */
public boolean has(String name, Object key, Object value) {
    return findTransactionRecord(name, key, value) != null;
}
/** @return true if the key is pending insertion by this transaction (did not exist before it) */
public boolean isNew(String name, Object key) {
    TransactionRecord rec = findTransactionRecord(name, key);
    return (rec != null && !rec.removed && rec.newRecord);
}
/** @return true if the key is pending removal by this transaction */
public boolean isRemoved(String name, Object key) {
    TransactionRecord rec = findTransactionRecord(name, key);
    return (rec != null && rec.removed);
}
/**
 * Computes the net size delta the pending operations of this transaction would apply
 * to the named structure (negative for removals, positive for additions).
 */
public int size(String name) {
    int size = 0;
    for (TransactionRecord transactionRecord : transactionRecords) {
        if (transactionRecord.name.equals(name)) {
            if (transactionRecord.removed) {
                if (transactionRecord.instanceType.isSet()) {
                    // A set removal always subtracts exactly one item.
                    size--;
                } else if (!transactionRecord.newRecord) {
                    // Removing a key added by this same transaction nets to zero, so only
                    // pre-existing entries count; queue removals are not reflected here.
                    if (!transactionRecord.instanceType.isQueue()) {
                        size -= transactionRecord.valueCount;
                    }
                }
            } else if (transactionRecord.newRecord) {
                if (transactionRecord.instanceType.isList()) {
                    // For lists the record's value carries the number of added items.
                    size += (Integer) toObject(transactionRecord.value);
                } else {
                    size++;
                }
            }
        }
    }
    return size;
}
/**
 * Collects the entries newly added to the named structure by this transaction.
 *
 * @return the new entries, or null when there are none (list is allocated lazily)
 */
public List<Map.Entry> newEntries(String name) {
    List<Map.Entry> entries = null;
    for (TransactionRecord record : transactionRecords) {
        boolean pendingAdd = record.name.equals(name)
                && !record.removed
                && record.value != null
                && record.newRecord;
        if (pendingAdd) {
            if (entries == null) {
                entries = new ArrayList<Map.Entry>(2);
            }
            entries.add(BaseManager.createSimpleMapEntry(factory, name, record.key, record.value));
        }
    }
    return entries;
}
/**
 * Applies this transaction's pending multi-map operations for the given key to the
 * supplied collection of committed values: additions are appended, removals are taken
 * out, and a removal with a null value (remove-all) clears the collection and stops.
 */
public void getMulti(String name, Object key, Collection col) {
    for (TransactionRecord transactionRecord : transactionRecords) {
        if (transactionRecord.name.equals(name)) {
            if (key.equals(transactionRecord.key)) {
                if (!transactionRecord.removed && transactionRecord.newRecord) {
                    col.add(toObject(transactionRecord.value));
                } else if (transactionRecord.removed) {
                    if (transactionRecord.value == null) {
                        // null value marks removal of the whole key; nothing earlier survives.
                        col.clear();
                        return;
                    } else {
                        col.remove(toObject(transactionRecord.value));
                    }
                }
            }
        }
    }
}
/**
 * Collects the key/value pairs newly added to the named structure by this transaction.
 *
 * @return the new pairs, or null when there are none (map is allocated lazily)
 */
public Map newKeys(String name) {
    Map added = null;
    for (TransactionRecord record : transactionRecords) {
        boolean pendingAdd = record.name.equals(name)
                && !record.removed
                && record.value != null
                && record.newRecord;
        if (pendingAdd) {
            if (added == null) {
                added = new HashMap();
            }
            added.put(record.key, record.value);
        }
    }
    return added;
}
// Compact diagnostic form: transaction id plus the raw status code.
@Override
public String toString() {
    return "TransactionImpl [" + id + "] status: " + status;
}
/**
 * Clears all attached records, resets the status and detaches this transaction
 * from the calling thread's context.
 */
private void finalizeTxn() {
    transactionRecords.clear();
    status = TXN_STATUS_NO_TXN;
    ThreadContext.get().finalizeTxn();
}
/**
 * A single pending operation (a put or a remove) attached to this transaction.
 * Deliberately a non-static inner class: commit and rollback use the enclosing
 * transaction's {@code factory} and {@code id}.
 */
private class TransactionRecord {
    public String name;                               // name of the target structure
    public Object key;
    public Data value;
    public boolean removed = false;                   // true when this record represents a removal
    public boolean newRecord = false;                 // true when the key did not exist before this txn
    public Instance.InstanceType instanceType = null; // derived from the structure name
    public long lastAccess = -1;                      // last read time within the transaction
    public int valueCount = 1;                        // number of values affected (multi-map removals)
    public long timeout = 0; // for commit
    public long ttl = -1;                             // time-to-live applied when a put commits
    public int index = -1;                            // offer position for queue operations

    public TransactionRecord(String name, Object key, Data value, boolean newRecord) {
        this.name = name;
        this.key = key;
        this.value = value;
        this.newRecord = newRecord;
        instanceType = ConcurrentMapManager.getInstanceType(name);
    }

    // NOTE(review): not invoked from the code visible in this class — presumably used by
    // callers elsewhere in the enclosing transaction; verify before removing.
    public TransactionRecord(String name, Object key, Data value, int index, boolean newRecord) {
        this.name = name;
        this.key = key;
        this.value = value;
        this.newRecord = newRecord;
        this.index = index;
        instanceType = ConcurrentMapManager.getInstanceType(name);
    }

    /** Dispatches to the queue- or map-specific commit. */
    public void commit() {
        if (instanceType == Instance.InstanceType.QUEUE) {
            commitQueue();
        } else {
            commitMap();
        }
    }

    /** Applies this record to the cluster map/multi-map/set, then releases the key lock. */
    public void commitMap() {
        if (removed) {
            if (instanceType.isSet()) {
                factory.node.concurrentMapManager.new MRemoveItem().removeItem(name, key);
            } else if (!newRecord) {
                if (instanceType.isMap()) {
                    factory.node.concurrentMapManager.new MRemove().remove(name, key);
                } else if (instanceType.isMultiMap()) {
                    if (value == null) {
                        // null value means: remove every value stored under the key.
                        factory.node.concurrentMapManager.new MRemove().remove(name, key);
                    } else {
                        factory.node.concurrentMapManager.new MRemoveMulti().remove(name, key, value);
                    }
                }
            }
            // since we do not have removeAndUnlock op, we should explicitly call unlock after remove!
            factory.node.concurrentMapManager.new MLock().unlock(name, key, -1);
        } else {
            if (instanceType.isMultiMap()) {
                factory.node.concurrentMapManager.new MPutMulti().put(name, key, value);
            } else {
                if (value != null) {
                    factory.node.concurrentMapManager.new MPut().putAfterCommit(name, key, value, ttl, id);
                } else {
                    // No value to write: the record only held the lock, so just release it.
                    factory.node.concurrentMapManager.new MLock().unlock(name, key, -1);
                }
            }
        }
    }

    /** Completes a pending queue offer; queue removals need no commit action here. */
    public void commitQueue() {
        if (!removed) {
            factory.node.blockingQueueManager.offerCommit(name, key, value, index);
        }
    }

    /** Dispatches to the queue- or map-specific rollback. */
    public void rollback() {
        if (instanceType == Instance.InstanceType.QUEUE) {
            rollbackQueue();
        } else {
            rollbackMap();
        }
    }

    /** Map-style rollback only releases the key lock; no data was written before commit. */
    public void rollbackMap() {
        factory.node.concurrentMapManager.new MLock().unlock(name, key, -1);
    }

    /** Undoes a queue poll so the taken item is restored. */
    public void rollbackQueue() {
        if (removed) {
            factory.node.blockingQueueManager.rollbackPoll(name, key);
        }
    }

    @Override
    public String toString() {
        return "TransactionRecord{" +
                "instanceType=" + instanceType +
                ", name='" + name + '\'' +
                ", key=" + key +
                ", value=" + value +
                ", removed=" + removed +
                ", newRecord=" + newRecord +
                ", lastAccess=" + lastAccess +
                ", valueCount=" + valueCount +
                '}';
    }
}
}
| apache-2.0 |
LeonardoCardoso/Silence-Please | app/src/main/java/com/leocardz/silence/please/adapter/item/PSettingsAdapterItem.java | 685 | package com.leocardz.silence.please.adapter.item;
/**
 * Row model for the settings list adapter: a title resource id, a free-text
 * summary and a row type that doubles as the adapter view type.
 */
public class PSettingsAdapterItem {

    private int title;      // string resource id of the row title
    private String summary; // human readable summary text
    private int type;       // adapter row type

    /**
     * @param title   string resource id of the row title
     * @param summary summary text shown with the row
     * @param type    adapter view type of this row
     */
    public PSettingsAdapterItem(int title, String summary, int type) {
        this.title = title;
        this.summary = summary;
        this.type = type;
    }

    /** The adapter view type for this row; delegates to {@link #getType()}. */
    public int getItemViewType() {
        return getType();
    }

    public int getTitle() {
        return title;
    }

    public String getSummary() {
        return summary;
    }

    public int getType() {
        return type;
    }

    public void setTitle(int title) {
        this.title = title;
    }

    public void setSummary(String summary) {
        this.summary = summary;
    }

    public void setType(int type) {
        this.type = type;
    }
}
| apache-2.0 |
npgall/cqengine | code/src/main/java/com/googlecode/cqengine/index/support/KeyStatisticsIndex.java | 3103 | /**
* Copyright 2012-2015 Niall Gallagher
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.googlecode.cqengine.index.support;
import com.googlecode.cqengine.index.Index;
import com.googlecode.cqengine.query.option.QueryOptions;
/**
 * An index which allows the set of distinct keys to be queried, and which can return statistics on the number of
 * objects stored in the buckets for each key.
 * <p/>
 * Note that this interface reads statistics about keys and NOT about attribute values from the index.
 * Often those statistics will be the same, however if a {@link com.googlecode.cqengine.quantizer.Quantizer} is
 * configured for an index, then often objects for several attribute values may have the same key and may be stored
 * in the same bucket.
 *
 * Created by niall.gallagher on 09/01/2015.
 */
public interface KeyStatisticsIndex<A, O> extends Index<O> {

    // Interface members are implicitly public (JLS 9.4); the redundant modifiers were removed.

    /**
     * Returns the distinct keys in the index.
     *
     * @param queryOptions Optional parameters for the query
     * @return The distinct keys in the index
     */
    CloseableIterable<A> getDistinctKeys(QueryOptions queryOptions);

    /**
     * Returns the number of objects stored in the bucket in the index with the given key.
     *
     * @param key A key which may be contained in the index
     * @param queryOptions Optional parameters for the query
     * @return The number of objects stored in the bucket in the index with the given key
     */
    Integer getCountForKey(A key, QueryOptions queryOptions);

    /**
     * Returns the count of distinct keys in the index.
     *
     * @param queryOptions Optional parameters for the query
     * @return The count of distinct keys in the index.
     */
    Integer getCountOfDistinctKeys(QueryOptions queryOptions);

    /**
     * Returns the statistics {@link KeyStatistics} for all distinct keys in the index.
     *
     * @param queryOptions Optional parameters for the query
     * @return The statistics {@link KeyStatistics} for all distinct keys in the index
     */
    CloseableIterable<KeyStatistics<A>> getStatisticsForDistinctKeys(QueryOptions queryOptions);

    /**
     * Returns the keys and corresponding values for those keys in the index. Note the same key
     * will be returned multiple times if more than one object has the same key. Also the same value might be returned
     * multiple times, each time for a different key, if the index is built on a multi-value attribute.
     *
     * @param queryOptions Optional parameters for the query
     * @return The keys and corresponding values for those keys in the index
     */
    CloseableIterable<KeyValue<A, O>> getKeysAndValues(QueryOptions queryOptions);
}
| apache-2.0 |
MegatronKing/SVG-Android | svg-iconlibs/action/src/main/java/com/github/megatronking/svg/iconlibs/ic_assignment_returned.java | 2769 | package com.github.megatronking.svg.iconlibs;
import android.content.Context;
import android.graphics.Canvas;
import android.graphics.ColorFilter;
import android.graphics.Paint;
import com.github.megatronking.svg.support.SVGRenderer;
/**
* AUTO-GENERATED FILE. DO NOT MODIFY.
*
* This class was automatically generated by the
* SVG-Generator. It should not be modified by hand.
*/
public class ic_assignment_returned extends SVGRenderer {
/**
 * Renders the material-design "assignment returned" icon with a 24x24dp intrinsic size
 * and full opacity. (Auto-generated — see the class header.)
 */
public ic_assignment_returned(Context context) {
    super(context);
    mAlpha = 1.0f;
    mWidth = dip2px(24.0f);
    mHeight = dip2px(24.0f);
}
/**
 * Draws the icon path, scaled from its 24x24 viewport to the requested w x h,
 * into the given canvas. (Auto-generated path data — do not edit by hand.)
 */
@Override
public void render(Canvas canvas, int w, int h, ColorFilter filter) {
    // Scale factors from the 24x24 SVG viewport to the target size.
    final float scaleX = w / 24.0f;
    final float scaleY = h / 24.0f;
    mPath.reset();
    mRenderPath.reset();
    // Identity matrix followed by the viewport scale.
    mFinalPathMatrix.setValues(new float[]{1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f});
    mFinalPathMatrix.postScale(scaleX, scaleY);
    // Icon outline generated from the source SVG path data.
    mPath.moveTo(19.0f, 3.0f);
    mPath.rLineTo(-4.18f, 0f);
    mPath.cubicTo(14.4f, 1.84f, 13.3f, 1.0f, 12.0f, 1.0f);
    mPath.rCubicTo(-1.3f, 0.0f, -2.4f, 0.84f, -2.82f, 2.0f);
    mPath.lineTo(5.0f, 3.0f);
    mPath.rCubicTo(-1.1f, 0.0f, -2.0f, 0.9f, -2.0f, 2.0f);
    mPath.rLineTo(0f, 14.0f);
    mPath.rCubicTo(0.0f, 1.1f, 0.9f, 2.0f, 2.0f, 2.0f);
    mPath.rLineTo(14.0f, 0f);
    mPath.rCubicTo(1.1f, 0.0f, 2.0f, -0.9f, 2.0f, -2.0f);
    mPath.lineTo(21.0f, 5.0f);
    mPath.rCubicTo(0.0f, -1.1f, -0.9f, -2.0f, -2.0f, -2.0f);
    mPath.close();
    mPath.moveTo(19.0f, 3.0f);
    mPath.rMoveTo(-7.0f, 0.0f);
    mPath.rCubicTo(0.55f, 0.0f, 1.0f, 0.45f, 1.0f, 1.0f);
    mPath.rCubicTo(0.0f, 0.54999995f, -0.45f, 1.0f, -1.0f, 1.0f);
    mPath.rCubicTo(-0.5500002f, 0.0f, -1.0f, -0.45f, -1.0f, -1.0f);
    mPath.rCubicTo(0.0f, -0.5500002f, 0.45f, -1.0f, 1.0f, -1.0f);
    mPath.close();
    mPath.moveTo(12.0f, 3.0f);
    mPath.rMoveTo(0.0f, 15.0f);
    mPath.rLineTo(-5.0f, -5.0f);
    mPath.rLineTo(3.0f, 0f);
    mPath.lineTo(10.0f, 9.0f);
    mPath.rLineTo(4.0f, 0f);
    mPath.rLineTo(0f, 4.0f);
    mPath.rLineTo(3.0f, 0f);
    mPath.rLineTo(-5.0f, 5.0f);
    mPath.close();
    mPath.moveTo(12.0f, 18.0f);
    mRenderPath.addPath(mPath, mFinalPathMatrix);
    // Lazily create the shared fill paint on first render.
    if (mFillPaint == null) {
        mFillPaint = new Paint();
        mFillPaint.setStyle(Paint.Style.FILL);
        mFillPaint.setAntiAlias(true);
    }
    // -16777216 is opaque black (0xFF000000).
    mFillPaint.setColor(applyAlpha(-16777216, 1.0f));
    mFillPaint.setColorFilter(filter);
    canvas.drawPath(mRenderPath, mFillPaint);
}
} | apache-2.0 |
gawkermedia/googleads-java-lib | modules/adwords_appengine/src/main/java/com/google/api/ads/adwords/jaxws/v201509/rm/RuleBasedUserList.java | 1024 |
package com.google.api.ads.adwords.jaxws.v201509.rm;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlSeeAlso;
import javax.xml.bind.annotation.XmlType;
/**
*
* Representation of a userlist that is generated by a rule.
*
*
* <p>Java class for RuleBasedUserList complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="RuleBasedUserList">
* <complexContent>
* <extension base="{https://adwords.google.com/api/adwords/rm/v201509}UserList">
* <sequence>
* </sequence>
* </extension>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "RuleBasedUserList")
@XmlSeeAlso({
    DateSpecificRuleUserList.class,
    ExpressionRuleUserList.class
})
public class RuleBasedUserList
    extends UserList
{
    // Intentionally empty: this generated JAXB type adds no fields of its own and exists
    // only as the common supertype of the rule-based user-list variants listed in @XmlSeeAlso.
}
| apache-2.0 |
caveman-frak/java-core | core-model/src/main/java/uk/co/bluegecko/core/model/base/extension/AbstractExtendedModel.java | 2265 | /**
* Copyright 2015, <a href="http://bluegecko.co.uk/java-core">Blue Gecko Limited</a>
*/
package uk.co.bluegecko.core.model.base.extension;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import uk.co.bluegecko.core.model.Model;
import uk.co.bluegecko.core.model.extension.Extended;
import uk.co.bluegecko.core.model.extension.Extension;
/**
 * Base implementation of a model that supports pluggable {@link Extension}s.
 *
 * @param <M>
 *            the concrete model class
 */
public abstract class AbstractExtendedModel< M extends Model > implements Extended< M >, Model
{

    private static final long serialVersionUID = -1264009544621267181L;

    /** Registered extensions; the list passed to the constructor is stored directly (not copied). */
    private final List< Extension< M >> extensions;

    /**
     * Create an extended model backed by the supplied extension list.
     *
     * @param extensions
     *            list of extensions to add to the model
     */
    public AbstractExtendedModel( final List< Extension< M >> extensions )
    {
        super();
        this.extensions = extensions;
    }

    /**
     * Create an extended model with an initially empty set of extensions.
     */
    public AbstractExtendedModel()
    {
        this( new ArrayList<>() );
    }

    /** {@inheritDoc} */
    @Override
    public void setExtensions( final List< Extension< M >> extensions )
    {
        // Replace the contents in place so the backing list instance is preserved.
        this.extensions.clear();
        this.extensions.addAll( extensions );
    }

    /** {@inheritDoc} */
    @Override
    public List< Extension< M >> getExtensions()
    {
        return Collections.unmodifiableList( extensions );
    }

    /** {@inheritDoc} */
    @Override
    public < T extends Extension< M >> T getExtension( final Class< T > klass )
    {
        for ( final Extension< M > candidate : extensions )
        {
            if ( klass.isInstance( candidate ) )
            {
                // Class.cast performs the same narrowing as an unchecked cast would,
                // but without suppressing compiler checks.
                return klass.cast( candidate );
            }
        }
        return null;
    }

    /** {@inheritDoc} */
    @SuppressWarnings( "unchecked" )
    @Override
    public void applyExtension()
    {
        final M self = ( M ) this;
        for ( final Extension< M > extension : extensions )
        {
            extension.apply( self );
        }
    }

}
| apache-2.0 |
aws/aws-sdk-java | aws-java-sdk-resiliencehub/src/main/java/com/amazonaws/services/resiliencehub/model/transform/TestRecommendationJsonUnmarshaller.java | 5046 | /*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.resiliencehub.model.transform;
import java.math.*;
import javax.annotation.Generated;
import com.amazonaws.services.resiliencehub.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
 * TestRecommendation JSON Unmarshaller.
 * Auto-generated by the AWS SDK code generator (see the {@code @Generated} annotation);
 * do not edit by hand.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class TestRecommendationJsonUnmarshaller implements Unmarshaller<TestRecommendation, JsonUnmarshallerContext> {

    /**
     * Unmarshalls a {@link TestRecommendation} from the current position of the JSON
     * parsing context; returns null when the current token is a JSON null.
     */
    public TestRecommendation unmarshall(JsonUnmarshallerContext context) throws Exception {
        TestRecommendation testRecommendation = new TestRecommendation();

        // Track parser depth so the loop stops once it leaves the object it started in.
        int originalDepth = context.getCurrentDepth();
        String currentParentElement = context.getCurrentParentElement();
        int targetDepth = originalDepth + 1;

        JsonToken token = context.getCurrentToken();
        if (token == null)
            token = context.nextToken();
        if (token == VALUE_NULL) {
            return null;
        }

        // Walk tokens, populating each recognized field when found at the expected depth.
        while (true) {
            if (token == null)
                break;

            if (token == FIELD_NAME || token == START_OBJECT) {
                if (context.testExpression("appComponentName", targetDepth)) {
                    context.nextToken();
                    testRecommendation.setAppComponentName(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("description", targetDepth)) {
                    context.nextToken();
                    testRecommendation.setDescription(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("intent", targetDepth)) {
                    context.nextToken();
                    testRecommendation.setIntent(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("items", targetDepth)) {
                    context.nextToken();
                    testRecommendation.setItems(new ListUnmarshaller<RecommendationItem>(RecommendationItemJsonUnmarshaller.getInstance())
                            .unmarshall(context));
                }
                if (context.testExpression("name", targetDepth)) {
                    context.nextToken();
                    testRecommendation.setName(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("prerequisite", targetDepth)) {
                    context.nextToken();
                    testRecommendation.setPrerequisite(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("recommendationId", targetDepth)) {
                    context.nextToken();
                    testRecommendation.setRecommendationId(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("referenceId", targetDepth)) {
                    context.nextToken();
                    testRecommendation.setReferenceId(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("risk", targetDepth)) {
                    context.nextToken();
                    testRecommendation.setRisk(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("type", targetDepth)) {
                    context.nextToken();
                    testRecommendation.setType(context.getUnmarshaller(String.class).unmarshall(context));
                }
            } else if (token == END_ARRAY || token == END_OBJECT) {
                // Stop when the parser closes the object/array that this unmarshaller entered on.
                if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
                    if (context.getCurrentDepth() <= originalDepth)
                        break;
                }
            }
            token = context.nextToken();
        }

        return testRecommendation;
    }

    private static TestRecommendationJsonUnmarshaller instance;

    /**
     * Lazily created shared instance. The unmarshaller holds no instance state, so the
     * unsynchronized null check is benign (worst case an extra instance is created).
     */
    public static TestRecommendationJsonUnmarshaller getInstance() {
        if (instance == null)
            instance = new TestRecommendationJsonUnmarshaller();
        return instance;
    }
}
| apache-2.0 |
aws/aws-sdk-java | aws-java-sdk-panorama/src/main/java/com/amazonaws/services/panorama/model/transform/CreatePackageImportJobRequestProtocolMarshaller.java | 2739 | /*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.panorama.model.transform;
import javax.annotation.Generated;
import com.amazonaws.SdkClientException;
import com.amazonaws.Request;
import com.amazonaws.http.HttpMethodName;
import com.amazonaws.services.panorama.model.*;
import com.amazonaws.transform.Marshaller;
import com.amazonaws.protocol.*;
import com.amazonaws.protocol.Protocol;
import com.amazonaws.annotation.SdkInternalApi;
/**
 * CreatePackageImportJobRequest Marshaller.
 * Auto-generated by the AWS SDK code generator (see the {@code @Generated} annotation);
 * do not edit by hand.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class CreatePackageImportJobRequestProtocolMarshaller implements Marshaller<Request<CreatePackageImportJobRequest>, CreatePackageImportJobRequest> {

    // Static description of the REST operation: POST /packages/import-jobs with a
    // REST-JSON payload built from the request's members.
    private static final OperationInfo SDK_OPERATION_BINDING = OperationInfo.builder().protocol(Protocol.REST_JSON).requestUri("/packages/import-jobs")
            .httpMethodName(HttpMethodName.POST).hasExplicitPayloadMember(false).hasPayloadMembers(true).serviceName("AWSPanorama").build();

    private final com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory;

    public CreatePackageImportJobRequestProtocolMarshaller(com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory) {
        this.protocolFactory = protocolFactory;
    }

    /**
     * Marshalls the given request into an HTTP request using the operation binding above.
     *
     * @throws SdkClientException if the request is null or marshalling fails
     */
    public Request<CreatePackageImportJobRequest> marshall(CreatePackageImportJobRequest createPackageImportJobRequest) {

        if (createPackageImportJobRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }

        try {
            final ProtocolRequestMarshaller<CreatePackageImportJobRequest> protocolMarshaller = protocolFactory.createProtocolMarshaller(SDK_OPERATION_BINDING,
                    createPackageImportJobRequest);

            protocolMarshaller.startMarshalling();
            CreatePackageImportJobRequestMarshaller.getInstance().marshall(createPackageImportJobRequest, protocolMarshaller);
            return protocolMarshaller.finishMarshalling();
        } catch (Exception e) {
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
| apache-2.0 |
wbowling/elasticsearch | qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java | 37788 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import org.apache.http.impl.client.HttpClients;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.Version;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.CliTool.ExitStatus;
import org.elasticsearch.common.cli.CliToolTestCase.CaptureOutputTerminal;
import org.elasticsearch.common.hash.MessageDigests;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.junit.annotations.Network;
import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
import org.elasticsearch.test.rest.client.http.HttpResponse;
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.*;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
import org.jboss.netty.handler.codec.http.*;
import org.jboss.netty.handler.ssl.SslContext;
import org.jboss.netty.handler.ssl.SslHandler;
import org.jboss.netty.handler.ssl.util.InsecureTrustManagerFactory;
import org.jboss.netty.handler.ssl.util.SelfSignedCertificate;
import org.junit.After;
import org.junit.Before;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSocketFactory;
import java.io.BufferedWriter;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.BasicFileAttributes;
import java.nio.file.attribute.PosixFileAttributeView;
import java.nio.file.attribute.PosixFileAttributes;
import java.nio.file.attribute.PosixFilePermission;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.jar.JarOutputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import static org.elasticsearch.common.cli.CliTool.ExitStatus.USAGE;
import static org.elasticsearch.common.cli.CliToolTestCase.args;
import static org.elasticsearch.common.io.FileTestUtils.assertFileContent;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.test.ESIntegTestCase.Scope;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
import static org.hamcrest.Matchers.*;
import static org.jboss.netty.handler.codec.http.HttpVersion.HTTP_1_1;
@ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0.0)
@LuceneTestCase.SuppressFileSystems("*") // TODO: clean up this test to allow extra files
// TODO: jimfs is really broken here (throws wrong exception from detection method).
// if its in your classpath, then do not use plugins!!!!!!
public class PluginManagerTests extends ESIntegTestCase {
private Environment environment;
private CaptureOutputTerminal terminal = new CaptureOutputTerminal();
/**
 * Prepares a per-test environment: points the default home system property at the
 * test environment and ensures the bin/ and config/ directories exist.
 */
@Before
public void setup() throws Exception {
    environment = buildInitialSettings();
    // The plugin manager CLI reads the home path from this system property.
    System.setProperty("es.default.path.home", environment.settings().get("path.home"));
    Path binDir = environment.binFile();
    if (!Files.exists(binDir)) {
        Files.createDirectories(binDir);
    }
    Path configDir = environment.configFile();
    if (!Files.exists(configDir)) {
        Files.createDirectories(configDir);
    }
}
/** Undoes the system property set in {@code setup()} so other tests see a clean JVM state. */
@After
public void clearPathHome() {
    System.clearProperty("es.default.path.home");
}
/**
 * Writes a {@code <file>.sha1} sidecar containing the hex SHA-1 of {@code file};
 * when {@code corrupt} is true, trailing garbage is appended to invalidate it.
 */
private void writeSha1(Path file, boolean corrupt) throws IOException {
    byte[] contents = Files.readAllBytes(file);
    String checksum = MessageDigests.toHexString(MessageDigests.sha1().digest(contents));
    Path sidecar = file.resolveSibling(file.getFileName() + ".sha1");
    try (BufferedWriter writer = Files.newBufferedWriter(sidecar, StandardCharsets.UTF_8)) {
        writer.write(checksum);
        if (corrupt) {
            writer.write("bad");
        }
    }
}
/**
 * Writes a {@code <file>.md5} sidecar containing the hex MD5 of {@code file};
 * when {@code corrupt} is true, trailing garbage is appended to invalidate it.
 */
private void writeMd5(Path file, boolean corrupt) throws IOException {
    byte[] contents = Files.readAllBytes(file);
    String checksum = MessageDigests.toHexString(MessageDigests.md5().digest(contents));
    Path sidecar = file.resolveSibling(file.getFileName() + ".md5");
    try (BufferedWriter writer = Files.newBufferedWriter(sidecar, StandardCharsets.UTF_8)) {
        writer.write(checksum);
        if (corrupt) {
            writer.write("bad");
        }
    }
}
/**
 * Creates a plugin .zip and returns the url for testing. The plugin descriptor is
 * written from the given key/value property pairs, the directory tree under
 * {@code structure} is zipped, and (randomly) a valid .sha1 or .md5 sidecar is written
 * next to the zip to exercise checksum verification.
 */
private String createPlugin(final Path structure, String... properties) throws IOException {
    PluginTestUtil.writeProperties(structure, properties);
    Path zip = createTempDir().resolve(structure.getFileName() + ".zip");
    try (ZipOutputStream stream = new ZipOutputStream(Files.newOutputStream(zip))) {
        Files.walkFileTree(structure, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                // Store entries relative to the plugin root so the zip layout matches a real plugin.
                stream.putNextEntry(new ZipEntry(structure.relativize(file).toString()));
                Files.copy(file, stream);
                return FileVisitResult.CONTINUE;
            }
        });
    }
    // Randomly attach a valid checksum sidecar (or none) — do not change the number of
    // randomBoolean() calls, as that affects reproducibility under a fixed seed.
    if (randomBoolean()) {
        writeSha1(zip, false);
    } else if (randomBoolean()) {
        writeMd5(zip, false);
    }
    return zip.toUri().toURL().toString();
}
/**
 * Creates a plugin .zip and bad checksum file and returns the url for testing. Always
 * writes a corrupted .sha1 or .md5 sidecar (chosen at random) next to the zip.
 */
private String createPluginWithBadChecksum(final Path structure, String... properties) throws IOException {
    // NOTE(review): duplicates createPlugin() except for the corrupt-checksum flag;
    // consider extracting a shared helper.
    PluginTestUtil.writeProperties(structure, properties);
    Path zip = createTempDir().resolve(structure.getFileName() + ".zip");
    try (ZipOutputStream stream = new ZipOutputStream(Files.newOutputStream(zip))) {
        Files.walkFileTree(structure, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                stream.putNextEntry(new ZipEntry(structure.relativize(file).toString()));
                Files.copy(file, stream);
                return FileVisitResult.CONTINUE;
            }
        });
    }
    if (randomBoolean()) {
        writeSha1(zip, true);
    } else {
        writeMd5(zip, true);
    }
    return zip.toUri().toURL().toString();
}
/**
 * "install" with no plugin name/URL argument must fail with USAGE.
 * (The original version also built a plugin zip that the test never used;
 * that dead setup has been removed.)
 */
public void testThatPluginNameMustBeSupplied() throws IOException {
    assertStatus("install", USAGE);
}
/**
 * Installs a local plugin that ships both bin/ and config/ directories and
 * verifies that both are materialized under the node's bin and config roots,
 * and that the shipped bin/tool ends up readable and executable (checked only
 * where POSIX file attributes are available).
 */
public void testLocalPluginInstallWithBinAndConfig() throws Exception {
    String pluginName = "fake-plugin";
    Path pluginDir = createTempDir().resolve(pluginName);
    // create bin/tool and config/file
    Files.createDirectories(pluginDir.resolve("bin"));
    Files.createFile(pluginDir.resolve("bin").resolve("tool"));
    Files.createDirectories(pluginDir.resolve("config"));
    Files.createFile(pluginDir.resolve("config").resolve("file"));
    String pluginUrl = createPlugin(pluginDir,
        "description", "fake desc",
        "name", pluginName,
        "version", "1.0",
        "elasticsearch.version", Version.CURRENT.toString(),
        "java.version", System.getProperty("java.specification.version"),
        "jvm", "true",
        "classname", "FakePlugin");
    Path binDir = environment.binFile();
    Path pluginBinDir = binDir.resolve(pluginName);
    Path pluginConfigDir = environment.configFile().resolve(pluginName);
    assertStatusOk("install " + pluginUrl + " --verbose");
    terminal.getTerminalOutput().clear();
    assertStatusOk("list");
    // The freshly installed plugin must show up in "list" output.
    assertThat(terminal.getTerminalOutput(), hasItem(containsString(pluginName)));
    assertDirectoryExists(pluginBinDir);
    assertDirectoryExists(pluginConfigDir);
    Path toolFile = pluginBinDir.resolve("tool");
    assertFileExists(toolFile);
    // check that the file is marked executable, without actually checking that we can execute it.
    PosixFileAttributeView view = Files.getFileAttributeView(toolFile, PosixFileAttributeView.class);
    // the view might be null, on e.g. windows, there is nothing to check there!
    if (view != null) {
        PosixFileAttributes attributes = view.readAttributes();
        assertThat(attributes.permissions(), hasItem(PosixFilePermission.OWNER_EXECUTE));
        assertThat(attributes.permissions(), hasItem(PosixFilePermission.OWNER_READ));
    }
}
/**
 * Test for #7890.
 *
 * Verifies config-file preservation across install/remove/upgrade cycles:
 * user-edited config files are never overwritten in place — an upgraded file
 * is instead written next to the original with a ".new" suffix, and "remove"
 * leaves the config directory untouched. The assertions below pin that
 * contract for top-level files, nested directories, and newly added files.
 */
public void testLocalPluginInstallWithBinAndConfigInAlreadyExistingConfigDir_7890() throws Exception {
    String pluginName = "fake-plugin";
    Path pluginDir = createTempDir().resolve(pluginName);
    // create config/test.txt with contents 'version1'
    Files.createDirectories(pluginDir.resolve("config"));
    Files.write(pluginDir.resolve("config").resolve("test.txt"), "version1".getBytes(StandardCharsets.UTF_8));
    String pluginUrl = createPlugin(pluginDir,
        "description", "fake desc",
        "name", pluginName,
        "version", "1.0",
        "elasticsearch.version", Version.CURRENT.toString(),
        "java.version", System.getProperty("java.specification.version"),
        "jvm", "true",
        "classname", "FakePlugin");
    Path pluginConfigDir = environment.configFile().resolve(pluginName);
    assertStatusOk(String.format(Locale.ROOT, "install %s --verbose", pluginUrl));
    /*
    First time, our plugin contains:
    - config/test.txt (version1)
    */
    assertFileContent(pluginConfigDir, "test.txt", "version1");
    // We now remove the plugin
    assertStatusOk("remove " + pluginName);
    // We should still have test.txt
    assertFileContent(pluginConfigDir, "test.txt", "version1");
    // Installing a new plugin version
    /*
    Second time, our plugin contains:
    - config/test.txt (version2)
    - config/dir/testdir.txt (version1)
    - config/dir/subdir/testsubdir.txt (version1)
    */
    Files.write(pluginDir.resolve("config").resolve("test.txt"), "version2".getBytes(StandardCharsets.UTF_8));
    Files.createDirectories(pluginDir.resolve("config").resolve("dir").resolve("subdir"));
    Files.write(pluginDir.resolve("config").resolve("dir").resolve("testdir.txt"), "version1".getBytes(StandardCharsets.UTF_8));
    Files.write(pluginDir.resolve("config").resolve("dir").resolve("subdir").resolve("testsubdir.txt"), "version1".getBytes(StandardCharsets.UTF_8));
    pluginUrl = createPlugin(pluginDir,
        "description", "fake desc",
        "name", pluginName,
        "version", "2.0",
        "elasticsearch.version", Version.CURRENT.toString(),
        "java.version", System.getProperty("java.specification.version"),
        "jvm", "true",
        "classname", "FakePlugin");
    assertStatusOk(String.format(Locale.ROOT, "install %s --verbose", pluginUrl));
    // Pre-existing test.txt is kept; the changed file lands as test.txt.new.
    assertFileContent(pluginConfigDir, "test.txt", "version1");
    assertFileContent(pluginConfigDir, "test.txt.new", "version2");
    assertFileContent(pluginConfigDir, "dir/testdir.txt", "version1");
    assertFileContent(pluginConfigDir, "dir/subdir/testsubdir.txt", "version1");
    // Removing
    assertStatusOk("remove " + pluginName);
    // Remove must leave every config file (including *.new) in place.
    assertFileContent(pluginConfigDir, "test.txt", "version1");
    assertFileContent(pluginConfigDir, "test.txt.new", "version2");
    assertFileContent(pluginConfigDir, "dir/testdir.txt", "version1");
    assertFileContent(pluginConfigDir, "dir/subdir/testsubdir.txt", "version1");
    // Installing a new plugin version
    /*
    Third time, our plugin contains:
    - config/test.txt (version3)
    - config/test2.txt (version1)
    - config/dir/testdir.txt (version2)
    - config/dir/testdir2.txt (version1)
    - config/dir/subdir/testsubdir.txt (version2)
    */
    Files.write(pluginDir.resolve("config").resolve("test.txt"), "version3".getBytes(StandardCharsets.UTF_8));
    Files.write(pluginDir.resolve("config").resolve("test2.txt"), "version1".getBytes(StandardCharsets.UTF_8));
    Files.write(pluginDir.resolve("config").resolve("dir").resolve("testdir.txt"), "version2".getBytes(StandardCharsets.UTF_8));
    Files.write(pluginDir.resolve("config").resolve("dir").resolve("testdir2.txt"), "version1".getBytes(StandardCharsets.UTF_8));
    Files.write(pluginDir.resolve("config").resolve("dir").resolve("subdir").resolve("testsubdir.txt"), "version2".getBytes(StandardCharsets.UTF_8));
    pluginUrl = createPlugin(pluginDir,
        "description", "fake desc",
        "name", pluginName,
        "version", "3.0",
        "elasticsearch.version", Version.CURRENT.toString(),
        "java.version", System.getProperty("java.specification.version"),
        "jvm", "true",
        "classname", "FakePlugin");
    assertStatusOk(String.format(Locale.ROOT, "install %s --verbose", pluginUrl));
    // Existing files keep their original contents; changed files get a .new
    // sibling; brand-new files (test2.txt, testdir2.txt) are written directly.
    assertFileContent(pluginConfigDir, "test.txt", "version1");
    assertFileContent(pluginConfigDir, "test2.txt", "version1");
    assertFileContent(pluginConfigDir, "test.txt.new", "version3");
    assertFileContent(pluginConfigDir, "dir/testdir.txt", "version1");
    assertFileContent(pluginConfigDir, "dir/testdir.txt.new", "version2");
    assertFileContent(pluginConfigDir, "dir/testdir2.txt", "version1");
    assertFileContent(pluginConfigDir, "dir/subdir/testsubdir.txt", "version1");
    assertFileContent(pluginConfigDir, "dir/subdir/testsubdir.txt.new", "version2");
}
/**
 * Test for #7152: a plugin shipping only a bin/ directory (no config, no jar)
 * must install cleanly, be listed, and have its bin directory created.
 *
 * Fixes over the original: removed a stray empty statement ({@code ;;}) and
 * used the {@code pluginName} variable for the "name" property instead of a
 * duplicated string literal, matching the sibling tests.
 */
public void testLocalPluginInstallWithBinOnly_7152() throws Exception {
    String pluginName = "fake-plugin";
    Path pluginDir = createTempDir().resolve(pluginName);
    // create bin/tool
    Files.createDirectories(pluginDir.resolve("bin"));
    Files.createFile(pluginDir.resolve("bin").resolve("tool"));
    String pluginUrl = createPlugin(pluginDir,
        "description", "fake desc",
        "name", pluginName,
        "version", "1.0",
        "elasticsearch.version", Version.CURRENT.toString(),
        "java.version", System.getProperty("java.specification.version"),
        "jvm", "true",
        "classname", "FakePlugin");
    Path binDir = environment.binFile();
    Path pluginBinDir = binDir.resolve(pluginName);
    assertStatusOk(String.format(Locale.ROOT, "install %s --verbose", pluginUrl));
    assertThatPluginIsListed(pluginName);
    assertDirectoryExists(pluginBinDir);
}
/** "list" on a fresh node (plugins directory does not exist yet) reports no plugins. */
public void testListInstalledEmpty() throws IOException {
    assertStatusOk("list");
    assertThat(terminal.getTerminalOutput(), hasItem(containsString("No plugin detected")));
}
/** "list" with an existing but empty plugins directory also reports no plugins. */
public void testListInstalledEmptyWithExistingPluginDirectory() throws IOException {
    Files.createDirectory(environment.pluginsFile());
    assertStatusOk("list");
    assertThat(terminal.getTerminalOutput(), hasItem(containsString("No plugin detected")));
}
/**
 * A verbose install must print all plugin metadata lines (name, description,
 * site flag, version, jvm flag) and the plugin must subsequently be listed.
 *
 * Fix over the original: removed a leftover {@code System.err.println} debug
 * statement that polluted test output.
 */
public void testInstallPluginVerbose() throws IOException {
    String pluginName = "fake-plugin";
    Path pluginDir = createTempDir().resolve(pluginName);
    String pluginUrl = createPlugin(pluginDir,
        "description", "fake desc",
        "name", pluginName,
        "version", "1.0",
        "elasticsearch.version", Version.CURRENT.toString(),
        "java.version", System.getProperty("java.specification.version"),
        "jvm", "true",
        "classname", "FakePlugin");
    ExitStatus status = new PluginManagerCliParser(terminal).execute(args("install " + pluginUrl + " --verbose"));
    assertThat("Terminal output was: " + terminal.getTerminalOutput(), status, is(ExitStatus.OK));
    assertThat(terminal.getTerminalOutput(), hasItem(containsString("Name: fake-plugin")));
    assertThat(terminal.getTerminalOutput(), hasItem(containsString("Description: fake desc")));
    assertThat(terminal.getTerminalOutput(), hasItem(containsString("Site: false")));
    assertThat(terminal.getTerminalOutput(), hasItem(containsString("Version: 1.0")));
    assertThat(terminal.getTerminalOutput(), hasItem(containsString("JVM: true")));
    assertThatPluginIsListed(pluginName);
}
/**
 * A non-verbose install must succeed but print none of the verbose metadata
 * lines (Name/Description/Site/Version/JVM), and the plugin must be listed.
 */
public void testInstallPlugin() throws IOException {
    String pluginName = "fake-plugin";
    Path pluginDir = createTempDir().resolve(pluginName);
    String pluginUrl = createPlugin(pluginDir,
        "description", "fake desc",
        "name", pluginName,
        "version", "1.0",
        "elasticsearch.version", Version.CURRENT.toString(),
        "java.version", System.getProperty("java.specification.version"),
        "jvm", "true",
        "classname", "FakePlugin");
    ExitStatus status = new PluginManagerCliParser(terminal).execute(args("install " + pluginUrl));
    assertThat("Terminal output was: " + terminal.getTerminalOutput(), status, is(ExitStatus.OK));
    // Without --verbose, none of the metadata lines may appear.
    assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Name: fake-plugin"))));
    assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Description:"))));
    assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Site:"))));
    assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Version:"))));
    assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("JVM:"))));
    assertThatPluginIsListed(pluginName);
}
/**
 * Installing the same non-isolated plugin twice must fail: the first install
 * succeeds, the second exits with IO_ERROR and mentions "already exists".
 *
 * @deprecated support for this is not going to stick around, seriously.
 */
@Deprecated
public void testAlreadyInstalledNotIsolated() throws Exception {
    String pluginName = "fake-plugin";
    Path pluginDir = createTempDir().resolve(pluginName);
    Files.createDirectories(pluginDir);
    // create a jar file in the plugin
    Path pluginJar = pluginDir.resolve("fake-plugin.jar");
    try (ZipOutputStream out = new JarOutputStream(Files.newOutputStream(pluginJar, StandardOpenOption.CREATE))) {
        // A single empty class entry is enough to make this a "jvm" plugin jar.
        out.putNextEntry(new ZipEntry("foo.class"));
        out.closeEntry();
    }
    String pluginUrl = createPlugin(pluginDir,
        "description", "fake desc",
        "name", pluginName,
        "version", "1.0",
        "elasticsearch.version", Version.CURRENT.toString(),
        "java.version", System.getProperty("java.specification.version"),
        "isolated", "false",
        "jvm", "true",
        "classname", "FakePlugin");
    // install
    ExitStatus status = new PluginManagerCliParser(terminal).execute(args("install " + pluginUrl));
    assertEquals("unexpected exit status: output: " + terminal.getTerminalOutput(), ExitStatus.OK, status);
    // install again
    status = new PluginManagerCliParser(terminal).execute(args("install " + pluginUrl));
    List<String> output = terminal.getTerminalOutput();
    assertEquals("unexpected exit status: output: " + output, ExitStatus.IO_ERROR, status);
    // The error message may appear on any output line; scan them all.
    boolean foundExpectedMessage = false;
    for (String line : output) {
        foundExpectedMessage |= line.contains("already exists");
    }
    assertTrue(foundExpectedMessage);
}
/**
 * Verbose install of a site plugin (a plugin with only a _site directory):
 * metadata lines must be printed with "Site: true" / "JVM: false", and the
 * _site content must be moved under the installed plugin directory.
 */
public void testInstallSitePluginVerbose() throws IOException {
    String pluginName = "fake-plugin";
    Path pluginDir = createTempDir().resolve(pluginName);
    Files.createDirectories(pluginDir.resolve("_site"));
    Files.createFile(pluginDir.resolve("_site").resolve("somefile"));
    String pluginUrl = createPlugin(pluginDir,
        "description", "fake desc",
        "name", pluginName,
        "version", "1.0",
        "site", "true");
    ExitStatus status = new PluginManagerCliParser(terminal).execute(args("install " + pluginUrl + " --verbose"));
    assertThat("Terminal output was: " + terminal.getTerminalOutput(), status, is(ExitStatus.OK));
    assertThat(terminal.getTerminalOutput(), hasItem(containsString("Name: fake-plugin")))
;
    assertThat(terminal.getTerminalOutput(), hasItem(containsString("Description: fake desc")));
    assertThat(terminal.getTerminalOutput(), hasItem(containsString("Site: true")));
    assertThat(terminal.getTerminalOutput(), hasItem(containsString("Version: 1.0")));
    assertThat(terminal.getTerminalOutput(), hasItem(containsString("JVM: false")));
    assertThatPluginIsListed(pluginName);
    // We want to check that Plugin Manager moves content to _site
    assertFileExists(environment.pluginsFile().resolve(pluginName).resolve("_site"));
}
/**
 * Non-verbose install of a site plugin: install succeeds, no metadata lines
 * are printed, and the _site content is moved under the plugin directory.
 */
public void testInstallSitePlugin() throws IOException {
    String pluginName = "fake-plugin";
    Path pluginDir = createTempDir().resolve(pluginName);
    Files.createDirectories(pluginDir.resolve("_site"));
    Files.createFile(pluginDir.resolve("_site").resolve("somefile"));
    String pluginUrl = createPlugin(pluginDir,
        "description", "fake desc",
        "name", pluginName,
        "version", "1.0",
        "site", "true");
    ExitStatus status = new PluginManagerCliParser(terminal).execute(args("install " + pluginUrl));
    assertThat("Terminal output was: " + terminal.getTerminalOutput(), status, is(ExitStatus.OK));
    // Without --verbose, none of the metadata lines may appear.
    assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Name: fake-plugin"))));
    assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Description:"))));
    assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Site:"))));
    assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Version:"))));
    assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("JVM:"))));
    assertThatPluginIsListed(pluginName);
    // We want to check that Plugin Manager moves content to _site
    assertFileExists(environment.pluginsFile().resolve(pluginName).resolve("_site"));
}
/**
 * Installing a plugin whose checksum sidecar is corrupt must fail with
 * IO_ERROR and leave nothing behind: the plugin is neither listed nor
 * materialized on disk.
 */
public void testInstallPluginWithBadChecksum() throws IOException {
    String pluginName = "fake-plugin";
    Path pluginDir = createTempDir().resolve(pluginName);
    Files.createDirectories(pluginDir.resolve("_site"));
    Files.createFile(pluginDir.resolve("_site").resolve("somefile"));
    // NOTE(review): no "name" property is set here, unlike the sibling tests —
    // presumably the name is derived from the URL; confirm against PluginManager.
    String pluginUrl = createPluginWithBadChecksum(pluginDir,
        "description", "fake desc",
        "version", "1.0",
        "site", "true");
    assertStatus(String.format(Locale.ROOT, "install %s --verbose", pluginUrl),
            ExitStatus.IO_ERROR);
    assertThatPluginIsNotListed(pluginName);
    assertFileNotExists(environment.pluginsFile().resolve(pluginName).resolve("_site"));
}
/**
 * Helper: installs a plugin (by coordinates when given, otherwise by the raw
 * descriptor), verifies it is listed, removes it by descriptor, and verifies
 * it is no longer listed.
 *
 * @param pluginDescriptor  the name/descriptor used for "remove" (and for
 *                          "install" when {@code pluginCoordinates} is null)
 * @param pluginName        the name expected in "list" output
 * @param pluginCoordinates optional install target (e.g. a URL); may be null
 */
private void singlePluginInstallAndRemove(String pluginDescriptor, String pluginName, String pluginCoordinates) throws IOException {
    logger.info("--> trying to download and install [{}]", pluginDescriptor);
    if (pluginCoordinates == null) {
        assertStatusOk(String.format(Locale.ROOT, "install %s --verbose", pluginDescriptor));
    } else {
        assertStatusOk(String.format(Locale.ROOT, "install %s --verbose", pluginCoordinates));
    }
    assertThatPluginIsListed(pluginName);
    terminal.getTerminalOutput().clear();
    assertStatusOk("remove " + pluginDescriptor);
    assertThat(terminal.getTerminalOutput(), hasItem(containsString("Removing " + pluginDescriptor)));
    // not listed anymore
    terminal.getTerminalOutput().clear();
    assertStatusOk("list");
    assertThat(terminal.getTerminalOutput(), not(hasItem(containsString(pluginName))));
}
/**
 * We are ignoring by default these tests as they require to have an internet access
 * To activate the test, use -Dtests.network=true
 * We test regular form: username/reponame/version
 * It should find it in download.elasticsearch.org service
 */
@Network
@AwaitsFix(bugUrl = "fails with jar hell failures - http://build-us-00.elastic.co/job/es_core_master_oracle_6/519/testReport/")
public void testInstallPluginWithElasticsearchDownloadService() throws IOException {
    // Skip (rather than fail) when the download service is unreachable.
    assumeTrue("download.elastic.co is accessible", isDownloadServiceWorking("download.elastic.co", 80, "/elasticsearch/ci-test.txt"));
    singlePluginInstallAndRemove("elasticsearch/elasticsearch-transport-thrift/2.4.0", "elasticsearch-transport-thrift", null);
}
/**
 * We are ignoring by default these tests as they require to have an internet access
 * To activate the test, use -Dtests.network=true
 * We test regular form: groupId/artifactId/version
 * It should find it in maven central service
 */
@Network
@AwaitsFix(bugUrl = "fails with jar hell failures - http://build-us-00.elastic.co/job/es_core_master_oracle_6/519/testReport/")
public void testInstallPluginWithMavenCentral() throws IOException {
    // Skip (rather than fail) when either maven endpoint is unreachable.
    assumeTrue("search.maven.org is accessible", isDownloadServiceWorking("search.maven.org", 80, "/"));
    assumeTrue("repo1.maven.org is accessible", isDownloadServiceWorking("repo1.maven.org", 443, "/maven2/org/elasticsearch/elasticsearch-transport-thrift/2.4.0/elasticsearch-transport-thrift-2.4.0.pom"));
    singlePluginInstallAndRemove("org.elasticsearch/elasticsearch-transport-thrift/2.4.0", "elasticsearch-transport-thrift", null);
}
/**
 * We are ignoring by default these tests as they require to have an internet access
 * To activate the test, use -Dtests.network=true
 * We test site plugins from github: userName/repoName
 * It should find it on github
 */
@Network @AwaitsFix(bugUrl = "needs to be adapted to 2.0")
public void testInstallPluginWithGithub() throws IOException {
    // Skip (rather than fail) when github is unreachable.
    assumeTrue("github.com is accessible", isDownloadServiceWorking("github.com", 443, "/"));
    singlePluginInstallAndRemove("elasticsearch/kibana", "kibana", null);
}
/**
 * Probes {@code host:port/resource} over HTTP(S) and returns true only on an
 * HTTP 200 response. Used by the @Network tests to skip themselves when the
 * external service is unreachable.
 */
private boolean isDownloadServiceWorking(String host, int port, String resource) {
    try {
        // Port 443 implies TLS; everything else is probed over plain HTTP.
        String protocol = port == 443 ? "https" : "http";
        HttpResponse response = new HttpRequestBuilder(HttpClients.createDefault()).protocol(protocol).host(host).port(port).path(resource).execute();
        if (response.getStatusCode() != 200) {
            logger.warn("[{}{}] download service is not working. Disabling current test.", host, resource);
            return false;
        }
        return true;
    } catch (Throwable t) {
        // Deliberately broad, best-effort catch: any failure simply disables
        // the network-dependent test rather than failing the build.
        logger.warn("[{}{}] download service is not working. Disabling current test.", host, resource);
    }
    return false;
}
/**
 * "remove" must accept every supported plugin identifier form: the short
 * name, groupid/artifactid/version, and groupid/artifactid.
 */
public void testRemovePlugin() throws Exception {
    String pluginName = "plugintest";
    Path pluginDir = createTempDir().resolve(pluginName);
    String pluginUrl = createPlugin(pluginDir,
        "description", "fake desc",
        "name", pluginName,
        "version", "1.0.0",
        "elasticsearch.version", Version.CURRENT.toString(),
        "java.version", System.getProperty("java.specification.version"),
        "jvm", "true",
        "classname", "FakePlugin");
    // We want to remove plugin with plugin short name
    singlePluginInstallAndRemove("plugintest", "plugintest", pluginUrl);
    // We want to remove plugin with groupid/artifactid/version form
    singlePluginInstallAndRemove("groupid/plugintest/1.0.0", "plugintest", pluginUrl);
    // We want to remove plugin with groupid/artifactid form
    singlePluginInstallAndRemove("groupid/plugintest", "plugintest", pluginUrl);
}
/**
 * "remove" with a blank plugin name must exit with USAGE.
 * NOTE(review): despite the method name, no exception is expected here and
 * the tested name is empty rather than null.
 */
public void testRemovePlugin_NullName_ThrowsException() throws IOException {
    assertStatus("remove ", USAGE);
}
/** "remove" must reject URL-form arguments with USAGE and an "Illegal plugin name" message. */
public void testRemovePluginWithURLForm() throws Exception {
    assertStatus("remove file://whatever", USAGE);
    assertThat(terminal.getTerminalOutput(), hasItem(containsString("Illegal plugin name")));
}
/**
 * "remove" must reject reserved names (core scripts and their variants,
 * matched case-insensitively) with USAGE.
 */
public void testForbiddenPluginNames() throws IOException {
    String[] forbiddenNames = {
        "elasticsearch", "elasticsearch.bat", "elasticsearch.in.sh",
        "plugin", "plugin.bat", "service.bat",
        "ELASTICSEARCH", "ELASTICSEARCH.IN.SH"
    };
    for (String forbidden : forbiddenNames) {
        assertStatus("remove " + forbidden, USAGE);
    }
}
/**
 * Every current official plugin name must pass checkForOfficialPlugins()
 * without throwing, while the legacy "elasticsearch-" prefixed form must be
 * rejected with IllegalArgumentException.
 */
public void testOfficialPluginName_ThrowsException() throws IOException {
    String[] officialPlugins = {
        "analysis-icu", "analysis-kuromoji", "analysis-phonetic", "analysis-smartcn", "analysis-stempel",
        "delete-by-query",
        "lang-expression", "lang-groovy", "lang-javascript", "lang-python",
        "mapper-attachments", "mapper-murmur3", "mapper-size",
        "discovery-multicast", "discovery-azure", "discovery-ec2", "discovery-gce",
        "repository-azure", "repository-s3",
        "store-smb"
    };
    for (String officialPlugin : officialPlugins) {
        PluginManager.checkForOfficialPlugins(officialPlugin);
    }
    try {
        PluginManager.checkForOfficialPlugins("elasticsearch-mapper-attachment");
        fail("elasticsearch-mapper-attachment should not be allowed");
    } catch (IllegalArgumentException e) {
        // We expect that error
    }
}
/**
 * user:pass credentials embedded in a plain-HTTP install URL must be rejected
 * with IO_ERROR and an explanatory message.
 *
 * Fix over the original: dropped a pointless {@code String.format} call that
 * had no format arguments.
 */
public void testThatBasicAuthIsRejectedOnHttp() throws Exception {
    assertStatus("install http://user:pass@localhost:12345/foo.zip --verbose", CliTool.ExitStatus.IO_ERROR);
    assertThat(terminal.getTerminalOutput(), hasItem(containsString("Basic auth is only supported for HTTPS!")));
}
/**
 * user:pass credentials in an HTTPS install URL must be forwarded as an HTTP
 * Basic Authorization header. Spins up a local netty HTTPS server with a
 * self-signed certificate that records incoming requests, installs against it
 * (expected to fail with IO_ERROR since no real zip is served), and then
 * asserts that exactly one request arrived carrying the expected header.
 */
public void testThatBasicAuthIsSupportedWithHttps() throws Exception {
    assumeTrue("test requires security manager to be disabled", System.getSecurityManager() == null);
    // Remember the JVM-global socket factory so it can be restored afterwards.
    SSLSocketFactory defaultSocketFactory = HttpsURLConnection.getDefaultSSLSocketFactory();
    ServerBootstrap serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory());
    SelfSignedCertificate ssc = null;
    try {
        try {
            ssc = new SelfSignedCertificate("localhost");
        } catch (Exception e) {
            assumeNoException("self signing shenanigans not supported by this JDK", e);
        }
        // Create a trust manager that does not validate certificate chains:
        SSLContext sc = SSLContext.getInstance("SSL");
        sc.init(null, InsecureTrustManagerFactory.INSTANCE.getTrustManagers(), null);
        HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory());
        final List<HttpRequest> requests = new ArrayList<>();
        final SslContext sslContext = SslContext.newServerContext(ssc.certificate(), ssc.privateKey());
        serverBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
            @Override
            public ChannelPipeline getPipeline() throws Exception {
                return Channels.pipeline(
                        new SslHandler(sslContext.newEngine()),
                        new HttpRequestDecoder(),
                        new HttpResponseEncoder(),
                        new LoggingServerHandler(requests)
                );
            }
        });
        // Bind to an ephemeral port and read back which one was chosen.
        Channel channel = serverBootstrap.bind(new InetSocketAddress(InetAddress.getByName("localhost"), 0));
        int port = ((InetSocketAddress) channel.getLocalAddress()).getPort();
        // IO_ERROR because there is no real file delivered...
        assertStatus(String.format(Locale.ROOT, "install https://user:pass@localhost:%s/foo.zip --verbose --timeout 1s", port), ExitStatus.IO_ERROR);
        // ensure that we did not try any other data source like download.elastic.co, in case we specified our own local URL
        assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("download.elastic.co"))));
        assertThat(requests, hasSize(1));
        String msg = String.format(Locale.ROOT, "Request header did not contain Authorization header, terminal output was: %s", terminal.getTerminalOutput());
        assertThat(msg, requests.get(0).headers().contains("Authorization"), is(true));
        assertThat(msg, requests.get(0).headers().get("Authorization"), is("Basic " + Base64.encodeBytes("user:pass".getBytes(StandardCharsets.UTF_8))));
    } finally {
        // Undo global state and release the server even if assertions failed.
        HttpsURLConnection.setDefaultSSLSocketFactory(defaultSocketFactory);
        serverBootstrap.releaseExternalResources();
        if (ssc != null) {
            ssc.delete();
        }
    }
}
/**
 * Netty upstream handler that records every {@link HttpRequest} it receives
 * into a caller-supplied list and answers each one with HTTP 400, so tests
 * can inspect request headers afterwards.
 *
 * Fix over the original: the {@code requests} field is now {@code final}.
 */
private static class LoggingServerHandler extends SimpleChannelUpstreamHandler {

    /** Requests seen so far; supplied and owned by the caller. */
    private final List<HttpRequest> requests;

    public LoggingServerHandler(List<HttpRequest> requests) {
        this.requests = requests;
    }

    @Override
    public void messageReceived(final ChannelHandlerContext ctx, final MessageEvent e) throws InterruptedException {
        final HttpRequest request = (HttpRequest) e.getMessage();
        requests.add(request);
        // Always answer 400 — the tests only care about what was requested.
        final org.jboss.netty.handler.codec.http.HttpResponse response = new DefaultHttpResponse(HTTP_1_1, HttpResponseStatus.BAD_REQUEST);
        ctx.getChannel().write(response);
    }
}
/**
 * Builds the initial node environment for these tests: HTTP enabled and a
 * throwaway temp directory as the node home.
 */
private Environment buildInitialSettings() throws IOException {
    Settings initialSettings = settingsBuilder()
            .put("http.enabled", true)
            .put("path.home", createTempDir())
            .build();
    return InternalSettingsPreparer.prepareEnvironment(initialSettings, null);
}
/** Runs the given plugin-manager command and asserts it exits with OK. */
private void assertStatusOk(String command) {
    assertStatus(command, ExitStatus.OK);
}
/**
 * Runs the given plugin-manager command and asserts that it exits with the
 * expected status, including the captured terminal output in the failure
 * message for easier debugging.
 */
private void assertStatus(String command, ExitStatus exitStatus) {
    ExitStatus actualStatus = new PluginManagerCliParser(terminal).execute(args(command));
    String failureMessage = "Terminal output was: " + terminal.getTerminalOutput();
    assertThat(failureMessage, actualStatus, is(exitStatus));
}
/**
 * Clears the captured terminal output, runs "list", and asserts that some
 * output line mentions the given plugin name.
 */
private void assertThatPluginIsListed(String pluginName) {
    terminal.getTerminalOutput().clear();
    assertStatusOk("list");
    assertThat(String.format(Locale.ROOT, "Terminal output was: %s", terminal.getTerminalOutput()),
            terminal.getTerminalOutput(), hasItem(containsString(pluginName)));
}
/**
 * Clears the captured terminal output, runs "list", and asserts that no
 * output line equals the given plugin name.
 */
private void assertThatPluginIsNotListed(String pluginName) {
    terminal.getTerminalOutput().clear();
    assertStatusOk("list");
    assertFalse(String.format(Locale.ROOT, "Terminal output was: %s", terminal.getTerminalOutput()),
            terminal.getTerminalOutput().contains(pluginName));
}
}
| apache-2.0 |
hortonworks/hortonworks-sandbox | desktop/libs/hadoop/java/src/main/gen-java/org/apache/hadoop/thriftfs/api/DFSHealthReport.java | 32474 | /**
* Autogenerated by Thrift Compiler (0.7.0)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
*/
package org.apache.hadoop.thriftfs.api;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.HashMap;
import java.util.EnumMap;
import java.util.Set;
import java.util.HashSet;
import java.util.EnumSet;
import java.util.Collections;
import java.util.BitSet;
import java.nio.ByteBuffer;
import java.util.Arrays;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Information that mirrors the "health report" information available on the
* NameNode web UI
*/
public class DFSHealthReport implements org.apache.thrift.TBase<DFSHealthReport, DFSHealthReport._Fields>, java.io.Serializable, Cloneable {
// Thrift wire metadata (generated): struct descriptor plus one TField
// descriptor per member, pairing each field's wire name with its Thrift
// type tag and numeric field id from the IDL.
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DFSHealthReport");
private static final org.apache.thrift.protocol.TField BYTES_TOTAL_FIELD_DESC = new org.apache.thrift.protocol.TField("bytesTotal", org.apache.thrift.protocol.TType.I64, (short)1);
private static final org.apache.thrift.protocol.TField BYTES_USED_FIELD_DESC = new org.apache.thrift.protocol.TField("bytesUsed", org.apache.thrift.protocol.TType.I64, (short)2);
private static final org.apache.thrift.protocol.TField BYTES_REMAINING_FIELD_DESC = new org.apache.thrift.protocol.TField("bytesRemaining", org.apache.thrift.protocol.TType.I64, (short)3);
private static final org.apache.thrift.protocol.TField BYTES_NON_DFS_FIELD_DESC = new org.apache.thrift.protocol.TField("bytesNonDfs", org.apache.thrift.protocol.TType.I64, (short)4);
private static final org.apache.thrift.protocol.TField NUM_LIVE_DATA_NODES_FIELD_DESC = new org.apache.thrift.protocol.TField("numLiveDataNodes", org.apache.thrift.protocol.TType.I32, (short)5);
private static final org.apache.thrift.protocol.TField NUM_DEAD_DATA_NODES_FIELD_DESC = new org.apache.thrift.protocol.TField("numDeadDataNodes", org.apache.thrift.protocol.TType.I32, (short)6);
private static final org.apache.thrift.protocol.TField UPGRADE_STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("upgradeStatus", org.apache.thrift.protocol.TType.STRUCT, (short)7);
private static final org.apache.thrift.protocol.TField HTTP_PORT_FIELD_DESC = new org.apache.thrift.protocol.TField("httpPort", org.apache.thrift.protocol.TType.I32, (short)8);
// Struct members (generated; public per Thrift convention). The byte counters
// below presumably describe DFS capacity in bytes — semantics are defined in
// the Thrift IDL this file was generated from, not here.
public long bytesTotal; // required
public long bytesUsed; // required
public long bytesRemaining; // required
public long bytesNonDfs; // required
/**
 * How many datanodes are considered live
 */
public int numLiveDataNodes; // required
/**
 * How many datanodes are considered dead
 */
public int numDeadDataNodes; // required
/**
 * Status of the current running upgrade. If no upgrade
 * is running, this will be null.
 */
public UpgradeStatusReport upgradeStatus; // required
/**
 * The http port that the NameNode is listening on for its web UI
 * - this isn't really health, but it's related and handy
 */
public int httpPort; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
/**
 * The set of fields this struct contains, along with convenience methods for finding and manipulating them.
 * (Thrift-generated: each constant pairs the field's numeric Thrift id with its wire name.)
 */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    BYTES_TOTAL((short)1, "bytesTotal"),
    BYTES_USED((short)2, "bytesUsed"),
    BYTES_REMAINING((short)3, "bytesRemaining"),
    BYTES_NON_DFS((short)4, "bytesNonDfs"),
    /**
     * How many datanodes are considered live
     */
    NUM_LIVE_DATA_NODES((short)5, "numLiveDataNodes"),
    /**
     * How many datanodes are considered dead
     */
    NUM_DEAD_DATA_NODES((short)6, "numDeadDataNodes"),
    /**
     * Status of the current running upgrade. If no upgrade
     * is running, this will be null.
     */
    UPGRADE_STATUS((short)7, "upgradeStatus"),
    /**
     * The http port that the NameNode is listening on for its web UI
     * - this isn't really health, but it's related and handy
     */
    HTTP_PORT((short)8, "httpPort");

    // Lookup table from wire field name to enum constant, built once at class load.
    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();

    static {
        for (_Fields field : EnumSet.allOf(_Fields.class)) {
            byName.put(field.getFieldName(), field);
        }
    }

    /**
     * Find the _Fields constant that matches fieldId, or null if its not found.
     */
    public static _Fields findByThriftId(int fieldId) {
        switch(fieldId) {
            case 1: // BYTES_TOTAL
                return BYTES_TOTAL;
            case 2: // BYTES_USED
                return BYTES_USED;
            case 3: // BYTES_REMAINING
                return BYTES_REMAINING;
            case 4: // BYTES_NON_DFS
                return BYTES_NON_DFS;
            case 5: // NUM_LIVE_DATA_NODES
                return NUM_LIVE_DATA_NODES;
            case 6: // NUM_DEAD_DATA_NODES
                return NUM_DEAD_DATA_NODES;
            case 7: // UPGRADE_STATUS
                return UPGRADE_STATUS;
            case 8: // HTTP_PORT
                return HTTP_PORT;
            default:
                return null;
        }
    }

    /**
     * Find the _Fields constant that matches fieldId, throwing an exception
     * if it is not found.
     */
    public static _Fields findByThriftIdOrThrow(int fieldId) {
        _Fields fields = findByThriftId(fieldId);
        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
        return fields;
    }

    /**
     * Find the _Fields constant that matches name, or null if its not found.
     */
    public static _Fields findByName(String name) {
        return byName.get(name);
    }

    // Per-constant state: the Thrift field id and the wire field name.
    private final short _thriftId;
    private final String _fieldName;

    _Fields(short thriftId, String fieldName) {
        _thriftId = thriftId;
        _fieldName = fieldName;
    }

    public short getThriftFieldId() {
        return _thriftId;
    }

    public String getFieldName() {
        return _fieldName;
    }
}
// isset id assignments
// (generated) Each primitive field gets a bit index in __isset_bit_vector so
// Thrift can track whether it has been explicitly set; non-primitive fields
// (upgradeStatus) use null instead and need no bit.
private static final int __BYTESTOTAL_ISSET_ID = 0;
private static final int __BYTESUSED_ISSET_ID = 1;
private static final int __BYTESREMAINING_ISSET_ID = 2;
private static final int __BYTESNONDFS_ISSET_ID = 3;
private static final int __NUMLIVEDATANODES_ISSET_ID = 4;
private static final int __NUMDEADDATANODES_ISSET_ID = 5;
private static final int __HTTPPORT_ISSET_ID = 6;
private BitSet __isset_bit_vector = new BitSet(7);
// Field metadata map, populated by the static initializer below.
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.BYTES_TOTAL, new org.apache.thrift.meta_data.FieldMetaData("bytesTotal", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
tmpMap.put(_Fields.BYTES_USED, new org.apache.thrift.meta_data.FieldMetaData("bytesUsed", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
tmpMap.put(_Fields.BYTES_REMAINING, new org.apache.thrift.meta_data.FieldMetaData("bytesRemaining", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
tmpMap.put(_Fields.BYTES_NON_DFS, new org.apache.thrift.meta_data.FieldMetaData("bytesNonDfs", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
tmpMap.put(_Fields.NUM_LIVE_DATA_NODES, new org.apache.thrift.meta_data.FieldMetaData("numLiveDataNodes", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
tmpMap.put(_Fields.NUM_DEAD_DATA_NODES, new org.apache.thrift.meta_data.FieldMetaData("numDeadDataNodes", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
tmpMap.put(_Fields.UPGRADE_STATUS, new org.apache.thrift.meta_data.FieldMetaData("upgradeStatus", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, UpgradeStatusReport.class)));
tmpMap.put(_Fields.HTTP_PORT, new org.apache.thrift.meta_data.FieldMetaData("httpPort", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DFSHealthReport.class, metaDataMap);
}
public DFSHealthReport() {
}
public DFSHealthReport(
long bytesTotal,
long bytesUsed,
long bytesRemaining,
long bytesNonDfs,
int numLiveDataNodes,
int numDeadDataNodes,
UpgradeStatusReport upgradeStatus,
int httpPort)
{
this();
this.bytesTotal = bytesTotal;
setBytesTotalIsSet(true);
this.bytesUsed = bytesUsed;
setBytesUsedIsSet(true);
this.bytesRemaining = bytesRemaining;
setBytesRemainingIsSet(true);
this.bytesNonDfs = bytesNonDfs;
setBytesNonDfsIsSet(true);
this.numLiveDataNodes = numLiveDataNodes;
setNumLiveDataNodesIsSet(true);
this.numDeadDataNodes = numDeadDataNodes;
setNumDeadDataNodesIsSet(true);
this.upgradeStatus = upgradeStatus;
this.httpPort = httpPort;
setHttpPortIsSet(true);
}
/**
* Performs a deep copy on <i>other</i>.
*/
public DFSHealthReport(DFSHealthReport other) {
__isset_bit_vector.clear();
__isset_bit_vector.or(other.__isset_bit_vector);
this.bytesTotal = other.bytesTotal;
this.bytesUsed = other.bytesUsed;
this.bytesRemaining = other.bytesRemaining;
this.bytesNonDfs = other.bytesNonDfs;
this.numLiveDataNodes = other.numLiveDataNodes;
this.numDeadDataNodes = other.numDeadDataNodes;
if (other.isSetUpgradeStatus()) {
this.upgradeStatus = new UpgradeStatusReport(other.upgradeStatus);
}
this.httpPort = other.httpPort;
}
public DFSHealthReport deepCopy() {
return new DFSHealthReport(this);
}
@Override
public void clear() {
setBytesTotalIsSet(false);
this.bytesTotal = 0;
setBytesUsedIsSet(false);
this.bytesUsed = 0;
setBytesRemainingIsSet(false);
this.bytesRemaining = 0;
setBytesNonDfsIsSet(false);
this.bytesNonDfs = 0;
setNumLiveDataNodesIsSet(false);
this.numLiveDataNodes = 0;
setNumDeadDataNodesIsSet(false);
this.numDeadDataNodes = 0;
this.upgradeStatus = null;
setHttpPortIsSet(false);
this.httpPort = 0;
}
public long getBytesTotal() {
return this.bytesTotal;
}
public DFSHealthReport setBytesTotal(long bytesTotal) {
this.bytesTotal = bytesTotal;
setBytesTotalIsSet(true);
return this;
}
public void unsetBytesTotal() {
__isset_bit_vector.clear(__BYTESTOTAL_ISSET_ID);
}
/** Returns true if field bytesTotal is set (has been assigned a value) and false otherwise */
public boolean isSetBytesTotal() {
return __isset_bit_vector.get(__BYTESTOTAL_ISSET_ID);
}
public void setBytesTotalIsSet(boolean value) {
__isset_bit_vector.set(__BYTESTOTAL_ISSET_ID, value);
}
public long getBytesUsed() {
return this.bytesUsed;
}
public DFSHealthReport setBytesUsed(long bytesUsed) {
this.bytesUsed = bytesUsed;
setBytesUsedIsSet(true);
return this;
}
public void unsetBytesUsed() {
__isset_bit_vector.clear(__BYTESUSED_ISSET_ID);
}
/** Returns true if field bytesUsed is set (has been assigned a value) and false otherwise */
public boolean isSetBytesUsed() {
return __isset_bit_vector.get(__BYTESUSED_ISSET_ID);
}
public void setBytesUsedIsSet(boolean value) {
__isset_bit_vector.set(__BYTESUSED_ISSET_ID, value);
}
public long getBytesRemaining() {
return this.bytesRemaining;
}
public DFSHealthReport setBytesRemaining(long bytesRemaining) {
this.bytesRemaining = bytesRemaining;
setBytesRemainingIsSet(true);
return this;
}
public void unsetBytesRemaining() {
__isset_bit_vector.clear(__BYTESREMAINING_ISSET_ID);
}
/** Returns true if field bytesRemaining is set (has been assigned a value) and false otherwise */
public boolean isSetBytesRemaining() {
return __isset_bit_vector.get(__BYTESREMAINING_ISSET_ID);
}
public void setBytesRemainingIsSet(boolean value) {
__isset_bit_vector.set(__BYTESREMAINING_ISSET_ID, value);
}
public long getBytesNonDfs() {
return this.bytesNonDfs;
}
public DFSHealthReport setBytesNonDfs(long bytesNonDfs) {
this.bytesNonDfs = bytesNonDfs;
setBytesNonDfsIsSet(true);
return this;
}
public void unsetBytesNonDfs() {
__isset_bit_vector.clear(__BYTESNONDFS_ISSET_ID);
}
/** Returns true if field bytesNonDfs is set (has been assigned a value) and false otherwise */
public boolean isSetBytesNonDfs() {
return __isset_bit_vector.get(__BYTESNONDFS_ISSET_ID);
}
public void setBytesNonDfsIsSet(boolean value) {
__isset_bit_vector.set(__BYTESNONDFS_ISSET_ID, value);
}
/**
* How many datanodes are considered live
*/
public int getNumLiveDataNodes() {
return this.numLiveDataNodes;
}
/**
* How many datanodes are considered live
*/
public DFSHealthReport setNumLiveDataNodes(int numLiveDataNodes) {
this.numLiveDataNodes = numLiveDataNodes;
setNumLiveDataNodesIsSet(true);
return this;
}
public void unsetNumLiveDataNodes() {
__isset_bit_vector.clear(__NUMLIVEDATANODES_ISSET_ID);
}
/** Returns true if field numLiveDataNodes is set (has been assigned a value) and false otherwise */
public boolean isSetNumLiveDataNodes() {
return __isset_bit_vector.get(__NUMLIVEDATANODES_ISSET_ID);
}
public void setNumLiveDataNodesIsSet(boolean value) {
__isset_bit_vector.set(__NUMLIVEDATANODES_ISSET_ID, value);
}
/**
* How many datanodes are considered dead
*/
public int getNumDeadDataNodes() {
return this.numDeadDataNodes;
}
/**
* How many datanodes are considered dead
*/
public DFSHealthReport setNumDeadDataNodes(int numDeadDataNodes) {
this.numDeadDataNodes = numDeadDataNodes;
setNumDeadDataNodesIsSet(true);
return this;
}
public void unsetNumDeadDataNodes() {
__isset_bit_vector.clear(__NUMDEADDATANODES_ISSET_ID);
}
/** Returns true if field numDeadDataNodes is set (has been assigned a value) and false otherwise */
public boolean isSetNumDeadDataNodes() {
return __isset_bit_vector.get(__NUMDEADDATANODES_ISSET_ID);
}
public void setNumDeadDataNodesIsSet(boolean value) {
__isset_bit_vector.set(__NUMDEADDATANODES_ISSET_ID, value);
}
/**
* Status of the current running upgrade. If no upgrade
* is running, this will be null.
*/
public UpgradeStatusReport getUpgradeStatus() {
return this.upgradeStatus;
}
/**
* Status of the current running upgrade. If no upgrade
* is running, this will be null.
*/
public DFSHealthReport setUpgradeStatus(UpgradeStatusReport upgradeStatus) {
this.upgradeStatus = upgradeStatus;
return this;
}
public void unsetUpgradeStatus() {
this.upgradeStatus = null;
}
/** Returns true if field upgradeStatus is set (has been assigned a value) and false otherwise */
public boolean isSetUpgradeStatus() {
return this.upgradeStatus != null;
}
public void setUpgradeStatusIsSet(boolean value) {
if (!value) {
this.upgradeStatus = null;
}
}
/**
* The http port that the NameNode is listening on for its web UI
* - this isn't really health, but it's related and handy
*/
public int getHttpPort() {
return this.httpPort;
}
/**
* The http port that the NameNode is listening on for its web UI
* - this isn't really health, but it's related and handy
*/
public DFSHealthReport setHttpPort(int httpPort) {
this.httpPort = httpPort;
setHttpPortIsSet(true);
return this;
}
public void unsetHttpPort() {
__isset_bit_vector.clear(__HTTPPORT_ISSET_ID);
}
/** Returns true if field httpPort is set (has been assigned a value) and false otherwise */
public boolean isSetHttpPort() {
return __isset_bit_vector.get(__HTTPPORT_ISSET_ID);
}
public void setHttpPortIsSet(boolean value) {
__isset_bit_vector.set(__HTTPPORT_ISSET_ID, value);
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case BYTES_TOTAL:
if (value == null) {
unsetBytesTotal();
} else {
setBytesTotal((Long)value);
}
break;
case BYTES_USED:
if (value == null) {
unsetBytesUsed();
} else {
setBytesUsed((Long)value);
}
break;
case BYTES_REMAINING:
if (value == null) {
unsetBytesRemaining();
} else {
setBytesRemaining((Long)value);
}
break;
case BYTES_NON_DFS:
if (value == null) {
unsetBytesNonDfs();
} else {
setBytesNonDfs((Long)value);
}
break;
case NUM_LIVE_DATA_NODES:
if (value == null) {
unsetNumLiveDataNodes();
} else {
setNumLiveDataNodes((Integer)value);
}
break;
case NUM_DEAD_DATA_NODES:
if (value == null) {
unsetNumDeadDataNodes();
} else {
setNumDeadDataNodes((Integer)value);
}
break;
case UPGRADE_STATUS:
if (value == null) {
unsetUpgradeStatus();
} else {
setUpgradeStatus((UpgradeStatusReport)value);
}
break;
case HTTP_PORT:
if (value == null) {
unsetHttpPort();
} else {
setHttpPort((Integer)value);
}
break;
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
case BYTES_TOTAL:
return Long.valueOf(getBytesTotal());
case BYTES_USED:
return Long.valueOf(getBytesUsed());
case BYTES_REMAINING:
return Long.valueOf(getBytesRemaining());
case BYTES_NON_DFS:
return Long.valueOf(getBytesNonDfs());
case NUM_LIVE_DATA_NODES:
return Integer.valueOf(getNumLiveDataNodes());
case NUM_DEAD_DATA_NODES:
return Integer.valueOf(getNumDeadDataNodes());
case UPGRADE_STATUS:
return getUpgradeStatus();
case HTTP_PORT:
return Integer.valueOf(getHttpPort());
}
throw new IllegalStateException();
}
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case BYTES_TOTAL:
return isSetBytesTotal();
case BYTES_USED:
return isSetBytesUsed();
case BYTES_REMAINING:
return isSetBytesRemaining();
case BYTES_NON_DFS:
return isSetBytesNonDfs();
case NUM_LIVE_DATA_NODES:
return isSetNumLiveDataNodes();
case NUM_DEAD_DATA_NODES:
return isSetNumDeadDataNodes();
case UPGRADE_STATUS:
return isSetUpgradeStatus();
case HTTP_PORT:
return isSetHttpPort();
}
throw new IllegalStateException();
}
@Override
public boolean equals(Object that) {
if (that == null)
return false;
if (that instanceof DFSHealthReport)
return this.equals((DFSHealthReport)that);
return false;
}
/**
 * Field-by-field equality against another {@code DFSHealthReport}.
 * Every primitive field participates unconditionally; the optional
 * {@code upgradeStatus} struct is compared only when set, and both sides
 * must agree on whether it is set.
 *
 * @param that the report to compare against (may be null)
 * @return true if every field matches
 */
public boolean equals(DFSHealthReport that) {
  if (that == null) {
    return false;
  }
  // Primitive fields: the generated "present" flags were constant true, so
  // these reduce to plain value comparisons.
  if (this.bytesTotal != that.bytesTotal
      || this.bytesUsed != that.bytesUsed
      || this.bytesRemaining != that.bytesRemaining
      || this.bytesNonDfs != that.bytesNonDfs
      || this.numLiveDataNodes != that.numLiveDataNodes
      || this.numDeadDataNodes != that.numDeadDataNodes
      || this.httpPort != that.httpPort) {
    return false;
  }
  // Optional struct: both-unset matches, one-sided set does not.
  final boolean thisHasUpgrade = this.isSetUpgradeStatus();
  final boolean thatHasUpgrade = that.isSetUpgradeStatus();
  if (thisHasUpgrade != thatHasUpgrade) {
    return false;
  }
  return !thisHasUpgrade || this.upgradeStatus.equals(that.upgradeStatus);
}
/**
 * Hash code consistent with {@link #equals(DFSHealthReport)}: every field
 * that participates in equality participates here, with the optional
 * {@code upgradeStatus} contributing 0 when unset.
 * <p>
 * The generated stub returned a constant 0, which satisfies the
 * equals/hashCode contract but collapses every instance into a single
 * bucket of any hash-based collection; this version distributes instances
 * properly.
 */
@Override
public int hashCode() {
  int result = 17;
  result = 31 * result + (int) (bytesTotal ^ (bytesTotal >>> 32));
  result = 31 * result + (int) (bytesUsed ^ (bytesUsed >>> 32));
  result = 31 * result + (int) (bytesRemaining ^ (bytesRemaining >>> 32));
  result = 31 * result + (int) (bytesNonDfs ^ (bytesNonDfs >>> 32));
  result = 31 * result + numLiveDataNodes;
  result = 31 * result + numDeadDataNodes;
  result = 31 * result + (isSetUpgradeStatus() ? upgradeStatus.hashCode() : 0);
  result = 31 * result + httpPort;
  return result;
}
public int compareTo(DFSHealthReport other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
DFSHealthReport typedOther = (DFSHealthReport)other;
lastComparison = Boolean.valueOf(isSetBytesTotal()).compareTo(typedOther.isSetBytesTotal());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetBytesTotal()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bytesTotal, typedOther.bytesTotal);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetBytesUsed()).compareTo(typedOther.isSetBytesUsed());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetBytesUsed()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bytesUsed, typedOther.bytesUsed);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetBytesRemaining()).compareTo(typedOther.isSetBytesRemaining());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetBytesRemaining()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bytesRemaining, typedOther.bytesRemaining);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetBytesNonDfs()).compareTo(typedOther.isSetBytesNonDfs());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetBytesNonDfs()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bytesNonDfs, typedOther.bytesNonDfs);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetNumLiveDataNodes()).compareTo(typedOther.isSetNumLiveDataNodes());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetNumLiveDataNodes()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numLiveDataNodes, typedOther.numLiveDataNodes);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetNumDeadDataNodes()).compareTo(typedOther.isSetNumDeadDataNodes());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetNumDeadDataNodes()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numDeadDataNodes, typedOther.numDeadDataNodes);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetUpgradeStatus()).compareTo(typedOther.isSetUpgradeStatus());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetUpgradeStatus()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.upgradeStatus, typedOther.upgradeStatus);
if (lastComparison != 0) {
return lastComparison;
}
}
lastComparison = Boolean.valueOf(isSetHttpPort()).compareTo(typedOther.isSetHttpPort());
if (lastComparison != 0) {
return lastComparison;
}
if (isSetHttpPort()) {
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.httpPort, typedOther.httpPort);
if (lastComparison != 0) {
return lastComparison;
}
}
return 0;
}
public _Fields fieldForId(int fieldId) {
return _Fields.findByThriftId(fieldId);
}
public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
org.apache.thrift.protocol.TField field;
iprot.readStructBegin();
while (true)
{
field = iprot.readFieldBegin();
if (field.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (field.id) {
case 1: // BYTES_TOTAL
if (field.type == org.apache.thrift.protocol.TType.I64) {
this.bytesTotal = iprot.readI64();
setBytesTotalIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
case 2: // BYTES_USED
if (field.type == org.apache.thrift.protocol.TType.I64) {
this.bytesUsed = iprot.readI64();
setBytesUsedIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
case 3: // BYTES_REMAINING
if (field.type == org.apache.thrift.protocol.TType.I64) {
this.bytesRemaining = iprot.readI64();
setBytesRemainingIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
case 4: // BYTES_NON_DFS
if (field.type == org.apache.thrift.protocol.TType.I64) {
this.bytesNonDfs = iprot.readI64();
setBytesNonDfsIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
case 5: // NUM_LIVE_DATA_NODES
if (field.type == org.apache.thrift.protocol.TType.I32) {
this.numLiveDataNodes = iprot.readI32();
setNumLiveDataNodesIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
case 6: // NUM_DEAD_DATA_NODES
if (field.type == org.apache.thrift.protocol.TType.I32) {
this.numDeadDataNodes = iprot.readI32();
setNumDeadDataNodesIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
case 7: // UPGRADE_STATUS
if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
this.upgradeStatus = new UpgradeStatusReport();
this.upgradeStatus.read(iprot);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
case 8: // HTTP_PORT
if (field.type == org.apache.thrift.protocol.TType.I32) {
this.httpPort = iprot.readI32();
setHttpPortIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
validate();
}
/**
 * Serializes this struct to the supplied Thrift protocol.
 * <p>
 * Primitive fields are always written (Thrift primitives have no "unset"
 * wire representation); the optional {@code upgradeStatus} struct is
 * emitted only when non-null. Field order follows the IDL declaration.
 *
 * @param oprot the output protocol to write to
 * @throws org.apache.thrift.TException if the protocol reports an error
 */
public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
  validate();
  oprot.writeStructBegin(STRUCT_DESC);
  oprot.writeFieldBegin(BYTES_TOTAL_FIELD_DESC);
  oprot.writeI64(this.bytesTotal);
  oprot.writeFieldEnd();
  oprot.writeFieldBegin(BYTES_USED_FIELD_DESC);
  oprot.writeI64(this.bytesUsed);
  oprot.writeFieldEnd();
  oprot.writeFieldBegin(BYTES_REMAINING_FIELD_DESC);
  oprot.writeI64(this.bytesRemaining);
  oprot.writeFieldEnd();
  oprot.writeFieldBegin(BYTES_NON_DFS_FIELD_DESC);
  oprot.writeI64(this.bytesNonDfs);
  oprot.writeFieldEnd();
  oprot.writeFieldBegin(NUM_LIVE_DATA_NODES_FIELD_DESC);
  oprot.writeI32(this.numLiveDataNodes);
  oprot.writeFieldEnd();
  oprot.writeFieldBegin(NUM_DEAD_DATA_NODES_FIELD_DESC);
  oprot.writeI32(this.numDeadDataNodes);
  oprot.writeFieldEnd();
  // Optional field: skipped entirely on the wire when unset (null).
  if (this.upgradeStatus != null) {
    oprot.writeFieldBegin(UPGRADE_STATUS_FIELD_DESC);
    this.upgradeStatus.write(oprot);
    oprot.writeFieldEnd();
  }
  oprot.writeFieldBegin(HTTP_PORT_FIELD_DESC);
  oprot.writeI32(this.httpPort);
  oprot.writeFieldEnd();
  oprot.writeFieldStop();
  oprot.writeStructEnd();
}
/**
 * Human-readable rendering of every field in declaration order, e.g.
 * {@code DFSHealthReport(bytesTotal:1, ..., httpPort:0)}. The optional
 * {@code upgradeStatus} prints as the literal string "null" when absent.
 */
@Override
public String toString() {
  StringBuilder sb = new StringBuilder("DFSHealthReport(");
  sb.append("bytesTotal:").append(this.bytesTotal);
  sb.append(", ").append("bytesUsed:").append(this.bytesUsed);
  sb.append(", ").append("bytesRemaining:").append(this.bytesRemaining);
  sb.append(", ").append("bytesNonDfs:").append(this.bytesNonDfs);
  sb.append(", ").append("numLiveDataNodes:").append(this.numLiveDataNodes);
  sb.append(", ").append("numDeadDataNodes:").append(this.numDeadDataNodes);
  sb.append(", ").append("upgradeStatus:");
  if (this.upgradeStatus == null) {
    sb.append("null");
  } else {
    sb.append(this.upgradeStatus);
  }
  sb.append(", ").append("httpPort:").append(this.httpPort);
  sb.append(")");
  return sb.toString();
}
public void validate() throws org.apache.thrift.TException {
// check for required fields
}
private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
try {
write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
try {
// it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
__isset_bit_vector = new BitSet(1);
read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
} catch (org.apache.thrift.TException te) {
throw new java.io.IOException(te);
}
}
}
| apache-2.0 |
iftekhar-ahmed/Bluetooth-Music-Player | app/src/main/java/com/apptitive/btmusicplayer/transport/AudioStreamThread.java | 2168 | package com.apptitive.btmusicplayer.transport;
import android.bluetooth.BluetoothSocket;
import android.os.Handler;
import com.apptitive.btmusicplayer.BluetoothActivity;
import com.apptitive.btmusicplayer.utils.Constants;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
/**
 * Worker thread that pumps audio bytes over an established Bluetooth RFCOMM
 * connection. Incoming bytes are forwarded to the supplied {@link Handler}
 * as {@code Constants.DATA_READ} messages; any I/O failure, or the remote
 * side closing the socket, is reported as
 * {@code Constants.CONNECTION_INTERRUPTED}.
 */
public class AudioStreamThread extends Thread {
    private int mBufferSize;
    private final BluetoothSocket bluetoothSocket;
    private final InputStream socketInputStream;
    private final OutputStream socketOutputStream;
    private Handler mHandler;

    /**
     * @param bluetoothSocket an already-connected socket; its streams are
     *                        captured once here
     * @param dataHandler     receives DATA_READ / CONNECTION_INTERRUPTED
     *                        messages
     * @param bufferSize      size in bytes of the read buffer
     */
    public AudioStreamThread(BluetoothSocket bluetoothSocket, Handler dataHandler, int bufferSize) {
        this.bluetoothSocket = bluetoothSocket;
        mHandler = dataHandler;
        mBufferSize = bufferSize;
        InputStream inputStream = null;
        OutputStream outputStream = null;
        try {
            inputStream = bluetoothSocket.getInputStream();
            outputStream = bluetoothSocket.getOutputStream();
        } catch (IOException ioe) {
            // Streams stay null on failure; run()/write() will then fail fast
            // and report CONNECTION_INTERRUPTED.
            ioe.printStackTrace();
        }
        socketInputStream = inputStream;
        socketOutputStream = outputStream;
    }

    /**
     * Read loop.
     * <p>
     * Each chunk is copied before being posted: the Handler processes the
     * message asynchronously, so handing it the shared read buffer would let
     * the next read() overwrite data the Handler has not consumed yet (the
     * original code posted the shared buffer directly).
     * <p>
     * End-of-stream ({@code read() == -1}) is treated as a dropped
     * connection instead of being forwarded as DATA_READ, which also stops
     * the loop from busy-spinning once the remote side closes the socket.
     */
    @Override
    public void run() {
        byte[] buffer = new byte[mBufferSize];
        while (true) {
            try {
                int bytes = socketInputStream.read(buffer);
                if (bytes < 0) {
                    // Remote end closed the stream.
                    mHandler.obtainMessage(Constants.CONNECTION_INTERRUPTED).sendToTarget();
                    break;
                }
                byte[] chunk = new byte[bytes];
                System.arraycopy(buffer, 0, chunk, 0, bytes);
                mHandler.obtainMessage(Constants.DATA_READ, bytes, -1, chunk).sendToTarget();
            } catch (IOException e) {
                e.printStackTrace();
                mHandler.obtainMessage(Constants.CONNECTION_INTERRUPTED).sendToTarget();
                break;
            }
        }
    }

    /**
     * Writes the given bytes to the remote device; failures are reported to
     * the handler as CONNECTION_INTERRUPTED.
     *
     * @param buffer the bytes to send
     */
    public void write(byte[] buffer) {
        try {
            socketOutputStream.write(buffer);
        } catch (IOException e) {
            e.printStackTrace();
            mHandler.obtainMessage(Constants.CONNECTION_INTERRUPTED).sendToTarget();
        }
    }

    /** Closes the underlying socket, which also unblocks a pending read(). */
    public void cancel() {
        try {
            bluetoothSocket.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
| apache-2.0 |
SSEHUB/EASyProducer | Plugins/Instantiation/de.uni-hildesheim.sse.easy.instantiatorCore.tests/src/net/ssehub/easy/instantiation/core/model/vilTypes/NameSelector.java | 685 | package net.ssehub.easy.instantiation.core.model.vilTypes;
import net.ssehub.easy.varModel.confModel.IDecisionVariable;
/**
* A simple name-equality selector.
*
* @author Holger Eichelberger
*/
class NameSelector implements IVariableSelector {
private String selector;
/**
* Creates a new name-equality selector.
*
* @param selector the name
*/
public NameSelector(String selector) {
this.selector = selector;
}
@Override
public boolean select(IDecisionVariable var) {
return var.getDeclaration().getName().equals(selector)
|| var.getDeclaration().getQualifiedName().equals(selector);
}
} | apache-2.0 |
g-stone/java | distransaction/src/test/java/com/sxtj/mobilehis/usertest/service/TestServiceTest.java | 808 | package com.sxtj.mobilehis.usertest.service;
import javax.annotation.Resource;
import javax.transaction.Transactional;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.test.context.transaction.TransactionConfiguration;
/**
 * Spring-driven integration test for {@code TestService}, wired from
 * classpath:hib-config.xml. Transactions are NOT rolled back after each
 * test (defaultRollback = false), so changes are committed to the database.
 */
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(locations = { "classpath:hib-config.xml" })
@TransactionConfiguration(defaultRollback = false)
@Transactional
public class TestServiceTest {
    // Service under test, injected by the Spring test context.
    @Resource
    private TestService testService;
    // NOTE(review): testGet() invokes editUser(), making it identical to
    // testEdit() below. Presumably it should exercise a read/lookup method
    // of TestService instead -- confirm against the TestService API.
    @Test
    public void testGet() {
        this.testService.editUser();
    }
    // Exercises the edit path of the service.
    @Test
    public void testEdit() {
        this.testService.editUser();
    }
}
| apache-2.0 |
etirelli/kie-wb-distributions | kie-wb-parent/kie-wb-webapp/src/main/java/org/kie/workbench/client/perspectives/AdministrationPerspective.java | 5429 | /*
* Copyright 2012 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.kie.workbench.client.perspectives;
import javax.annotation.PostConstruct;
import javax.enterprise.context.ApplicationScoped;
import javax.inject.Inject;
import org.guvnor.structure.client.editors.repository.clone.CloneRepositoryPresenter;
import org.guvnor.structure.organizationalunit.OrganizationalUnit;
import org.guvnor.structure.repositories.Repository;
import org.guvnor.structure.security.OrganizationalUnitAction;
import org.guvnor.structure.security.RepositoryAction;
import org.jboss.errai.ioc.client.container.SyncBeanManager;
import org.kie.workbench.client.resources.i18n.AppConstants;
import org.kie.workbench.common.widgets.client.handlers.NewResourcePresenter;
import org.kie.workbench.common.workbench.client.PerspectiveIds;
import org.uberfire.client.annotations.Perspective;
import org.uberfire.client.annotations.WorkbenchMenu;
import org.uberfire.client.annotations.WorkbenchPerspective;
import org.uberfire.client.callbacks.Callback;
import org.uberfire.client.mvp.PlaceManager;
import org.uberfire.client.workbench.panels.impl.MultiListWorkbenchPanelPresenter;
import org.uberfire.client.workbench.panels.impl.SimpleWorkbenchPanelPresenter;
import org.uberfire.mvp.Command;
import org.uberfire.mvp.impl.DefaultPlaceRequest;
import org.uberfire.workbench.model.CompassPosition;
import org.uberfire.workbench.model.PanelDefinition;
import org.uberfire.workbench.model.PerspectiveDefinition;
import org.uberfire.workbench.model.impl.PanelDefinitionImpl;
import org.uberfire.workbench.model.impl.PartDefinitionImpl;
import org.uberfire.workbench.model.impl.PerspectiveDefinitionImpl;
import org.uberfire.workbench.model.menu.MenuFactory;
import org.uberfire.workbench.model.menu.Menus;
/**
 * A Perspective for Administrators: a repositories editor as the main part
 * with a file explorer docked to the west, plus top-level menus for
 * organizational-unit and repository management. Menu entries are gated by
 * the corresponding OrganizationalUnit/Repository permissions.
 */
@ApplicationScoped
@WorkbenchPerspective(identifier = PerspectiveIds.ADMINISTRATION)
public class AdministrationPerspective {
    private AppConstants constants = AppConstants.INSTANCE;
    @Inject
    private NewResourcePresenter newResourcePresenter;
    @Inject
    private PlaceManager placeManager;
    @Inject
    private SyncBeanManager iocManager;
    @Inject
    private CloneRepositoryPresenter cloneRepositoryPresenter;
    // NOTE(review): newRepoCommand is never assigned -- buildCommands() only
    // sets cloneRepoCommand -- so the "New repository" menu item responds
    // with a null Command. Confirm whether a command should be wired here.
    private Command newRepoCommand = null;
    private Command cloneRepoCommand = null;
    // Builds the menu commands once the bean is constructed.
    @PostConstruct
    public void init() {
        buildCommands();
    }
    // Layout: RepositoriesEditor in the root panel, FileExplorer in a
    // 300px-wide west panel.
    @Perspective
    public PerspectiveDefinition getPerspective() {
        final PerspectiveDefinition perspective = new PerspectiveDefinitionImpl( MultiListWorkbenchPanelPresenter.class.getName() );
        perspective.setName( constants.Administration() );
        perspective.getRoot().addPart( new PartDefinitionImpl( new DefaultPlaceRequest( "RepositoriesEditor" ) ) );
        final PanelDefinition west = new PanelDefinitionImpl( SimpleWorkbenchPanelPresenter.class.getName() );
        west.setWidth( 300 );
        west.setMinWidth( 200 );
        west.addPart( new PartDefinitionImpl( new DefaultPlaceRequest( "FileExplorer" ) ) );
        perspective.getRoot().insertChild( CompassPosition.WEST, west );
        return perspective;
    }
    // Two top-level menus: organizational units (manage) and repositories
    // (list / clone / new), each entry permission-gated.
    @WorkbenchMenu
    public Menus getMenus() {
        return MenuFactory
                .newTopLevelMenu( constants.MenuOrganizationalUnits() )
                .withPermission( OrganizationalUnit.RESOURCE_TYPE, OrganizationalUnitAction.READ )
                .menus()
                .menu( constants.MenuManageOrganizationalUnits() )
                .respondsWith( () -> placeManager.goTo( "org.kie.workbench.common.screens.organizationalunit.manager.OrganizationalUnitManager" ) )
                .endMenu()
                .endMenus()
                .endMenu()
                .newTopLevelMenu( constants.repositories() )
                .menus()
                .menu( constants.listRepositories() )
                .withPermission( Repository.RESOURCE_TYPE, RepositoryAction.READ )
                .respondsWith( () -> placeManager.goTo( "RepositoriesEditor" ) )
                .endMenu()
                .menu( constants.cloneRepository() )
                .withPermission( Repository.RESOURCE_TYPE, RepositoryAction.CREATE )
                .respondsWith( cloneRepoCommand )
                .endMenu()
                .menu( constants.newRepository() )
                .withPermission( Repository.RESOURCE_TYPE, RepositoryAction.CREATE )
                // NOTE(review): newRepoCommand is still null here (see field
                // declaration above).
                .respondsWith( newRepoCommand )
                .endMenu()
                .endMenus()
                .endMenu().build();
    }
    // Package-private accessors -- presumably for unit tests; verify usage.
    Command getNewRepoCommand() {
        return newRepoCommand;
    }
    Command getCloneRepoCommand() {
        return cloneRepoCommand;
    }
    // Wires the clone-repository menu command to the clone presenter.
    private void buildCommands() {
        this.cloneRepoCommand = () -> {
            cloneRepositoryPresenter.showForm();
        };
    }
}
| apache-2.0 |
reportportal/commons-model | src/main/java/com/epam/ta/reportportal/ws/model/analyzer/IndexRsItem.java | 1035 | /*
* Copyright 2019 EPAM Systems
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.epam.ta.reportportal.ws.model.analyzer;
import com.fasterxml.jackson.annotation.JsonProperty;
/**
 * Represents a single item in an indexing operation response. Plain Jackson
 * bean: the wrapped {@link IndexRsIndex} is (de)serialized under the JSON
 * property name "index".
 *
 * @author Ivan Sharamet
 */
public class IndexRsItem {
	// Per-item index details, bound to the "index" JSON property.
	@JsonProperty("index")
	private IndexRsIndex index;
	// No-arg constructor required for Jackson deserialization.
	public IndexRsItem() {
	}
	public IndexRsIndex getIndex() {
		return index;
	}
	public void setIndex(IndexRsIndex index) {
		this.index = index;
	}
}
| apache-2.0 |
Soya93/Extract-Refactoring | platform/platform-impl/src/com/intellij/ide/ui/ColorBlindnessInternalAction.java | 5276 | /*
* Copyright 2000-2016 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.ide.ui;
import com.intellij.openapi.actionSystem.AnActionEvent;
import com.intellij.openapi.project.DumbAwareAction;
import com.intellij.openapi.ui.ComboBox;
import com.intellij.openapi.ui.DialogWrapper;
import com.intellij.util.ui.ImageUtil;
import com.intellij.util.ui.JBDimension;
import com.intellij.util.ui.JBInsets;
import com.intellij.util.ui.UIUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import java.awt.*;
import java.awt.event.ItemEvent;
import java.awt.event.ItemListener;
import java.awt.image.BufferedImage;
import java.awt.image.ImageFilter;
/**
* @author Sergey.Malenkov
*/
/**
 * Internal action that opens a modal dialog rendering an HSB color sweep through each
 * available color-blindness simulation/correction {@link ImageFilter}, so the filters
 * can be compared visually.
 *
 * @author Sergey.Malenkov
 */
public class ColorBlindnessInternalAction extends DumbAwareAction {
  @Override
  public void actionPerformed(AnActionEvent event) {
    new ColorDialog(event).show();
  }

  /** Modal dialog: a color preview panel on top of a filter-selection combo box. */
  private static final class ColorDialog extends DialogWrapper {
    private final ColorView myView = new ColorView();
    // FIX: field was declared as a raw JComboBox; parameterize it to match the
    // ComboBox<FilterItem> it holds and avoid unchecked-usage warnings.
    private final JComboBox<FilterItem> myCombo = new ComboBox<>(FilterItem.ALL);

    private ColorDialog(AnActionEvent event) {
      super(event.getProject());
      init();
      setTitle("ColorBlindness");
    }

    @Nullable
    @Override
    public JComponent getPreferredFocusedComponent() {
      // Focus the combo so the user can switch filters with the keyboard immediately.
      return myCombo;
    }

    @Override
    protected JComponent createCenterPanel() {
      myView.setBorder(BorderFactory.createEtchedBorder());
      myView.setMinimumSize(new JBDimension(360, 200));
      myView.setPreferredSize(new JBDimension(720, 400));
      // The view listens for combo selection changes and repaints with the new filter.
      myCombo.addItemListener(myView);
      JPanel panel = new JPanel(new BorderLayout(10, 10));
      panel.add(BorderLayout.CENTER, myView);
      panel.add(BorderLayout.SOUTH, myCombo);
      return panel;
    }

    @NotNull
    @Override
    protected Action[] createActions() {
      // Only a close button — the dialog is a read-only preview.
      return new Action[]{getCancelAction()};
    }
  }

  /** Combo-box entry pairing an {@link ImageFilter} with a display name. */
  private static final class FilterItem {
    private final ImageFilter myFilter;

    private FilterItem(ImageFilter filter) {
      myFilter = filter;
    }

    @Override
    public String toString() {
      return myFilter == null ? "No filtering" : myFilter.toString();
    }

    /** All selectable filters; the first entry renders the unfiltered gradient. */
    private static final FilterItem[] ALL = new FilterItem[]{
      new FilterItem(null),
      new FilterItem(DaltonizationFilter.protanopia),
      new FilterItem(MatrixFilter.protanopia),
      new FilterItem(DaltonizationFilter.deuteranopia),
      new FilterItem(MatrixFilter.deuteranopia),
      new FilterItem(DaltonizationFilter.tritanopia),
      new FilterItem(MatrixFilter.tritanopia),
      new FilterItem(SimulationFilter.protanopia),
      new FilterItem(MatrixFilter.forProtanopia(null, false)),
      new FilterItem(SimulationFilter.deuteranopia),
      new FilterItem(MatrixFilter.forDeuteranopia(null, false)),
      new FilterItem(SimulationFilter.tritanopia),
      new FilterItem(MatrixFilter.forTritanopia(null, false)),
      new FilterItem(SimulationFilter.achromatopsia),
    };
  }

  /**
   * Paints a full hue sweep (x axis) with saturation rising then brightness falling
   * (y axis), passed through the currently selected filter. The filtered image is
   * cached and regenerated only when the size or the filter changes.
   */
  private static final class ColorView extends JComponent implements ItemListener {
    private ImageFilter myFilter;
    private Image myImage;

    @Override
    protected void paintComponent(Graphics g) {
      Rectangle bounds = new Rectangle(getWidth(), getHeight());
      JBInsets.removeFrom(bounds, getInsets());
      if (bounds.isEmpty()) return;
      if (myImage == null || bounds.width != myImage.getWidth(this) || bounds.height != myImage.getHeight(this)) {
        int[] array = new int[bounds.width * bounds.height];
        float width = (float)(bounds.width - 1);
        float height = (float)(bounds.height - 1);
        for (int i = 0, h = 0; h < bounds.height; h++) {
          for (int w = 0; w < bounds.width; w++, i++) {
            // level goes 0..2 top-to-bottom: first half ramps saturation up,
            // second half ramps brightness down.
            float level = 2 * h / height;
            float saturation = (level > 1f) ? 1 : level;
            float brightness = (level > 1f) ? 2 - level : 1;
            array[i] = Color.HSBtoRGB(w / width, saturation, brightness);
          }
        }
        BufferedImage image = UIUtil.createImage(bounds.width, bounds.height, BufferedImage.TYPE_INT_RGB);
        image.setRGB(0, 0, bounds.width, bounds.height, array, 0, bounds.width);
        myImage = ImageUtil.filter(image, myFilter);
      }
      g.drawImage(myImage, bounds.x, bounds.y, bounds.width, bounds.height, this);
    }

    @Override
    public void itemStateChanged(ItemEvent event) {
      if (ItemEvent.SELECTED == event.getStateChange()) {
        Object object = event.getItem();
        if (object instanceof FilterItem) {
          FilterItem item = (FilterItem)object;
          myFilter = item.myFilter;
          myImage = null; // invalidate cache so the next paint regenerates the image
          repaint();
        }
      }
    }
  }
}
| apache-2.0 |
kaichao/Point-In-Polygon | src/main/java/cnic/pip/impl/EnvelopeExtEsri_v2.java | 1788 | package cnic.pip.impl;
import java.util.ArrayList;
import java.util.List;
import com.esri.core.geometry.ogc.OGCGeometry;
import com.esri.core.geometry.ogc.OGCLineString;
import com.esri.core.geometry.ogc.OGCMultiLineString;
import com.esri.core.geometry.ogc.OGCPoint;
/**
 * {@link EnvelopeExt} implementation backed by the ESRI geometry API.
 * Border data is produced by intersecting the polygon with one horizontal
 * scan line per pixel row and recording the start/end cell index of every
 * resulting segment.
 */
public class EnvelopeExtEsri_v2 extends EnvelopeExt {

    private static final long serialVersionUID = 8378434653149304262L;

    public EnvelopeExtEsri_v2() {
        super();
    }

    /**
     * Computes border data for the polygon given in WKT.
     *
     * @param wkt polygon geometry in Well-Known Text form
     * @return a 2 x N array: row 0 holds segment-start indexes, row 1 the
     *         matching segment-end indexes, one pair per intersection segment
     */
    public long[][] makeBorderData(String wkt) {
        OGCGeometry poly = OGCGeometry.fromText(wkt);
        List<Long> indexes = new ArrayList<Long>();
        for (long j = ny0; j <= ny1; j++) {
            // Horizontal scan line through the center (j + 0.5) of pixel row j.
            String ls = String.format("LINESTRING(%s %s,%s %s)",
                    Double.toString(x0), Double.toString((j + 0.5) / ppd),
                    Double.toString(x1), Double.toString((j + 0.5) / ppd));
            OGCGeometry intersection = poly.intersection(OGCGeometry.fromText(ls));
            if (intersection.isEmpty()) {
                continue;
            }
            if (intersection instanceof OGCLineString) {
                addSegment((OGCLineString) intersection, indexes);
            } else if (intersection instanceof OGCMultiLineString) {
                // Concave polygons can intersect a scan line in several segments.
                OGCMultiLineString ml = (OGCMultiLineString) intersection;
                for (int i = 0; i < ml.numGeometries(); i++) {
                    addSegment((OGCLineString) ml.geometryN(i), indexes);
                }
            }
        }
        // Split the flat [start, end, start, end, ...] list into two parallel arrays.
        long[][] ret = new long[2][indexes.size() / 2];
        for (int i = 0; i < indexes.size() / 2; i++) {
            ret[0][i] = indexes.get(2 * i);
            ret[1][i] = indexes.get(2 * i + 1);
        }
        return ret;
    }

    /**
     * Records the start/end cell indexes of one intersection segment,
     * shrinking both ends by {@code roundingError} to stay inside the polygon.
     */
    private void addSegment(OGCLineString segment, List<Long> indexes) {
        OGCPoint start = segment.startPoint();
        OGCPoint end = segment.endPoint();
        double y = start.Y();
        indexes.add(getIndex(start.X() + roundingError, y));
        indexes.add(getIndex(end.X() - roundingError, y));
    }
}
| apache-2.0 |
indvd00m/java-ascii-render | ascii-render/src/test/java/com/indvd00m/ascii/render/tests/TestLabel.java | 2738 | package com.indvd00m.ascii.render.tests;
import com.indvd00m.ascii.render.Canvas;
import com.indvd00m.ascii.render.Point;
import com.indvd00m.ascii.render.api.ICanvas;
import com.indvd00m.ascii.render.api.IContext;
import com.indvd00m.ascii.render.api.IPoint;
import com.indvd00m.ascii.render.elements.Label;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
/**
* @author indvd00m (gotoindvdum[at]gmail[dot]com)
* @since 0.9.0
*/
/**
 * Unit tests for the {@code Label} element: each test draws a label onto a
 * 10x5 canvas and checks both the returned draw point and the rendered text.
 *
 * NOTE(review): the expected-text literals below appear whitespace-collapsed
 * in this copy of the file (each row should presumably be 10 characters wide
 * to match the canvas) — verify against the original source before editing.
 *
 * @author indvd00m (gotoindvdum[at]gmail[dot]com)
 * @since 0.9.0
 */
public class TestLabel {

    // Label with explicit position (1,1) and width 4: full text fits.
    @Test
    public void test01() {
        IContext context = mock(IContext.class);
        ICanvas canvas = new Canvas(10, 5);
        Label l = new Label("Test", 1, 1, 4);
        IPoint point = l.draw(canvas, context);
        assertEquals(new Point(1, 1), point);
        String s = "";
        s += " \n";
        s += " Test \n";
        s += " \n";
        s += " \n";
        s += " ";
        System.out.println(canvas.getText());
        assertEquals(s, canvas.getText());
    }

    // Width 3 is one short of the text: label is truncated with an ellipsis.
    @Test
    public void test02() {
        IContext context = mock(IContext.class);
        ICanvas canvas = new Canvas(10, 5);
        Label l = new Label("Test", 1, 2, 3);
        IPoint point = l.draw(canvas, context);
        assertEquals(new Point(1, 2), point);
        String s = "";
        s += " \n";
        s += " \n";
        s += " Te… \n";
        s += " \n";
        s += " ";
        System.out.println(canvas.getText());
        assertEquals(s, canvas.getText());
    }

    // Width 0: nothing is rendered, but the draw point is still returned.
    @Test
    public void test03() {
        IContext context = mock(IContext.class);
        ICanvas canvas = new Canvas(10, 5);
        Label l = new Label("Test", 1, 3, 0);
        IPoint point = l.draw(canvas, context);
        assertEquals(new Point(1, 3), point);
        String s = "";
        s += " \n";
        s += " \n";
        s += " \n";
        s += " \n";
        s += " ";
        System.out.println(canvas.getText());
        assertEquals(s, canvas.getText());
    }

    // Default constructor: label is drawn at the origin (0,0).
    @Test
    public void test04() {
        IContext context = mock(IContext.class);
        ICanvas canvas = new Canvas(10, 5);
        Label l = new Label("Test");
        IPoint point = l.draw(canvas, context);
        assertEquals(new Point(0, 0), point);
        String s = "";
        s += "Test \n";
        s += " \n";
        s += " \n";
        s += " \n";
        s += " ";
        System.out.println(canvas.getText());
        assertEquals(s, canvas.getText());
    }

    // Position-only constructor: width defaults to the full text length.
    @Test
    public void test05() {
        IContext context = mock(IContext.class);
        ICanvas canvas = new Canvas(10, 5);
        Label l = new Label("Test", 1, 1);
        IPoint point = l.draw(canvas, context);
        assertEquals(new Point(1, 1), point);
        String s = "";
        s += " \n";
        s += " Test \n";
        s += " \n";
        s += " \n";
        s += " ";
        System.out.println(canvas.getText());
        assertEquals(s, canvas.getText());
    }
}
| apache-2.0 |
DracoAnimus/Coding | src/main/java/net/wildbill22/draco/items/dragoneggs/ItemSkeletonDragonEgg.java | 1521 | package net.wildbill22.draco.items.dragoneggs;
import java.util.List;
import cpw.mods.fml.relauncher.Side;
import cpw.mods.fml.relauncher.SideOnly;
import net.minecraft.entity.player.EntityPlayer;
import net.minecraft.item.Item;
import net.minecraft.item.ItemStack;
import net.minecraft.util.StatCollector;
import net.wildbill22.draco.entities.dragons.EntityDracoMortem;
import net.wildbill22.draco.items.ModItems;
import net.wildbill22.draco.items.weapons.ModWeapons;
/**
 * Dragon egg item that hatches into a skeleton dragon ({@code EntityDracoMortem}).
 * Registers the villager skull as the food that hatches this egg and supplies
 * the localized tooltip and the staff granted on hatching.
 */
public class ItemSkeletonDragonEgg extends ItemDragonEgg {
    public static final String name = "skeletonDragonEgg";
    private final static String dragonName = EntityDracoMortem.name;

    public ItemSkeletonDragonEgg() {
        super(name, dragonName);
        // Villager skull is the hatching food for this dragon.
        this.addDragonFood(dragonName, ModItems.villagerSkull);
    }

    /** Adds the localized three-line tooltip shown on hover. */
    @SuppressWarnings({ "rawtypes", "unchecked" })
    @Override
    @SideOnly(Side.CLIENT)
    public void addInformation(ItemStack stack, EntityPlayer player, List list, boolean par4) {
        list.add(StatCollector.translateToLocal("egg.wildbill22_draco_putThisEgg"));
        list.add(StatCollector.translateToLocal("egg.wildbill22_draco_youWillGetStaff"));
        list.add(StatCollector.translateToLocal("egg.wildbill22_draco_intoSkeletonDragon"));
    }

    @Override
    public String getEggName() {
        return name;
    }

    @Override
    public Item getEggItem() {
        return ModItems.skeletonDragonEgg;
    }

    /** @return the staff item granted when this egg hatches */
    @Override
    public ItemStack getStaffItemStack() {
        return new ItemStack(ModWeapons.skeletonDragonStaff);
    }
}
| apache-2.0 |
erichwang/presto | presto-main/src/main/java/io/prestosql/operator/window/WindowPartition.java | 30910 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prestosql.operator.window;
import com.google.common.collect.ImmutableList;
import io.prestosql.operator.PagesHashStrategy;
import io.prestosql.operator.PagesIndex;
import io.prestosql.operator.PagesIndexComparator;
import io.prestosql.operator.WindowOperator.FrameBoundKey;
import io.prestosql.spi.PageBuilder;
import io.prestosql.spi.function.WindowIndex;
import io.prestosql.sql.tree.FrameBound;
import io.prestosql.sql.tree.FrameBound.Type;
import io.prestosql.sql.tree.SortItem.Ordering;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Predicate;
import static com.google.common.base.Preconditions.checkState;
import static io.prestosql.operator.WindowOperator.FrameBoundKey.Type.END;
import static io.prestosql.operator.WindowOperator.FrameBoundKey.Type.START;
import static io.prestosql.spi.StandardErrorCode.INVALID_WINDOW_FRAME;
import static io.prestosql.sql.tree.FrameBound.Type.CURRENT_ROW;
import static io.prestosql.sql.tree.FrameBound.Type.FOLLOWING;
import static io.prestosql.sql.tree.FrameBound.Type.PRECEDING;
import static io.prestosql.sql.tree.FrameBound.Type.UNBOUNDED_FOLLOWING;
import static io.prestosql.sql.tree.FrameBound.Type.UNBOUNDED_PRECEDING;
import static io.prestosql.sql.tree.SortItem.Ordering.ASCENDING;
import static io.prestosql.sql.tree.SortItem.Ordering.DESCENDING;
import static io.prestosql.sql.tree.WindowFrame.Type.GROUPS;
import static io.prestosql.sql.tree.WindowFrame.Type.RANGE;
import static io.prestosql.util.Failures.checkCondition;
import static java.lang.Math.toIntExact;
import static java.util.Objects.requireNonNull;
public final class WindowPartition
{
private final PagesIndex pagesIndex;
private final int partitionStart;
private final int partitionEnd;
private final int[] outputChannels;
private final List<FramedWindowFunction> windowFunctions;
// Recently computed frame bounds for functions with frame type RANGE.
// When computing frame start and frame end for a row, frame bounds for the previous row
// are used as the starting point. Then they are moved backward or forward based on the sort order
// until the matching position for a current row is found.
// This approach is efficient in case when frame offset values are constant. It was chosen
// based on the assumption that in most use cases frame offset is constant rather than
// row-dependent.
private final Map<Integer, Range> recentRanges;
private final PagesHashStrategy peerGroupHashStrategy;
private final Map<FrameBoundKey, PagesIndexComparator> frameBoundComparators;
// Recently computed frames for functions with frame type GROUPS.
// Along frame start and frame end, they also capture indexes of peer groups
// where frame bounds fall.
// This information is used as the starting point when processing the next row.
// This approach is efficient in case when group offset values are constant,
// which is assumed to be the most common use case.
private final Map<Integer, GroupsFrame> recentGroupsFrames;
private int peerGroupStart;
private int peerGroupEnd;
private int currentGroupIndex = -1;
private int lastPeerGroup = Integer.MAX_VALUE;
private final Function<Integer, Integer> seekGroupStart;
private final Function<Integer, Integer> seekGroupEnd;
private int currentPosition;
/**
 * Creates a partition over rows [partitionStart, partitionEnd) of {@code pagesIndex},
 * resets every window function for the new partition, positions the cursor on the
 * first row/peer group, and initializes the per-function frame caches.
 */
public WindowPartition(
        PagesIndex pagesIndex,
        int partitionStart,
        int partitionEnd,
        int[] outputChannels,
        List<FramedWindowFunction> windowFunctions,
        PagesHashStrategy peerGroupHashStrategy,
        Map<FrameBoundKey, PagesIndexComparator> frameBoundComparators)
{
    this.pagesIndex = pagesIndex;
    this.partitionStart = partitionStart;
    this.partitionEnd = partitionEnd;
    this.outputChannels = outputChannels;
    this.windowFunctions = ImmutableList.copyOf(windowFunctions);
    this.peerGroupHashStrategy = peerGroupHashStrategy;
    this.frameBoundComparators = frameBoundComparators;

    // reset functions for new partition
    WindowIndex windowIndex = new PagesWindowIndex(pagesIndex, partitionStart, partitionEnd);
    for (FramedWindowFunction framedWindowFunction : windowFunctions) {
        framedWindowFunction.getFunction().reset(windowIndex);
    }

    // position on the first row and discover the first peer group;
    // updatePeerGroup() must run before the caches below, which use peerGroupEnd
    currentPosition = partitionStart;
    updatePeerGroup();

    recentRanges = initializeRangeCache(partitionStart, partitionEnd, peerGroupEnd, windowFunctions);
    recentGroupsFrames = initializeGroupsFrameCache(partitionStart, peerGroupEnd, windowFunctions);

    // Walks backward to the first row of the peer group containing `position`
    // (positions are partition-relative).
    seekGroupStart = position -> {
        requireNonNull(position, "position is null");
        while (position > 0 && pagesIndex.positionEqualsPosition(peerGroupHashStrategy, partitionStart + position, partitionStart + position - 1)) {
            position--;
        }
        return position;
    };

    // Walks forward to the last row of the peer group containing `position`.
    seekGroupEnd = position -> {
        requireNonNull(position, "position is null");
        while (position < partitionEnd - 1 - partitionStart && pagesIndex.positionEqualsPosition(peerGroupHashStrategy, partitionStart + position, partitionStart + position + 1)) {
            position++;
        }
        return position;
    };
}
/**
 * Builds the initial RANGE-frame cache for the first row: one entry per window
 * function with frame type RANGE. Frames ending at UNBOUNDED FOLLOWING start
 * covering the whole partition; all others start covering the first peer group.
 */
private static Map<Integer, Range> initializeRangeCache(int partitionStart, int partitionEnd, int peerGroupEnd, List<FramedWindowFunction> windowFunctions)
{
    Map<Integer, Range> initialRanges = new HashMap<>();
    Range firstPeerGroupRange = new Range(0, peerGroupEnd - partitionStart - 1);
    Range wholePartitionRange = new Range(0, partitionEnd - partitionStart - 1);
    for (int functionIndex = 0; functionIndex < windowFunctions.size(); functionIndex++) {
        FrameInfo frameInfo = windowFunctions.get(functionIndex).getFrame();
        if (frameInfo.getType() != RANGE) {
            continue;
        }
        Range initialRange = frameInfo.getEndType() == UNBOUNDED_FOLLOWING ? wholePartitionRange : firstPeerGroupRange;
        initialRanges.put(functionIndex, initialRange);
    }
    return initialRanges;
}
/**
 * Builds the initial GROUPS-frame cache for the first row: every window function
 * with frame type GROUPS starts with a frame covering the first peer group
 * (group index 0 on both ends).
 */
private static Map<Integer, GroupsFrame> initializeGroupsFrameCache(int partitionStart, int peerGroupEnd, List<FramedWindowFunction> windowFunctions)
{
    Map<Integer, GroupsFrame> initialFrames = new HashMap<>();
    GroupsFrame firstPeerGroupFrame = new GroupsFrame(0, 0, peerGroupEnd - partitionStart - 1, 0);
    for (int functionIndex = 0; functionIndex < windowFunctions.size(); functionIndex++) {
        if (windowFunctions.get(functionIndex).getFrame().getType() == GROUPS) {
            initialFrames.put(functionIndex, firstPeerGroupFrame);
        }
    }
    return initialFrames;
}
/** @return index of the first row of this partition within the pages index */
public int getPartitionStart()
{
    return this.partitionStart;
}
/** @return index one past the last row of this partition within the pages index */
public int getPartitionEnd()
{
    return this.partitionEnd;
}
/** @return {@code true} if there are unprocessed rows left in this partition */
public boolean hasNext()
{
    return this.currentPosition < this.partitionEnd;
}
/**
 * Emits the current row into {@code pageBuilder}: first the pass-through output
 * channels, then one value per window function, and finally advances the cursor.
 * The {@code channel} counter deliberately continues from the output channels
 * into the window-function columns — they occupy consecutive builder channels.
 */
public void processNextRow(PageBuilder pageBuilder)
{
    checkState(hasNext(), "No more rows in partition");

    // copy output channels
    pageBuilder.declarePosition();
    int channel = 0;
    while (channel < outputChannels.length) {
        pagesIndex.appendTo(outputChannels[channel], currentPosition, pageBuilder.getBlockBuilder(channel));
        channel++;
    }

    // check for new peer group (must happen before frames are computed, since
    // frame logic reads peerGroupStart/peerGroupEnd)
    if (currentPosition == peerGroupEnd) {
        updatePeerGroup();
    }

    for (int i = 0; i < windowFunctions.size(); i++) {
        FramedWindowFunction framedFunction = windowFunctions.get(i);
        // frame bounds are partition-relative; (-1, -1) signals an empty frame
        Range range = getFrameRange(framedFunction.getFrame(), i);
        framedFunction.getFunction().processRow(
                pageBuilder.getBlockBuilder(channel),
                peerGroupStart - partitionStart,
                peerGroupEnd - partitionStart - 1,
                range.getStart(),
                range.getEnd());
        channel++;
    }

    currentPosition++;
}
/**
 * Immutable pair of partition-relative frame bounds (both inclusive).
 * The sentinel (-1, -1) is used elsewhere to represent an empty frame.
 */
private static class Range
{
    private final int start;
    private final int end;

    Range(int startPosition, int endPosition)
    {
        this.start = startPosition;
        this.end = endPosition;
    }

    public int getStart()
    {
        return this.start;
    }

    public int getEnd()
    {
        return this.end;
    }
}
/**
 * Advances to the peer group starting at the current position: bumps the group
 * counter and scans forward for the first row that is not a peer of the group's
 * first row (or the partition end), which becomes the exclusive group end.
 */
private void updatePeerGroup()
{
    currentGroupIndex++;
    peerGroupStart = currentPosition;
    int groupEnd = peerGroupStart + 1;
    while (groupEnd < partitionEnd && pagesIndex.positionEqualsPosition(peerGroupHashStrategy, peerGroupStart, groupEnd)) {
        groupEnd++;
    }
    peerGroupEnd = groupEnd;
}
/**
 * Computes the frame for function {@code functionIndex} on the current row,
 * dispatching on frame type. RANGE and GROUPS frames are computed incrementally
 * from the cached frame of the previous row; the cache is updated here, and an
 * out-of-partition result is stored as the nearest valid frame so the next row
 * has a usable starting point. Returns (-1, -1) for an empty frame.
 */
private Range getFrameRange(FrameInfo frameInfo, int functionIndex)
{
    switch (frameInfo.getType()) {
        case RANGE:
            Range range = getFrameRange(
                    frameInfo,
                    recentRanges.get(functionIndex),
                    frameBoundComparators.get(new FrameBoundKey(functionIndex, START)),
                    frameBoundComparators.get(new FrameBoundKey(functionIndex, END)));
            // handle empty frame. If the frame is out of partition bounds, record the nearest valid frame as the 'recentRange' for the next row.
            if (emptyFrame(range)) {
                recentRanges.put(functionIndex, nearestValidFrame(range));
                return new Range(-1, -1);
            }
            recentRanges.put(functionIndex, range);
            return range;
        case ROWS:
            // ROWS frames are cheap to compute from scratch; no cache involved
            return getFrameRange(frameInfo);
        case GROUPS:
            GroupsFrame frame = getFrameRange(frameInfo, recentGroupsFrames.get(functionIndex));
            // handle empty frame. If the frame is out of partition bounds, record the nearest valid frame as the 'recentFrame' for the next row.
            if (emptyFrame(frame.getRange())) {
                recentGroupsFrames.put(functionIndex, nearestValidFrame(frame));
                return new Range(-1, -1);
            }
            recentGroupsFrames.put(functionIndex, frame);
            return frame.getRange();
        default:
            throw new IllegalArgumentException("Unsupported frame type: " + frameInfo.getType());
    }
}
/**
 * Computes a ROWS frame for the current row. Positions are partition-relative;
 * offsets are clamped to the partition by {@link #preceding} / {@link #following}.
 * Returns the sentinel (-1, -1) when the frame is provably empty.
 */
private Range getFrameRange(FrameInfo frameInfo)
{
    int rowPosition = currentPosition - partitionStart;
    int endPosition = partitionEnd - partitionStart - 1;

    // handle empty frame
    if (emptyFrame(frameInfo, rowPosition, endPosition)) {
        return new Range(-1, -1);
    }

    int frameStart;
    switch (frameInfo.getStartType()) {
        case UNBOUNDED_PRECEDING:
            frameStart = 0;
            break;
        case PRECEDING:
            frameStart = preceding(rowPosition, getStartValue(frameInfo));
            break;
        case FOLLOWING:
            frameStart = following(rowPosition, endPosition, getStartValue(frameInfo));
            break;
        default:
            // CURRENT_ROW
            frameStart = rowPosition;
            break;
    }

    int frameEnd;
    switch (frameInfo.getEndType()) {
        case UNBOUNDED_FOLLOWING:
            frameEnd = endPosition;
            break;
        case PRECEDING:
            frameEnd = preceding(rowPosition, getEndValue(frameInfo));
            break;
        case FOLLOWING:
            frameEnd = following(rowPosition, endPosition, getEndValue(frameInfo));
            break;
        default:
            // CURRENT_ROW
            frameEnd = rowPosition;
            break;
    }

    return new Range(frameStart, frameEnd);
}
/**
 * Computes a RANGE frame for the current row, starting the bound search from
 * {@code recentRange} (the previous row's frame) and moving each bound with the
 * sort order until it matches the current row's value. Fast paths handle the
 * whole-partition frame, peer-group-defined frames, and null sort keys (which
 * per the SQL spec behave like CURRENT ROW for offset bounds).
 */
private Range getFrameRange(FrameInfo frameInfo, Range recentRange, PagesIndexComparator startComparator, PagesIndexComparator endComparator)
{
    // full partition
    if ((frameInfo.getStartType() == UNBOUNDED_PRECEDING && frameInfo.getEndType() == UNBOUNDED_FOLLOWING)) {
        return new Range(0, partitionEnd - partitionStart - 1);
    }

    // frame defined by peer group
    if (frameInfo.getStartType() == CURRENT_ROW && frameInfo.getEndType() == CURRENT_ROW ||
            frameInfo.getStartType() == CURRENT_ROW && frameInfo.getEndType() == UNBOUNDED_FOLLOWING ||
            frameInfo.getStartType() == UNBOUNDED_PRECEDING && frameInfo.getEndType() == CURRENT_ROW) {
        // same peer group as recent row: the cached frame is still valid
        if (currentPosition == partitionStart || pagesIndex.positionEqualsPosition(peerGroupHashStrategy, currentPosition - 1, currentPosition)) {
            return recentRange;
        }
        // next peer group
        return new Range(
                frameInfo.getStartType() == UNBOUNDED_PRECEDING ? 0 : peerGroupStart - partitionStart,
                frameInfo.getEndType() == UNBOUNDED_FOLLOWING ? partitionEnd - partitionStart - 1 : peerGroupEnd - partitionStart - 1);
    }

    // at this point, frame definition has at least one of: X PRECEDING, Y FOLLOWING

    // 1. leading or trailing nulls: frame consists of nulls peer group, possibly extended to partition start / end.
    // according to Spec, behavior of "X PRECEDING", "X FOLLOWING" frame boundaries is similar to "CURRENT ROW" for null values.
    if (pagesIndex.isNull(frameInfo.getSortKeyChannel(), currentPosition)) {
        return new Range(
                frameInfo.getStartType() == UNBOUNDED_PRECEDING ? 0 : peerGroupStart - partitionStart,
                frameInfo.getEndType() == UNBOUNDED_FOLLOWING ? partitionEnd - partitionStart - 1 : peerGroupEnd - partitionStart - 1);
    }

    // 2. non-null value in current row. Find frame boundaries starting from recentRange
    int frameStart;
    if (frameInfo.getStartType() == UNBOUNDED_PRECEDING) {
        frameStart = 0;
    }
    else if (frameInfo.getStartType() == CURRENT_ROW) {
        frameStart = peerGroupStart - partitionStart;
    }
    else if (frameInfo.getStartType() == PRECEDING) {
        frameStart = getFrameStartPreceding(recentRange.getStart(), frameInfo, startComparator);
    }
    else {
        // frameInfo.getStartType() == FOLLOWING
        // note: this is the only case where frameStart might get out of partition bound
        frameStart = getFrameStartFollowing(recentRange.getStart(), frameInfo, startComparator);
    }

    int frameEnd;
    if (frameInfo.getEndType() == UNBOUNDED_FOLLOWING) {
        frameEnd = partitionEnd - partitionStart - 1;
    }
    else if (frameInfo.getEndType() == CURRENT_ROW) {
        frameEnd = peerGroupEnd - partitionStart - 1;
    }
    else if (frameInfo.getEndType() == PRECEDING) {
        // note: this is the only case where frameEnd might get out of partition bound
        frameEnd = getFrameEndPreceding(recentRange.getEnd(), frameInfo, endComparator);
    }
    else {
        // frameInfo.getEndType() == FOLLOWING
        frameEnd = getFrameEndFollowing(recentRange.getEnd(), frameInfo, endComparator);
    }

    return new Range(frameStart, frameEnd);
}
/**
 * Finds the frame start for a "value PRECEDING" bound of a RANGE frame,
 * starting from the previous row's frame start {@code recent} (partition-relative).
 * Frame start for PRECEDING never includes nulls for a non-null current row,
 * and cannot fall outside the partition, so no extra bound check is needed.
 */
private int getFrameStartPreceding(int recent, FrameInfo frameInfo, PagesIndexComparator comparator)
{
    int sortKeyChannel = frameInfo.getSortKeyChannelForStartComparison();
    Ordering ordering = frameInfo.getOrdering().get();

    // If the recent frame start points at a null, it means that we are now processing first non-null position.
    // For frame start "X PRECEDING", the frame starts at the first null for all null values, and it never includes nulls for non-null values.
    if (pagesIndex.isNull(frameInfo.getSortKeyChannel(), partitionStart + recent)) {
        return currentPosition - partitionStart;
    }

    return seek(
            comparator,
            sortKeyChannel,
            recent,
            -1,
            ordering == DESCENDING,
            0,
            p -> false);
}
/**
 * Finds the frame start for a "value FOLLOWING" bound of a RANGE frame,
 * starting from the previous row's frame start {@code recent}.
 * This is the only start-bound case that may step past the partition end
 * (producing an empty frame), so the seek is bounded by a predicate that
 * also stops at trailing nulls.
 */
private int getFrameStartFollowing(int recent, FrameInfo frameInfo, PagesIndexComparator comparator)
{
    int sortKeyChannel = frameInfo.getSortKeyChannelForStartComparison();
    Ordering ordering = frameInfo.getOrdering().get();

    int position = recent;
    // If the recent frame start points at the beginning of partition and it is null, it means that we are now processing first non-null position.
    // frame start for first non-null position - leave section of leading nulls
    if (recent == 0 && pagesIndex.isNull(frameInfo.getSortKeyChannel(), partitionStart)) {
        position = currentPosition - partitionStart;
    }
    // leave section of trailing nulls
    while (pagesIndex.isNull(frameInfo.getSortKeyChannel(), partitionStart + position)) {
        position--;
    }

    return seek(
            comparator,
            sortKeyChannel,
            position,
            -1,
            ordering == DESCENDING,
            0,
            p -> p >= partitionEnd - partitionStart || pagesIndex.isNull(sortKeyChannel, partitionStart + p));
}
/**
 * Finds the frame end for a "value PRECEDING" bound of a RANGE frame,
 * starting from the previous row's frame end {@code recent}.
 * This is the only end-bound case that may step before the partition start
 * (producing an empty frame); the seek predicate also stops at leading nulls.
 */
private int getFrameEndPreceding(int recent, FrameInfo frameInfo, PagesIndexComparator comparator)
{
    int sortKeyChannel = frameInfo.getSortKeyChannelForEndComparison();
    Ordering ordering = frameInfo.getOrdering().get();

    int position = recent;
    // leave section of leading nulls
    while (pagesIndex.isNull(frameInfo.getSortKeyChannel(), partitionStart + position)) {
        position++;
    }

    return seek(
            comparator,
            sortKeyChannel,
            position,
            1,
            ordering == ASCENDING,
            partitionEnd - 1 - partitionStart,
            p -> p < 0 || pagesIndex.isNull(sortKeyChannel, partitionStart + p));
}
/**
 * Finds the frame end for a "value FOLLOWING" bound of a RANGE frame,
 * starting from the previous row's frame end {@code recent}.
 * For FOLLOWING end bounds the frame end cannot leave the partition,
 * so the seek uses a never-true extra bound predicate.
 */
private int getFrameEndFollowing(int recent, FrameInfo frameInfo, PagesIndexComparator comparator)
{
    Ordering ordering = frameInfo.getOrdering().get();
    int sortKeyChannel = frameInfo.getSortKeyChannelForEndComparison();

    int position = recent;
    // frame end for first non-null position - leave section of leading nulls
    if (pagesIndex.isNull(frameInfo.getSortKeyChannel(), partitionStart + recent)) {
        position = currentPosition - partitionStart;
    }

    return seek(
            comparator,
            sortKeyChannel,
            position,
            1,
            ordering == ASCENDING,
            partitionEnd - 1 - partitionStart,
            p -> false);
}
/**
 * Compares two absolute pages-index positions with {@code comparator},
 * negating the result when {@code reverse} is set (descending sort order).
 */
private int compare(PagesIndexComparator comparator, int left, int right, boolean reverse)
{
    int comparison = comparator.compareTo(pagesIndex, left, right);
    return reverse ? -comparison : comparison;
}
// This method assumes that `sortKeyChannel` is not null at `position`
/**
 * Two-phase search for a RANGE frame bound, starting from {@code position}
 * (partition-relative). Phase 1 backs away (opposite of {@code step}) while the
 * offset comparison is still negative, stopping early when {@code bound} fires
 * (bound positions may legitimately be outside the partition and signal an
 * empty frame). Phase 2 advances by {@code step} while the next position still
 * satisfies the comparison, stopping at {@code limit} or at a null sort key.
 */
private int seek(PagesIndexComparator comparator, int sortKeyChannel, int position, int step, boolean reverse, int limit, Predicate<Integer> bound)
{
    int comparison = compare(comparator, partitionStart + position, currentPosition, reverse);
    while (comparison < 0) {
        position -= step;
        if (bound.test(position)) {
            return position;
        }
        comparison = compare(comparator, partitionStart + position, currentPosition, reverse);
    }

    while (true) {
        if (position == limit || pagesIndex.isNull(sortKeyChannel, partitionStart + position + step)) {
            break;
        }
        int newComparison = compare(comparator, partitionStart + position + step, currentPosition, reverse);
        if (newComparison >= 0) {
            position += step;
        }
        else {
            break;
        }
    }

    return position;
}
/**
 * A frame is empty when its bounds are inverted or when it lies entirely
 * outside the partition (start past the end, or end before the start).
 */
private boolean emptyFrame(Range range)
{
    boolean inverted = range.getStart() > range.getEnd();
    boolean startsPastPartition = range.getStart() >= partitionEnd - partitionStart;
    boolean endsBeforePartition = range.getEnd() < 0;
    return inverted || startsPastPartition || endsBeforePartition;
}
/**
 * Clamps a frame's bounds into the partition. A frame is valid when both its
 * start and end lie within the partition; note that a valid frame may still be
 * empty (end before start).
 */
private Range nearestValidFrame(Range range)
{
    int lastPosition = partitionEnd - partitionStart - 1;
    int clampedStart = Math.min(lastPosition, range.getStart());
    int clampedEnd = Math.max(0, range.getEnd());
    return new Range(clampedStart, clampedEnd);
}
/**
 * Detects ROWS frames that are empty without computing the bounds:
 * UNBOUNDED PRECEDING .. N PRECEDING with N past the row, N FOLLOWING ..
 * UNBOUNDED FOLLOWING with N past the partition end, and inverted or
 * out-of-partition X PRECEDING .. Y PRECEDING / X FOLLOWING .. Y FOLLOWING pairs.
 */
private boolean emptyFrame(FrameInfo frameInfo, int rowPosition, int endPosition)
{
    FrameBound.Type startType = frameInfo.getStartType();
    FrameBound.Type endType = frameInfo.getEndType();

    // number of rows between the current row and the partition end
    int positions = endPosition - rowPosition;

    if ((startType == UNBOUNDED_PRECEDING) && (endType == PRECEDING)) {
        return getEndValue(frameInfo) > rowPosition;
    }

    if ((startType == FOLLOWING) && (endType == UNBOUNDED_FOLLOWING)) {
        return getStartValue(frameInfo) > positions;
    }

    if (startType != endType) {
        return false;
    }

    // from here on both bounds are the same type; only PRECEDING/PRECEDING
    // and FOLLOWING/FOLLOWING pairs can be empty
    FrameBound.Type type = frameInfo.getStartType();
    if ((type != PRECEDING) && (type != FOLLOWING)) {
        return false;
    }

    long start = getStartValue(frameInfo);
    long end = getEndValue(frameInfo);

    if (type == PRECEDING) {
        // inverted bounds, or the whole frame falls before the partition start
        return (start < end) || ((start > rowPosition) && (end > rowPosition));
    }

    // FOLLOWING: inverted bounds, or the whole frame falls past the partition end
    return (start > end) || (start > positions);
}
/**
 * Partition-relative position {@code value} rows before {@code rowPosition},
 * clamped at the partition start (position 0).
 */
private static int preceding(int rowPosition, long value)
{
    return value > rowPosition ? 0 : toIntExact(rowPosition - value);
}
/**
 * Partition-relative position {@code value} rows after {@code rowPosition},
 * clamped at the last partition position {@code endPosition}.
 */
private static int following(int rowPosition, int endPosition, long value)
{
    return value > (endPosition - rowPosition) ? endPosition : toIntExact(rowPosition + value);
}
// Reads the frame-start offset for the current row from its precomputed channel.
private long getStartValue(FrameInfo frameInfo)
{
    return getFrameValue(frameInfo.getStartChannel(), "starting");
}
// Reads the frame-end offset for the current row from its precomputed channel.
private long getEndValue(FrameInfo frameInfo)
{
    return getFrameValue(frameInfo.getEndChannel(), "ending");
}
/**
 * Reads the frame offset for the current row from {@code channel}, failing with
 * INVALID_WINDOW_FRAME if the value is null or negative as the SQL spec requires.
 * {@code type} ("starting"/"ending") is only used in the error message.
 */
private long getFrameValue(int channel, String type)
{
    checkCondition(!pagesIndex.isNull(channel, currentPosition), INVALID_WINDOW_FRAME, "Window frame %s offset must not be null", type);
    long value = pagesIndex.getLong(channel, currentPosition);
    checkCondition(value >= 0, INVALID_WINDOW_FRAME, "Window frame %s offset must not be negative", value);
    return value;
}
/**
 * Computes a GROUPS frame for the current row, starting from the previous row's
 * cached frame {@code recentFrame}. Offset bounds resolve a target peer-group
 * index relative to {@code currentGroupIndex} and seek to it; bounds that fall
 * outside the partition return sentinel positions (handled by the caller as an
 * empty frame), with group index left as {@code GroupsFrame.ignoreIndex()}
 * where it is not meaningful.
 */
private GroupsFrame getFrameRange(FrameInfo frameInfo, GroupsFrame recentFrame)
{
    Type startType = frameInfo.getStartType();
    Type endType = frameInfo.getEndType();

    int start;
    int end;
    int startGroupIndex = GroupsFrame.ignoreIndex();
    int endGroupIndex = GroupsFrame.ignoreIndex();

    switch (startType) {
        case UNBOUNDED_PRECEDING:
            start = 0;
            break;
        case CURRENT_ROW:
            start = peerGroupStart - partitionStart;
            break;
        case PRECEDING: {
            // target group before the partition start -> frame starts at position 0, group 0
            PositionAndGroup frameStart = seek(toIntExact(currentGroupIndex - getStartValue(frameInfo)), recentFrame.getStart(), recentFrame.getStartGroupIndex(), seekGroupStart, lastGroup -> new PositionAndGroup(0, 0));
            start = frameStart.getPosition();
            startGroupIndex = frameStart.getGroup();
            break;
        }
        case FOLLOWING: {
            // target group past the partition end -> sentinel position one past the partition
            PositionAndGroup frameStart = seek(toIntExact(currentGroupIndex + getStartValue(frameInfo)), recentFrame.getStart(), recentFrame.getStartGroupIndex(), seekGroupStart, lastGroup -> new PositionAndGroup(partitionEnd - partitionStart, GroupsFrame.ignoreIndex()));
            start = frameStart.getPosition();
            startGroupIndex = frameStart.getGroup();
            break;
        }
        default:
            throw new UnsupportedOperationException("Unsupported frame start type: " + startType);
    }

    switch (endType) {
        case UNBOUNDED_FOLLOWING:
            end = partitionEnd - partitionStart - 1;
            break;
        case CURRENT_ROW:
            end = peerGroupEnd - partitionStart - 1;
            break;
        case PRECEDING: {
            // target group before the partition start -> sentinel position -1
            PositionAndGroup frameEnd = seek(toIntExact(currentGroupIndex - getEndValue(frameInfo)), recentFrame.getEnd(), recentFrame.getEndGroupIndex(), seekGroupEnd, lastGroup -> new PositionAndGroup(-1, GroupsFrame.ignoreIndex()));
            end = frameEnd.getPosition();
            endGroupIndex = frameEnd.getGroup();
            break;
        }
        case FOLLOWING: {
            // target group past the partition end -> clamp to the last position / last peer group
            PositionAndGroup frameEnd = seek(toIntExact(currentGroupIndex + getEndValue(frameInfo)), recentFrame.getEnd(), recentFrame.getEndGroupIndex(), seekGroupEnd, lastGroup -> new PositionAndGroup(partitionEnd - partitionStart - 1, lastPeerGroup));
            end = frameEnd.getPosition();
            endGroupIndex = frameEnd.getGroup();
            break;
        }
        default:
            throw new UnsupportedOperationException("Unsupported frame end type: " + endType);
    }

    return new GroupsFrame(start, startGroupIndex, end, endGroupIndex);
}
    /**
     * Resolve a peer group index to an in-partition row position, walking forward or
     * backward from a previously known (position, group) pair instead of scanning
     * from the partition start.
     * <p>
     * {@code seekPositionWithinGroup} maps a position inside the target group to the
     * desired boundary (group start or group end). When {@code groupIndex} falls
     * outside the partition's peer groups, {@code edgeResult} supplies the fallback.
     *
     * @param groupIndex target peer group index within the partition
     * @param recentPosition position (relative to partition start) remembered from the previous computation
     * @param recentGroupIndex peer group index that {@code recentPosition} belongs to
     * @param seekPositionWithinGroup resolves the final position once inside the target group
     * @param edgeResult fallback result for an out-of-bounds group index
     */
    private PositionAndGroup seek(int groupIndex, int recentPosition, int recentGroupIndex, Function<Integer, Integer> seekPositionWithinGroup, EdgeResultProvider edgeResult)
    {
        if (groupIndex < 0 || groupIndex > lastPeerGroup) {
            return edgeResult.get(lastPeerGroup);
        }
        // walk backwards: jump to the start of the current group, then step one row
        // into the previous group
        while (recentGroupIndex > groupIndex) {
            recentPosition = seekGroupStart.apply(recentPosition);
            recentPosition--;
            recentGroupIndex--;
        }
        // walk forwards: jump to the end of the current group, then step one row into
        // the next group; if the partition ends first, record the last peer group and
        // fall back to the edge result
        while (recentGroupIndex < groupIndex) {
            recentPosition = seekGroupEnd.apply(recentPosition);
            if (recentPosition == partitionEnd - partitionStart - 1) {
                lastPeerGroup = recentGroupIndex;
                return edgeResult.get(lastPeerGroup);
            }
            recentPosition++;
            recentGroupIndex++;
        }
        recentPosition = seekPositionWithinGroup.apply(recentPosition);
        // landing on the partition's last row pins down the index of the last peer group
        if (recentPosition == partitionEnd - partitionStart - 1) {
            lastPeerGroup = recentGroupIndex;
        }
        return new PositionAndGroup(recentPosition, recentGroupIndex);
    }
/**
* Return a valid frame. A frame is valid if its start and end are within partition.
* If frame start or frame end is out of partition bounds, it is set to the nearest position
* for which peer group index can be determined.
*/
private GroupsFrame nearestValidFrame(GroupsFrame frame)
{
if (frame.getStart() > partitionEnd - partitionStart - 1) {
return frame.withStart(partitionEnd - partitionStart - 1, lastPeerGroup);
}
if (frame.getEnd() < 0) {
return frame.withEnd(0, 0);
}
return frame;
}
    /**
     * Window frame representation for a frame of type GROUPS.
     * <p>
     * start, end - first and last row of the frame within the window partition
     * (positions relative to the partition start).
     * startGroupIndex, endGroupIndex - indexes of the respective peer groups within
     * the partition: start points at the first row of the startGroupIndex-th peer
     * group, end points at the last row of the endGroupIndex-th peer group.
     * Either index may be the {@code IGNORE_GROUP_INDEX} sentinel when it was not
     * tracked; reading an ignored index fails fast via checkState.
     */
    private static class GroupsFrame
    {
        // sentinel meaning "peer group index not tracked for this bound"
        private static final int IGNORE_GROUP_INDEX = -1;
        private final int start;
        private final int startGroupIndex;
        private final int end;
        private final int endGroupIndex;
        public GroupsFrame(int start, int startGroupIndex, int end, int endGroupIndex)
        {
            this.start = start;
            this.startGroupIndex = startGroupIndex;
            this.end = end;
            this.endGroupIndex = endGroupIndex;
        }
        public static int ignoreIndex()
        {
            return IGNORE_GROUP_INDEX;
        }
        // copy of this frame with a replaced start bound; the end bound is kept
        public GroupsFrame withStart(int start, int startGroupIndex)
        {
            return new GroupsFrame(start, startGroupIndex, this.end, this.endGroupIndex);
        }
        // copy of this frame with a replaced end bound; the start bound is kept
        public GroupsFrame withEnd(int end, int endGroupIndex)
        {
            return new GroupsFrame(this.start, this.startGroupIndex, end, endGroupIndex);
        }
        public int getStart()
        {
            return start;
        }
        public int getStartGroupIndex()
        {
            checkState(startGroupIndex != IGNORE_GROUP_INDEX, "accessing ignored group index");
            return startGroupIndex;
        }
        public int getEnd()
        {
            return end;
        }
        public int getEndGroupIndex()
        {
            checkState(endGroupIndex != IGNORE_GROUP_INDEX, "accessing ignored group index");
            return endGroupIndex;
        }
        public Range getRange()
        {
            return new Range(start, end);
        }
    }
    /**
     * Immutable pair of an in-partition row position and the index of the peer group
     * that row belongs to.
     */
    private static class PositionAndGroup
    {
        private final int position;
        private final int group;
        public PositionAndGroup(int position, int group)
        {
            this.position = position;
            this.group = group;
        }
        public int getPosition()
        {
            return position;
        }
        public int getGroup()
        {
            return group;
        }
    }
    /**
     * Supplies the (position, group) result to use when a requested peer group lies
     * outside the partition; receives the index of the last known peer group.
     */
    private interface EdgeResultProvider
    {
        PositionAndGroup get(int lastPeerGroup);
    }
}
| apache-2.0 |
srpepperoni/libGDX | backends/gdx-backend-lwjgl/src/com/badlogic/gdx/backends/lwjgl/LwjglFiles.java | 2182 | /*******************************************************************************
* Copyright 2011 See AUTHORS file.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.badlogic.gdx.backends.lwjgl;
import java.io.File;
import com.badlogic.gdx.Files;
import com.badlogic.gdx.files.FileHandle;
/** Desktop (LWJGL) implementation of {@link Files}. Internal/local paths resolve
 * against the application's working directory, external paths against the user's
 * home directory; both storage kinds are always reported as available.
 *
 * @author mzechner
 * @author Nathan Sweet */
public final class LwjglFiles implements Files {
	/** Root of "external" storage: the user's home directory, with trailing separator. */
	static public final String externalPath = System.getProperty("user.home") + File.separator;
	/** Root of "local" storage: the current working directory, with trailing separator. */
	static public final String localPath = new File("").getAbsolutePath() + File.separator;
	@Override
	public FileHandle getFileHandle (String fileName, FileType type) {
		return new LwjglFileHandle(fileName, type);
	}
	@Override
	public FileHandle classpath (String path) {
		return new LwjglFileHandle(path, FileType.Classpath);
	}
	@Override
	public FileHandle internal (String path) {
		return new LwjglFileHandle(path, FileType.Internal);
	}
	@Override
	public FileHandle external (String path) {
		return new LwjglFileHandle(path, FileType.External);
	}
	@Override
	public FileHandle absolute (String path) {
		return new LwjglFileHandle(path, FileType.Absolute);
	}
	@Override
	public FileHandle local (String path) {
		return new LwjglFileHandle(path, FileType.Local);
	}
	@Override
	public String getExternalStoragePath () {
		return externalPath;
	}
	@Override
	public boolean isExternalStorageAvailable () {
		// desktop: the home directory is assumed to always be present
		return true;
	}
	@Override
	public String getLocalStoragePath () {
		return localPath;
	}
	@Override
	public boolean isLocalStorageAvailable () {
		// desktop: the working directory is assumed to always be present
		return true;
	}
}
| apache-2.0 |
TangLj/coolweather | app/src/main/java/win/tanglj/coolweather/util/HttpUtil.java | 441 | package win.tanglj.coolweather.util;
import okhttp3.Callback;
import okhttp3.OkHttpClient;
import okhttp3.Request;
/**
 * Small helper for issuing asynchronous HTTP requests via OkHttp.
 */
public class HttpUtil {
    /**
     * Fires an asynchronous request to the given address; the response (or failure)
     * is delivered to the supplied callback by OkHttp's dispatcher.
     */
    public static void sendOkHttpRequest(String address, Callback callback) {
        Request request = new Request.Builder().url(address).build();
        new OkHttpClient().newCall(request).enqueue(callback);
    }
}
| apache-2.0 |
ronaldploeger/ContentSecurityPolicyFilter | src/main/java/de/saville/csp/ContentSecurityPolicyLoggingReporter.java | 1221 | package de.saville.csp;
import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Servlet endpoint that receives Content-Security-Policy violation reports
 * (POSTed by the browser as the request body) and logs them at WARN level
 * through SLF4J.
 *
 * Example web.xml registration:
 *
 * <servlet>
 * <servlet-name>ContentSecurityPolicyReporter</servlet-name>
 * <servlet-class>de.saville.csp.ContentSecurityPolicyReporter</servlet-class>
 * </servlet>
 *
 * <servlet-mapping>
 * <servlet-name>ContentSecurityPolicyReporter</servlet-name>
 * <url-pattern>/ContentSecurityPolicyReporter</url-pattern>
 * </servlet-mapping>
 */
public class ContentSecurityPolicyLoggingReporter extends HttpServlet {
    private static final long serialVersionUID = 1L;
    private static final Logger logger = LoggerFactory.getLogger(ContentSecurityPolicyLoggingReporter.class);
    @Override
    protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        // the violation report is the raw request body (JSON sent by the browser); log it verbatim
        logger.warn(IOUtils.toString(request.getReader()));
    }
}
| apache-2.0 |
vyouzhis/oc | src/com/lang/manager/analysis/_RList.java | 313 | package com.lang.manager.analysis;
import org.ppl.BaseClass.LibLang;
public class _RList extends LibLang {
	/** Registers this analysis class and its package with the language library. */
	public _RList() {
		String canonicalName = getClass().getCanonicalName();
		GetSubClassName(canonicalName);
		SelfPath(getClass().getPackage().getName());
	}
}
| apache-2.0 |
cbeams-archive/spring-framework-2.5.x | test/org/springframework/context/support/StaticMessageSourceTests.java | 10858 | /*
* Copyright 2002-2008 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.context.support;
import java.util.Date;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import org.springframework.beans.MutablePropertyValues;
import org.springframework.beans.factory.support.PropertiesBeanDefinitionReader;
import org.springframework.context.ACATest;
import org.springframework.context.AbstractApplicationContextTests;
import org.springframework.context.BeanThatListens;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.MessageSourceResolvable;
import org.springframework.context.NoSuchMessageException;
import org.springframework.core.io.ClassPathResource;
/**
 * Tests for {@link StaticMessageSource}: message resolution with arguments,
 * defaults, locale fallback, caching of MessageFormats, resolvable code chains
 * and parent/child message-source nesting.
 *
 * @author Rod Johnson
 * @author Juergen Hoeller
 */
public class StaticMessageSourceTests extends AbstractApplicationContextTests {
	protected static final String MSG_TXT1_US =
		"At '{1,time}' on \"{1,date}\", there was \"{2}\" on planet {0,number,integer}.";
	protected static final String MSG_TXT1_UK =
		"At '{1,time}' on \"{1,date}\", there was \"{2}\" on station number {0,number,integer}.";
	protected static final String MSG_TXT2_US =
		"This is a test message in the message catalog with no args.";
	protected static final String MSG_TXT3_US =
		"This is another test message in the message catalog with no args.";
	// context under test; built by createContext() before each test
	protected StaticApplicationContext sac;
	/** Overridden */
	public void testCount() {
		// These are only checked for current Ctx (not parent ctx)
		assertCount(15);
	}
	public void testMessageSource() throws NoSuchMessageException {
		// Do nothing here since super is looking for errorCodes we
		// do NOT have in the Context
	}
	public void testGetMessageWithDefaultPassedInAndFoundInMsgCatalog() {
		// Try with Locale.US: catalog entry wins over the passed-in default
		assertTrue("valid msg from staticMsgSource with default msg passed in returned msg from msg catalog for Locale.US",
				sac.getMessage("message.format.example2", null, "This is a default msg if not found in MessageSource.", Locale.US)
				.equals("This is a test message in the message catalog with no args."));
	}
	public void testGetMessageWithDefaultPassedInAndNotFoundInMsgCatalog() {
		// Try with Locale.US: unknown code falls back to the passed-in default
		assertTrue("bogus msg from staticMsgSource with default msg passed in returned default msg for Locale.US",
				sac.getMessage("bogus.message", null, "This is a default msg if not found in MessageSource.", Locale.US)
				.equals("This is a default msg if not found in MessageSource."));
	}
	/**
	 * We really are testing the AbstractMessageSource class here.
	 * The underlying implementation uses a hashMap to cache messageFormats
	 * once a message has been asked for. This test is an attempt to
	 * make sure the cache is being used properly.
	 * @see org.springframework.context.support.AbstractMessageSource for more details.
	 */
	public void testGetMessageWithMessageAlreadyLookedFor() {
		Object[] arguments = {
			new Integer(7), new Date(System.currentTimeMillis()),
			"a disturbance in the Force"
		};
		// The first time searching, we don't care about for this test
		// Try with Locale.US
		sac.getMessage("message.format.example1", arguments, Locale.US);
		// Now msg better be as expected
		assertTrue("2nd search within MsgFormat cache returned expected message for Locale.US",
				sac.getMessage("message.format.example1", arguments, Locale.US).indexOf(
						"there was \"a disturbance in the Force\" on planet 7.") != -1);
		Object[] newArguments = {
			new Integer(8), new Date(System.currentTimeMillis()),
			"a disturbance in the Force"
		};
		// Now msg better be as expected even with different args
		assertTrue("2nd search within MsgFormat cache with different args returned expected message for Locale.US",
				sac.getMessage("message.format.example1", newArguments, Locale.US)
				.indexOf("there was \"a disturbance in the Force\" on planet 8.") != -1);
	}
	/**
	 * Example taken from the javadocs for the java.text.MessageFormat class
	 */
	public void testGetMessageWithNoDefaultPassedInAndFoundInMsgCatalog() {
		Object[] arguments = {
			new Integer(7), new Date(System.currentTimeMillis()),
			"a disturbance in the Force"
		};
		/*
		 Try with Locale.US
		 Since the msg has a time value in it, we will use String.indexOf(...)
		 to just look for a substring without the time. This is because it is
		 possible that by the time we store a time variable in this method
		 and the time the ResourceBundleMessageSource resolves the msg the
		 minutes of the time might not be the same.
		 */
		assertTrue("msg from staticMsgSource for Locale.US substituting args for placeholders is as expected",
				sac.getMessage("message.format.example1", arguments, Locale.US)
				.indexOf("there was \"a disturbance in the Force\" on planet 7.") != -1);
		// Try with Locale.UK
		assertTrue("msg from staticMsgSource for Locale.UK substituting args for placeholders is as expected",
				sac.getMessage("message.format.example1", arguments, Locale.UK)
				.indexOf("there was \"a disturbance in the Force\" on station number 7.") != -1);
		// Try with Locale.US - Use a different test msg that requires no args
		assertTrue("msg from staticMsgSource for Locale.US that requires no args is as expected",
				sac.getMessage("message.format.example2", null, Locale.US)
				.equals("This is a test message in the message catalog with no args."));
	}
	public void testGetMessageWithNoDefaultPassedInAndNotFoundInMsgCatalog() {
		// Expecting an exception
		try {
			// Try with Locale.US
			sac.getMessage("bogus.message", null, Locale.US);
			fail("bogus msg from staticMsgSource for Locale.US without default msg should have thrown exception");
		}
		catch (NoSuchMessageException tExcept) {
			assertTrue("bogus msg from staticMsgSource for Locale.US without default msg threw expected exception", true);
		}
	}
	public void testMessageSourceResolvable() {
		// first code valid
		String[] codes1 = new String[] {"message.format.example3", "message.format.example2"};
		MessageSourceResolvable resolvable1 = new DefaultMessageSourceResolvable(codes1, null, "default");
		try {
			assertTrue("correct message retrieved", MSG_TXT3_US.equals(sac.getMessage(resolvable1, Locale.US)));
		}
		catch (NoSuchMessageException ex) {
			fail("Should not throw NoSuchMessageException");
		}
		// only second code valid
		String[] codes2 = new String[] {"message.format.example99", "message.format.example2"};
		MessageSourceResolvable resolvable2 = new DefaultMessageSourceResolvable(codes2, null, "default");
		try {
			assertTrue("correct message retrieved", MSG_TXT2_US.equals(sac.getMessage(resolvable2, Locale.US)));
		}
		catch (NoSuchMessageException ex) {
			fail("Should not throw NoSuchMessageException");
		}
		// no code valid, but default given
		String[] codes3 = new String[] {"message.format.example99", "message.format.example98"};
		MessageSourceResolvable resolvable3 = new DefaultMessageSourceResolvable(codes3, null, "default");
		try {
			assertTrue("correct message retrieved", "default".equals(sac.getMessage(resolvable3, Locale.US)));
		}
		catch (NoSuchMessageException ex) {
			fail("Should not throw NoSuchMessageException");
		}
		// no code valid, no default
		String[] codes4 = new String[] {"message.format.example99", "message.format.example98"};
		MessageSourceResolvable resolvable4 = new DefaultMessageSourceResolvable(codes4);
		try {
			sac.getMessage(resolvable4, Locale.US);
			fail("Should have thrown NoSuchMessageException");
		}
		catch (NoSuchMessageException ex) {
			// expected
		}
	}
	/** Run for each test */
	protected ConfigurableApplicationContext createContext() throws Exception {
		// parent context holding two prototype TestBeans
		StaticApplicationContext parent = new StaticApplicationContext();
		Map m = new HashMap();
		m.put("name", "Roderick");
		parent.registerPrototype("rod", org.springframework.beans.TestBean.class, new MutablePropertyValues(m));
		m.put("name", "Albert");
		parent.registerPrototype("father", org.springframework.beans.TestBean.class, new MutablePropertyValues(m));
		parent.refresh();
		parent.addListener(parentListener);
		this.sac = new StaticApplicationContext(parent);
		sac.registerSingleton("beanThatListens", BeanThatListens.class, new MutablePropertyValues());
		sac.registerSingleton("aca", ACATest.class, new MutablePropertyValues());
		sac.registerPrototype("aca-prototype", ACATest.class, new MutablePropertyValues());
		PropertiesBeanDefinitionReader reader = new PropertiesBeanDefinitionReader(sac.getDefaultListableBeanFactory());
		reader.loadBeanDefinitions(new ClassPathResource("testBeans.properties", getClass()));
		sac.refresh();
		sac.addListener(listener);
		// register the test messages used by the assertions above
		StaticMessageSource messageSource = sac.getStaticMessageSource();
		Map usMessages = new HashMap(3);
		usMessages.put("message.format.example1", MSG_TXT1_US);
		usMessages.put("message.format.example2", MSG_TXT2_US);
		usMessages.put("message.format.example3", MSG_TXT3_US);
		messageSource.addMessages(usMessages, Locale.US);
		messageSource.addMessage("message.format.example1", Locale.UK, MSG_TXT1_UK);
		return sac;
	}
	public void testNestedMessageSourceWithParamInChild() {
		StaticMessageSource source = new StaticMessageSource();
		StaticMessageSource parent = new StaticMessageSource();
		source.setParentMessageSource(parent);
		source.addMessage("param", Locale.ENGLISH, "value");
		parent.addMessage("with.param", Locale.ENGLISH, "put {0} here");
		MessageSourceResolvable resolvable = new DefaultMessageSourceResolvable(
				new String[] {"with.param"}, new Object[] {new DefaultMessageSourceResolvable("param")});
		assertEquals("put value here", source.getMessage(resolvable, Locale.ENGLISH));
	}
	public void testNestedMessageSourceWithParamInParent() {
		StaticMessageSource source = new StaticMessageSource();
		StaticMessageSource parent = new StaticMessageSource();
		source.setParentMessageSource(parent);
		parent.addMessage("param", Locale.ENGLISH, "value");
		source.addMessage("with.param", Locale.ENGLISH, "put {0} here");
		MessageSourceResolvable resolvable = new DefaultMessageSourceResolvable(
				new String[] {"with.param"}, new Object[] {new DefaultMessageSourceResolvable("param")});
		assertEquals("put value here", source.getMessage(resolvable, Locale.ENGLISH));
	}
}
| apache-2.0 |
arnost-starosta/midpoint | gui/admin-gui/src/main/java/com/evolveum/midpoint/web/session/UserProfileStorage.java | 3902 | /*
* Copyright (c) 2010-2017 Evolveum
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.evolveum.midpoint.web.session;
import org.apache.commons.lang.Validate;
import com.evolveum.midpoint.util.DebugDumpable;
import com.evolveum.midpoint.util.DebugUtil;
import com.evolveum.midpoint.web.session.UserProfileStorage.TableId;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
/**
 * Per-user GUI session storage of adjustable table paging sizes.
 *
 * @author shood
 * @author Viliam Repan (lazyman)
 */
public class UserProfileStorage implements Serializable, DebugDumpable {
    private static final long serialVersionUID = 1L;
    // paging size used for any table the user has not customized
    public static final int DEFAULT_PAGING_SIZE = 20;
    /*
     * Enum containing IDs of all tables where the paging size can be adjusted.
     */
    public enum TableId {
        PAGE_USER_SELECTION,
        TABLE_ROLES,
        TABLE_CASES,
        TABLE_USERS,
        TABLE_SERVICES,
        TABLE_RESOURCES,
        TABLE_VALUE_POLICIES,
        ROLE_MEMEBER_PANEL,
        ORG_MEMEBER_PANEL,
        SERVICE_MEMEBER_PANEL,
        TREE_TABLE_PANEL_CHILD,
        TREE_TABLE_PANEL_MEMBER,
        TREE_TABLE_PANEL_MANAGER,
        CONF_PAGE_ACCOUNTS,
        CONF_DEBUG_LIST_PANEL,
        PAGE_CREATED_REPORTS_PANEL,
        PAGE_RESOURCE_PANEL,
        PAGE_RESOURCES_PANEL,
        PAGE_RESOURCE_TASKS_PANEL,
        PAGE_RESOURCE_ACCOUNTS_PANEL_REPOSITORY_MODE,
        PAGE_RESOURCE_ACCOUNTS_PANEL_RESOURCE_MODE,
        PAGE_RESOURCE_ENTITLEMENT_PANEL_REPOSITORY_MODE,
        PAGE_RESOURCE_ENTITLEMENT_PANEL_RESOURCE_MODE,
        PAGE_RESOURCE_GENERIC_PANEL_REPOSITORY_MODE,
        PAGE_RESOURCE_GENERIC_PANEL_RESOURCE_MODE,
        PAGE_RESOURCE_OBJECT_CLASS_PANEL,
        PAGE_TASKS_PANEL,
        PAGE_TASKS_NODES_PANEL,
        PAGE_USERS_PANEL,
        PAGE_WORK_ITEMS,
        PAGE_WORKFLOW_REQUESTS,
        PAGE_RESOURCES_CONNECTOR_HOSTS,
        PAGE_REPORTS,
        PAGE_CERT_CAMPAIGN_OUTCOMES_PANEL,
        PAGE_CERT_CAMPAIGNS_PANEL,
        PAGE_CERT_DECISIONS_PANEL,
        PAGE_CERT_DEFINITIONS_PANEL,
        PAGE_CASE_WORK_ITEMS_PANEL,
        PAGE_WORK_ITEM_HISTORY_PANEL,
        PAGE_TASK_HISTORY_PANEL,
        PAGE_TASK_CURRENT_WORK_ITEMS_PANEL,
        PAGE_AUDIT_LOG_VIEWER,
        TASK_EVENTS_TABLE,
        ASSIGNMENTS_TAB_TABLE,
        INDUCEMENTS_TAB_TABLE,
        INDUCED_ENTITLEMENTS_TAB_TABLE,
        POLICY_RULES_TAB_TABLE,
        OBJECT_POLICIES_TAB_TABLE,
        GLOBAL_POLICY_RULES_TAB_TABLE,
        LOGGING_TAB_LOGGER_TABLE,
        LOGGING_TAB_APPENDER_TABLE,
        NOTIFICATION_TAB_MAIL_SERVER_TABLE
    }
    // user-customized paging sizes per table; a missing or null entry means "use the default"
    private Map<TableId, Integer> tables = new HashMap<>();
    /**
     * Returns the paging size stored for the given table, or
     * {@link #DEFAULT_PAGING_SIZE} if none (or null) is stored.
     *
     * @throws IllegalArgumentException if key is null
     */
    public Integer getPagingSize(TableId key) {
        Validate.notNull(key, "Key must not be null.");
        Integer size = tables.get(key);
        return size == null ? DEFAULT_PAGING_SIZE : size;
    }
    /**
     * Stores the paging size for the given table; storing null effectively
     * resets the table to the default size.
     *
     * @throws IllegalArgumentException if key is null
     */
    public void setPagingSize(TableId key, Integer size) {
        Validate.notNull(key, "Key must not be null.");
        tables.put(key, size);
    }
    // NOTE(review): returns the internal mutable map, so callers can modify
    // stored sizes directly — confirm this exposure is intended
    public Map<TableId, Integer> getTables() {
        return tables;
    }
    @Override
    public String debugDump() {
        return debugDump(0);
    }
    @Override
    public String debugDump(int indent) {
        StringBuilder sb = new StringBuilder();
        DebugUtil.indentDebugDump(sb, indent);
        sb.append("UserProfileStorage\n");
        DebugUtil.debugDumpWithLabel(sb, "tables", tables, indent+1);
        return sb.toString();
    }
}
| apache-2.0 |
nuclearg/kyou | src/main/java/com/github/nuclearg/kyou/util/ClassUtils.java | 4266 | package com.github.nuclearg.kyou.util;
import java.lang.annotation.Annotation;
import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.reflections.Reflections;
import com.github.nuclearg.kyou.KyouException;
/**
 * Utility methods for locating annotated classes and instantiating them reflectively.
 *
 * @author ng
 */
public class ClassUtils {
    /**
     * Searches for all classes that carry the given annotation and extend the given
     * base class, and builds a name-to-class map from them.
     *
     * @param annotationClass annotation candidate classes must carry
     * @param targetBaseClass base class/interface candidates must be assignable to
     * @param nameParser extracts the map key (name) from the annotation instance
     * @return unmodifiable map from annotation-derived name to class
     * @throws KyouException if two classes resolve to the same name
     */
    public static <T, A extends Annotation> Map<String, Class<? extends T>> buildAnnotatedClassMap(Class<A> annotationClass, Class<T> targetBaseClass, AnnotationNameParser<A> nameParser) {
        Map<String, Class<? extends T>> map = new LinkedHashMap<>();
        for (Class<? extends T> cls : searchClassesWithAnnotation(annotationClass, targetBaseClass)) {
            A annotation = cls.getAnnotation(annotationClass);
            String name = nameParser.parseName(annotation);
            if (map.containsKey(name))
                throw new KyouException("annotation name duplicated. name: " + name + ", old: " + map.get(name) + ", new: " + cls);
            map.put(name, cls);
        }
        return Collections.unmodifiableMap(map);
    }
    /**
     * Extracts a name from an annotation instance.
     *
     * @author ng
     *
     * @param <A>
     */
    public static interface AnnotationNameParser<A extends Annotation> {
        /**
         * Extracts the name carried by the given annotation.
         *
         * @param annotation
         *            the annotation to read the name from
         * @return the annotation's name
         */
        public String parseName(A annotation);
    }
    /**
     * Creates an instance of the class registered in the map under the given name.
     *
     * @param classMap
     *            name-to-class map
     * @param name
     *            name to look up
     * @param args
     *            arguments passed to the constructor
     * @return the new instance, or null if the name is not present in the map
     */
    public static <T> T newInstance(Map<String, Class<? extends T>> classMap, String name, Object... args) {
        if (!classMap.containsKey(name))
            return null;
        return newInstance(classMap.get(name), args);
    }
    /**
     * Creates an instance of the given class.
     * <p>
     * The first declared constructor whose arity matches {@code args} is used;
     * note that argument types are not checked, only the argument count.
     *
     * @param cls
     *            class to instantiate
     * @param args
     *            constructor arguments
     * @return the new instance
     * @throws KyouException if instantiation fails or no constructor of matching arity exists
     */
    @SuppressWarnings("unchecked")
    public static <T> T newInstance(Class<T> cls, Object... args) {
        try {
            for (Constructor<?> constructor : cls.getDeclaredConstructors())
                if (constructor.getParameterTypes().length == args.length) {
                    constructor.setAccessible(true);
                    // BUGFIX: the matched arguments were previously dropped —
                    // newInstance() was invoked without them, which threw
                    // IllegalArgumentException for any non-empty args
                    return (T) constructor.newInstance(args);
                }
        } catch (Exception ex) {
            throw new KyouException(ex);
        }
        throw new KyouException("no suitable constructor found. args: " + Arrays.toString(args));
    }
    /**
     * Searches for classes that carry the given annotation and extend the given base class.
     * <p>
     * The search is performed within the package the annotation itself lives in.
     *
     * @param annotationClass
     *            annotation candidate classes must carry
     * @param targetBaseClass
     *            base class candidates must be assignable to
     * @return list of matching classes
     */
    public static <T, A extends Annotation> List<Class<? extends T>> searchClassesWithAnnotation(Class<A> annotationClass, Class<T> targetBaseClass) {
        // TODO replace the Reflections dependency with a hand-rolled scanner
        Set<Class<?>> classes = new Reflections(annotationClass.getPackage().getName()).getTypesAnnotatedWith(annotationClass);
        List<Class<? extends T>> result = new ArrayList<>();
        for (Class<?> cls : classes)
            if (targetBaseClass.isAssignableFrom(cls))
                result.add(cls.asSubclass(targetBaseClass));
        return result;
    }
}
| apache-2.0 |
SnappyDataInc/snappy-store | gemfirexd/core/src/main/java/io/snappydata/thrift/server/SnappyTSSLServerSocketFactory.java | 4051 | /*
* Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
/*
* Changes for SnappyData data platform.
*
* Portions Copyright (c) 2017-2019 TIBCO Software Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package io.snappydata.thrift.server;
import java.net.InetSocketAddress;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLServerSocket;
import javax.net.ssl.SSLServerSocketFactory;
import io.snappydata.thrift.common.SSLFactory;
import io.snappydata.thrift.common.SocketParameters;
import org.apache.thrift.transport.TTransportException;
/**
 * A Factory for providing and setting up server SSL wrapped SnappyTServerSocket.
 * <p>
 * Modified from <code>TSSLTransportFactory</code> to add Snappy specific config.
 */
public abstract class SnappyTSSLServerSocketFactory {
  private SnappyTSSLServerSocketFactory() {
    // no instance
  }
  /**
   * Get a configured SSL wrapped TServerSocket bound to the specified port and
   * interface.
   * <p>
   * If SocketParameters have SSL properties set, then they are used to set the
   * values for the algorithms, keystore, truststore and other settings.
   * <p>
   * Else if SocketParameters don't have SSL settings, then the default settings
   * are used. Default settings are retrieved from server System properties.
   *
   * Example system properties: -Djavax.net.ssl.trustStore=<truststore location>
   * -Djavax.net.ssl.trustStorePassword=password
   * -Djavax.net.ssl.keyStore=<keystore location>
   * -Djavax.net.ssl.keyStorePassword=password
   *
   * @throws TTransportException if the SSL context cannot be created or the
   *           socket cannot be bound
   *
   * @return An SSL wrapped {@link SnappyTSSLServerSocket}
   */
  public static SnappyTSSLServerSocket getServerSocket(
      InetSocketAddress bindAddress, SocketParameters params)
      throws TTransportException {
    SSLContext ctx = SSLFactory.createSSLContext(params);
    return new SnappyTSSLServerSocket(serverSocket, bindAddress, params) via createServer(ctx.getServerSocketFactory(), bindAddress, params);
  }
| apache-2.0 |
psycopaths/jdart | src/main/gov/nasa/jpf/jdart/summaries/SummaryConfig.java | 1545 | /*
* Copyright (C) 2015, United States Government, as represented by the
* Administrator of the National Aeronautics and Space Administration.
* All rights reserved.
*
* The PSYCO: A Predicate-based Symbolic Compositional Reasoning environment
* platform is licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may obtain a
* copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software distributed
* under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package gov.nasa.jpf.jdart.summaries;
import gov.nasa.jpf.Config;
import gov.nasa.jpf.jdart.config.ConcolicMethodConfig;
import java.util.ArrayList;
import java.util.Collection;
/**
 * Configuration for summary generation: collects the concolic method
 * configurations listed under the "summary.methods" key of a JPF config.
 */
public class SummaryConfig {
  private final Collection<ConcolicMethodConfig> summaryMethods = new ArrayList<>();
  public SummaryConfig(Config conf) {
    if (!conf.containsKey("summary.methods")) {
      return;
    }
    // each listed id has its method configuration under "concolic.method.<id>"
    for (String id : conf.getStringSet("summary.methods")) {
      summaryMethods.add(ConcolicMethodConfig.read(id, "concolic.method." + id, conf));
    }
  }
  public Collection<ConcolicMethodConfig> getSummaryMethods() {
    return summaryMethods;
  }
}
| apache-2.0 |
mpilone/hazelcastmq | hazelcastmq-jms/src/main/java/org/mpilone/hazelcastmq/jms/UuidIdGenerator.java | 213 | package org.mpilone.hazelcastmq.jms;
import java.util.UUID;
/** {@link IdGenerator} backed by randomly generated UUIDs. */
class UuidIdGenerator implements IdGenerator {
  @Override
  public String newId() {
    UUID uuid = UUID.randomUUID();
    return uuid.toString();
  }
}
| apache-2.0 |
trasa/aws-sdk-java | aws-java-sdk-api-gateway/src/main/java/com/amazonaws/services/apigateway/model/transform/UpdateRestApiRequestMarshaller.java | 4048 | /*
* Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.apigateway.model.transform;
import static com.amazonaws.util.StringUtils.UTF8;
import static com.amazonaws.util.StringUtils.COMMA_SEPARATOR;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.OutputStreamWriter;
import java.io.StringWriter;
import java.io.Writer;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.List;
import java.util.regex.Pattern;
import com.amazonaws.AmazonClientException;
import com.amazonaws.Request;
import com.amazonaws.DefaultRequest;
import com.amazonaws.http.HttpMethodName;
import com.amazonaws.services.apigateway.model.*;
import com.amazonaws.transform.Marshaller;
import com.amazonaws.util.BinaryUtils;
import com.amazonaws.util.StringUtils;
import com.amazonaws.util.StringInputStream;
import com.amazonaws.util.json.*;
/**
* UpdateRestApiRequest Marshaller
*/
/**
 * UpdateRestApiRequest Marshaller.
 * <p>
 * Converts an {@link UpdateRestApiRequest} into an HTTP PATCH
 * {@link Request} against {@code /restapis/{restapi_id}} whose JSON body
 * carries the patch operations.
 */
public class UpdateRestApiRequestMarshaller implements
        Marshaller<Request<UpdateRestApiRequest>, UpdateRestApiRequest> {

    /** Fallback Content-Type header value used when none was supplied. */
    private static final String DEFAULT_CONTENT_TYPE = "";

    public Request<UpdateRestApiRequest> marshall(
            UpdateRestApiRequest updateRestApiRequest) {

        // Fail fast on a null request, matching the SDK-wide convention.
        if (updateRestApiRequest == null) {
            throw new AmazonClientException(
                    "Invalid argument passed to marshall(...)");
        }

        Request<UpdateRestApiRequest> httpRequest =
                new DefaultRequest<UpdateRestApiRequest>(updateRestApiRequest,
                        "AmazonApiGateway");
        httpRequest.setHttpMethod(HttpMethodName.PATCH);

        // Substitute the REST API id into the resource-path template; a null
        // id is rendered as an empty path segment.
        String restApiId = updateRestApiRequest.getRestApiId();
        String resourcePath = "/restapis/{restapi_id}".replace(
                "{restapi_id}",
                restApiId == null ? "" : StringUtils.fromString(restApiId));
        httpRequest.setResourcePath(resourcePath);

        try {
            // Serialize the patch operations into a JSON document.
            StringWriter buffer = new StringWriter();
            JSONWriter json = new JSONWriter(buffer);

            json.object();
            List<PatchOperation> operations =
                    updateRestApiRequest.getPatchOperations();
            if (operations != null) {
                json.key("patchOperations");
                json.array();
                for (PatchOperation operation : operations) {
                    if (operation != null) {
                        PatchOperationJsonMarshaller.getInstance()
                                .marshall(operation, json);
                    }
                }
                json.endArray();
            }
            json.endObject();

            // Set the JSON payload and the headers derived from it. The
            // Content-Length is computed from the UTF-8 encoded bytes.
            String payload = buffer.toString();
            byte[] payloadBytes = payload.getBytes(UTF8);
            httpRequest.setContent(new StringInputStream(payload));
            httpRequest.addHeader("Content-Length",
                    Integer.toString(payloadBytes.length));
            if (!httpRequest.getHeaders().containsKey("Content-Type")) {
                httpRequest.addHeader("Content-Type", DEFAULT_CONTENT_TYPE);
            }
        } catch (Throwable t) {
            throw new AmazonClientException(
                    "Unable to marshall request to JSON: " + t.getMessage(), t);
        }

        return httpRequest;
    }
}
| apache-2.0 |
terradatum/terradatum-jdbc | codegen/codegen/src/main/java/com/terradatum/jdbc/codegen/AttributeInfo.java | 2169 | package com.terradatum.jdbc.codegen;
import com.google.common.base.MoreObjects;
/**
* @author rbellamy@terradatum.com
* @date 2/6/16
*/
/**
 * Describes a single attribute of a database type as seen by the code
 * generator: its raw name, its declared type name, and classification flags
 * (struct / array / string / different-schema). Derived naming helpers
 * ({@link #getSchemaName()}, {@link #getClassName()}, etc.) are computed on
 * demand from {@code typeName} and {@code name} via {@code Names}.
 *
 * @author rbellamy@terradatum.com
 * @date 2/6/16
 */
public class AttributeInfo {

  // Classification flags describing the attribute's database type.
  private boolean isStruct = false;
  private boolean isArray = false;
  private boolean isString = false;
  // True when the attribute's type lives in a different database schema.
  private boolean isDifferentSchema = false;
  // Fully qualified database type name (schema-qualified) — TODO confirm format.
  private String typeName;
  // Raw attribute name as declared in the database.
  private String name;

  /** @return whether this attribute is a struct (object) type */
  public boolean isStruct() {
    return isStruct;
  }

  public void setStruct(boolean struct) {
    this.isStruct = struct;
  }

  /** @return whether this attribute is an array (collection) type */
  public boolean isArray() {
    return isArray;
  }

  public void setArray(boolean array) {
    this.isArray = array;
  }

  /** @return whether this attribute is a string type */
  public boolean isString() {
    return isString;
  }

  public void setString(boolean string) {
    this.isString = string;
  }

  /** @return whether the attribute's type belongs to a different schema */
  public boolean isDifferentSchema() {
    return isDifferentSchema;
  }

  public void setDifferentSchema(boolean isDifferentSchema) {
    this.isDifferentSchema = isDifferentSchema;
  }

  /** @return the declared database type name */
  public String getTypeName() {
    return typeName;
  }

  public void setTypeName(String typeName) {
    this.typeName = typeName;
  }

  /** @return the raw attribute name */
  public String getName() {
    return name;
  }

  public void setName(String name) {
    this.name = name;
  }

  /** @return the schema part of {@code typeName}, in lowerCamelCase */
  public String getSchemaName() {
    return Names.toLowerCamelCase(Names.getSchemaAndName(typeName).getKey());
  }

  /** @return the type part of {@code typeName}, in UpperCamelCase */
  public String getClassName() {
    return Names.toUpperCamelCase(Names.getSchemaAndName(typeName).getValue());
  }

  /** @return {@code name} converted to lowerCamelCase */
  public String getPropertyName() {
    return Names.toLowerCamelCase(name);
  }

  /** @return {@code name} converted to UpperCamelCase */
  public String getAccessorName() {
    return Names.toUpperCamelCase(name);
  }

  /**
   * Lists each underlying field exactly once. The previous implementation
   * printed every flag twice (e.g. both {@code "isStruct"} and
   * {@code "struct"} with the same value) and also recomputed the derived
   * name properties, which go through {@code Names} and would presumably
   * fail on a bean whose {@code typeName}/{@code name} are not yet set —
   * so derived values are intentionally omitted here.
   */
  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this)
        .add("isStruct", isStruct)
        .add("isArray", isArray)
        .add("isString", isString)
        .add("isDifferentSchema", isDifferentSchema)
        .add("typeName", typeName)
        .add("name", name)
        .toString();
  }
}
| apache-2.0 |