repo_name stringlengths 5 108 | path stringlengths 6 333 | size stringlengths 1 6 | content stringlengths 4 977k | license stringclasses 15 values |
|---|---|---|---|---|
vmware/workflowTools | core/src/main/java/com/vmware/action/conditional/ExitIfReusingPostgresDatabaseNotAllowed.java | 1557 | package com.vmware.action.conditional;
import com.vmware.action.BaseAction;
import com.vmware.config.ActionDescription;
import com.vmware.config.WorkflowConfig;
import com.vmware.util.CommandLineUtils;
import com.vmware.util.StringUtils;
import com.vmware.util.SystemUtils;
import com.vmware.util.exception.CancelException;
import com.vmware.util.input.InputUtils;
import com.vmware.util.logging.LogLevel;
@ActionDescription("Exit if existing postgres database should not be reused.")
public class ExitIfReusingPostgresDatabaseNotAllowed extends BaseAction {

    /**
     * Creates the action and declares that the psql command line client must
     * be available for this workflow step to run.
     */
    public ExitIfReusingPostgresDatabaseNotAllowed(WorkflowConfig config) {
        super(config);
        super.addExpectedCommandsToBeAvailable("psql");
    }

    @Override
    public void checkIfWorkflowShouldBeFailed() {
        super.checkIfWorkflowShouldBeFailed();
        super.failIfTrue(StringUtils.isEmpty(fileSystemConfig.databaseSchemaName), "No database schema name set");
    }

    /**
     * If the configured schema already exists, asks the user whether it may be
     * reused; cancels the workflow unless the user answers "yes".
     */
    @Override
    public void process() {
        String schemaName = fileSystemConfig.databaseSchemaName;
        if (!SystemUtils.postgresSchemaExists(schemaName)) {
            // nothing to reuse, let the workflow continue
            return;
        }
        String answer = InputUtils.readValue("Postgres database " + schemaName + " already exists, reuse database? (yes/no)");
        if (answer.equalsIgnoreCase("yes")) {
            return;
        }
        log.info("run command dropdb {} to drop database if needed", schemaName);
        throw new CancelException(LogLevel.INFO, "database already exists");
    }
}
| apache-2.0 |
Communote/communote-server | communote/tests/all-versions/integration/src/main/java/com/communote/server/test/external/MockExternalUserRepository.java | 5276 | package com.communote.server.test.external;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import com.communote.common.i18n.LocalizedMessage;
import com.communote.server.api.core.property.StringPropertyTO;
import com.communote.server.api.core.security.AuthorizationException;
import com.communote.server.core.external.ExternalRepositoryException;
import com.communote.server.core.external.ExternalUserRepository;
import com.communote.server.core.external.IncrementalRepositoryChangeTracker;
import com.communote.server.core.security.UserIdentification;
import com.communote.server.model.config.ExternalSystemConfiguration;
import com.communote.server.model.user.UserProfileFields;
import com.communote.server.model.user.ExternalUserAuthentication;
import com.communote.server.persistence.user.ExternalUserVO;
import com.communote.server.plugins.api.externals.ExternalUserGroupAccessor;
/**
 * User repository for tests. Users and an optional group accessor are added
 * programmatically; lookups are served from an in-memory map keyed by the
 * external user name.
 *
 * @author Communote GmbH - <a href="http://www.communote.com/">http://www.communote.com/</a>
 *
 */
public class MockExternalUserRepository implements ExternalUserRepository {

    /**
     * Mock, dummy configuration
     *
     * @author Communote GmbH - <a href="http://www.communote.com/">http://www.communote.com/</a>
     *
     */
    public static class MockExternalSystemConfiguration extends ExternalSystemConfiguration {

        /**
         *
         */
        private static final long serialVersionUID = 1L;

        @Override
        public String getConfigurationUrl() {
            // the mock has no configuration front-end
            return null;
        }

        @Override
        public String getImageApiUrl() {
            // the mock does not expose a user image API
            return null;
        }
    }

    // ID of the external system this repository represents
    private final String externalSystemId;
    // optional accessor for external groups; null if groups are not supported
    private MockExternalUserGroupAccessor userGroupAccessor;
    // users this repository can provide, keyed by external user name
    private final Map<String, ExternalUserVO> users;
    // configuration returned by getConfiguration; null until explicitly set
    private MockExternalSystemConfiguration configuration;

    /**
     * Create a new user repository
     *
     * @param externalSystemId
     *            the ID of the external system of the repository
     * @param groupAccessor
     *            optional group accessor if the repository should be able to provide groups
     */
    public MockExternalUserRepository(String externalSystemId,
            MockExternalUserGroupAccessor groupAccessor) {
        this.externalSystemId = externalSystemId;
        this.userGroupAccessor = groupAccessor;
        this.users = new HashMap<String, ExternalUserVO>();
    }

    /**
     * Add a user the repository should be able to provide
     *
     * @param user
     *            the user to add; replaces any existing user with the same external user name
     */
    public void addUser(ExternalUserVO user) {
        users.put(user.getExternalUserName(), user);
    }

    @Override
    public MockExternalSystemConfiguration createConfiguration() {
        // note: does not store the created configuration; use setConfiguration for that
        return new MockExternalSystemConfiguration();
    }

    @Override
    public MockExternalSystemConfiguration getConfiguration() {
        return configuration;
    }

    @Override
    public Collection<StringPropertyTO> getExternalLoginProperties(Long userId,
            ExternalUserAuthentication externalUserAuthentication) throws AuthorizationException {
        // the mock has no login properties
        return Collections.emptySet();
    }

    @Override
    public String getExternalSystemId() {
        return externalSystemId;
    }

    @Override
    public ExternalUserGroupAccessor getExternalUserGroupAccessor() {
        return userGroupAccessor;
    }

    @Override
    public IncrementalRepositoryChangeTracker getIncrementalRepositoryChangeTracker(
            boolean doFullSynchronization) {
        // incremental synchronization is not supported by the mock
        return null;
    }

    @Override
    public LocalizedMessage getName() {
        // the mock has no display name
        return null;
    }

    @Override
    public int getOrder() {
        return 0;
    }

    @Override
    public Set<UserProfileFields> getProvidedProfileFieldNames() {
        // no profile fields are provided
        return new HashSet<>();
    }

    @Override
    public ExternalUserVO getUser(String externalUserId) throws ExternalRepositoryException {
        // returns null if the user was never added
        return users.get(externalUserId);
    }

    @Override
    public ExternalUserVO getUser(UserIdentification userIdentification)
            throws ExternalRepositoryException {
        return users.get(userIdentification.getExternalUserId());
    }

    @Override
    public boolean isActive() {
        // the mock repository is always active
        return true;
    }

    @Override
    public boolean isIncrementalSynchronizationAvailable() {
        return false;
    }

    /**
     * Remove a previously added user
     *
     * @param externalUserId
     *            the user to remove
     */
    public void removeUser(String externalUserId) {
        users.remove(externalUserId);
    }

    public void setConfiguration(MockExternalSystemConfiguration configuration) {
        this.configuration = configuration;
    }

    public void setExternalUserGroupAccessor(MockExternalUserGroupAccessor userGroupAccessor) {
        this.userGroupAccessor = userGroupAccessor;
    }

    @Override
    public boolean showInIntegrationOverview() {
        return true;
    }
}
| apache-2.0 |
terrancesnyder/solr-analytics | lucene/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanNormalizationFilterFactory.java | 1614 | package org.apache.lucene.analysis.de;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
/**
 * Simple tests to ensure the German normalization factory is working.
 */
public class TestGermanNormalizationFilterFactory extends BaseTokenStreamTestCase {

    /**
     * "weißbier" must be normalized to "weissbier" (ß folded to ss).
     */
    public void testStemming() throws Exception {
        GermanNormalizationFilterFactory factory = new GermanNormalizationFilterFactory();
        Reader input = new StringReader("weißbier");
        TokenStream stream = factory.create(new MockTokenizer(input, MockTokenizer.WHITESPACE, false));
        assertTokenStreamContents(stream, new String[] { "weissbier" });
    }
}
| apache-2.0 |
lunisolar/magma | magma-func/src/test/java/eu/lunisolar/magma/func/predicate/LObjLongPredicateTest.java | 12884 | /*
* This file is part of "lunisolar-magma".
*
* (C) Copyright 2014-2022 Lunisolar (http://lunisolar.eu/).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.lunisolar.magma.func.predicate;
import eu.lunisolar.magma.func.*; // NOSONAR
import javax.annotation.Nonnull; // NOSONAR
import javax.annotation.Nullable; // NOSONAR
import java.util.Objects;// NOSONAR
import eu.lunisolar.magma.basics.meta.*; // NOSONAR
import eu.lunisolar.magma.basics.meta.functional.*; // NOSONAR
import eu.lunisolar.magma.basics.meta.functional.type.*; // NOSONAR
import eu.lunisolar.magma.basics.meta.functional.domain.*; // NOSONAR
import eu.lunisolar.magma.func.action.*; // NOSONAR
import eu.lunisolar.magma.func.consumer.*; // NOSONAR
import eu.lunisolar.magma.func.consumer.primitives.*; // NOSONAR
import eu.lunisolar.magma.func.consumer.primitives.bi.*; // NOSONAR
import eu.lunisolar.magma.func.consumer.primitives.obj.*; // NOSONAR
import eu.lunisolar.magma.func.consumer.primitives.tri.*; // NOSONAR
import eu.lunisolar.magma.func.function.*; // NOSONAR
import eu.lunisolar.magma.func.function.conversion.*; // NOSONAR
import eu.lunisolar.magma.func.function.from.*; // NOSONAR
import eu.lunisolar.magma.func.function.to.*; // NOSONAR
import eu.lunisolar.magma.func.operator.binary.*; // NOSONAR
import eu.lunisolar.magma.func.operator.ternary.*; // NOSONAR
import eu.lunisolar.magma.func.operator.unary.*; // NOSONAR
import eu.lunisolar.magma.func.predicate.*; // NOSONAR
import eu.lunisolar.magma.func.supplier.*; // NOSONAR
import org.testng.Assert;
import org.testng.annotations.*; //NOSONAR
import java.util.regex.Pattern; //NOSONAR
import java.text.ParseException; //NOSONAR
import eu.lunisolar.magma.basics.*; //NOSONAR
import eu.lunisolar.magma.basics.exceptions.*; //NOSONAR
import java.util.concurrent.atomic.AtomicInteger; //NOSONAR
import eu.lunisolar.magma.func.tuple.*; // NOSONAR
import java.util.function.*; // NOSONAR
/** The test obviously concentrate on the interface methods the function it self is very simple. */
public class LObjLongPredicateTest<T> {

    private static final String ORIGINAL_MESSAGE = "Original message";
    private static final String EXCEPTION_WAS_WRAPPED = "Exception was wrapped.";
    private static final String NO_EXCEPTION_WERE_THROWN = "No exception were thrown.";

    // value returned by the happy-path system under test below
    private boolean testValue = true;

    // system under test, implemented via the throwing entry point testX(..)
    private LObjLongPredicate<Integer> sut = new LObjLongPredicate<Integer>(){
        public boolean testX(Integer a1,long a2) {
            return testValue;
        }
    };

    // variant that always throws a checked exception
    private LObjLongPredicate<Integer> sutAlwaysThrowing = LObjLongPredicate.objLongPred((a1,a2) -> {
        throw new ParseException(ORIGINAL_MESSAGE, 0);
    });

    // variant that always throws an unchecked exception
    private LObjLongPredicate<Integer> sutAlwaysThrowingUnchecked = LObjLongPredicate.objLongPred((a1,a2) -> {
        throw new IndexOutOfBoundsException(ORIGINAL_MESSAGE);
    });

    @Test
    public void testTheResult() throws Throwable {
        Assert.assertEquals(sut.test(100,100L), testValue);
    }

    @Test
    public void testTupleCall() throws Throwable {
        // the predicate can also be invoked with both arguments packed in a tuple
        LObjLongPair<Integer> domainObject = Tuple4U.objLongPair(100,100L);

        Object result = sut.tupleTest(domainObject);

        Assert.assertEquals(result, testValue);
    }

    @Test
    public void testNonNullTest() throws Throwable {
        Assert.assertEquals(sut.nonNullTest(100,100L), testValue);
    }

    @Test
    public void testNestingTestUnchecked() throws Throwable {
        // nestingTest must propagate unchecked exceptions unchanged (no wrapping)
        // then
        try {
            sutAlwaysThrowingUnchecked.nestingTest(100,100L);
            Assert.fail(NO_EXCEPTION_WERE_THROWN);
        } catch (Exception e) {
            Assert.assertEquals(e.getClass(), IndexOutOfBoundsException.class);
            Assert.assertNull(e.getCause());
            Assert.assertEquals(e.getMessage(), ORIGINAL_MESSAGE);
        }
    }

    @Test
    public void testShovingTestUnchecked() throws Throwable {
        // shovingTest must propagate unchecked exceptions unchanged (no wrapping)
        // then
        try {
            sutAlwaysThrowingUnchecked.shovingTest(100,100L);
            Assert.fail(NO_EXCEPTION_WERE_THROWN);
        } catch (Exception e) {
            Assert.assertEquals(e.getClass(), IndexOutOfBoundsException.class);
            Assert.assertNull(e.getCause());
            Assert.assertEquals(e.getMessage(), ORIGINAL_MESSAGE);
        }
    }

    @Test
    public void testApplyAsBooleanShouldNotModifyValue() throws Throwable {
        Assert.assertEquals(sut.doApplyAsBoolean(100,100L), testValue);
    }

    @Test
    public void testFunctionalInterfaceDescription() throws Throwable {
        Assert.assertEquals(sut.functionalInterfaceDescription(), "LObjLongPredicate: boolean test(T a1,long a2)");
    }

    @Test
    public void testObjLongPredMethod() throws Throwable {
        Assert.assertTrue(LObjLongPredicate.objLongPred((a1,a2) -> testValue ) instanceof LObjLongPredicate);
    }

    @Test
    public void testnegate() throws Throwable {
        Assert.assertEquals(sut.negate().test(100,100L), !testValue);
    }

    // truth table driving testAndOrXor below
    @DataProvider(name="boolean permutations")
    public Object[][] permuations() {
        return new Object[][] {
            // b1 , b2 , AND , OR , XOR
            {false, false, false, false, false },
            {true , false, false, true , true },
            {false, true , false, true , true },
            {true , true , true , true , false },
        };
    }

    @Test(dataProvider="boolean permutations")
    public void testAndOrXor(final boolean f1Result, final boolean f2Result, final boolean andResult, final boolean orResult, final boolean xorResult) throws Throwable {
        //given
        LObjLongPredicate<Integer> fun1 = LObjLongPredicate.objLongPred((a1,a2) -> f1Result);
        LObjLongPredicate<Integer> fun2 = LObjLongPredicate.objLongPred((a1,a2) -> f2Result);

        //when
        LObjLongPredicate<Integer> andFunction = fun1.and(fun2);
        LObjLongPredicate<Integer> orFunction = fun1.or(fun2);
        LObjLongPredicate<Integer> xorFunction = fun1.xor(fun2);

        //then
        Assert.assertEquals(andFunction.test(100,100L), andResult);
        Assert.assertEquals(orFunction.test(100,100L), orResult);
        Assert.assertEquals(xorFunction.test(100,100L), xorResult);
    }

    @Test
    public void testIsEqual() throws Throwable {
        //when
        LObjLongPredicate<Integer> equals = LObjLongPredicate.isEqual(1,1L);

        //then
        Assert.assertTrue(equals.test(1,1L));
        Assert.assertFalse(equals.test(0,0L));
    }

    // <editor-fold desc="compose (functional)">

    @Test
    public void testCompose() throws Throwable {
        final ThreadLocal<Boolean> mainFunctionCalled = ThreadLocal.withInitial(()-> false);
        final AtomicInteger beforeCalls = new AtomicInteger(0);

        //given (+ some assertions)
        LObjLongPredicate<Integer> sutO = (a1,a2) -> {
            mainFunctionCalled.set(true);
            Assert.assertEquals(a1, (Object) 90);
            Assert.assertEquals(a2, (Object) 91L);
            return true;
        };

        // before1/before2 adapt the raw arguments (80, 81L) to (90, 91L)
        LFunction<Integer,Integer> before1 = p0 -> {
            Assert.assertEquals(p0, (Object) 80);
            beforeCalls.incrementAndGet();
            return 90;
        };
        LLongUnaryOperator before2 = p1 -> {
            Assert.assertEquals(p1, (Object) 81L);
            beforeCalls.incrementAndGet();
            return 91L;
        };

        //when
        LObjLongPredicate<Integer> function = sutO.compose(before1,before2);
        function.test(80,81L);

        //then - finals
        Assert.assertTrue(mainFunctionCalled.get());
        Assert.assertEquals(beforeCalls.get(), 2);
    }

    @Test
    public void testObjLongPredCompose() throws Throwable {
        final ThreadLocal<Boolean> mainFunctionCalled = ThreadLocal.withInitial(()-> false);
        final AtomicInteger beforeCalls = new AtomicInteger(0);

        //given (+ some assertions)
        LObjLongPredicate<Integer> sutO = (a1,a2) -> {
            mainFunctionCalled.set(true);
            Assert.assertEquals(a1, (Object) 90);
            Assert.assertEquals(a2, (Object) 91L);
            return true;
        };

        // unlike compose(..), the second adapter maps an object argument to a long
        LFunction<Integer,Integer> before1 = p0 -> {
            Assert.assertEquals(p0, (Object) 80);
            beforeCalls.incrementAndGet();
            return 90;
        };
        LToLongFunction<Integer> before2 = p1 -> {
            Assert.assertEquals(p1, (Object) 81);
            beforeCalls.incrementAndGet();
            return 91L;
        };

        //when
        LBiPredicate<Integer,Integer> function = sutO.objLongPredCompose(before1,before2);
        function.test(80,81);

        //then - finals
        Assert.assertTrue(mainFunctionCalled.get());
        Assert.assertEquals(beforeCalls.get(), 2);
    }

    // </editor-fold>

    // <editor-fold desc="then (functional)">

    @Test
    public void testBoolToObjLongFunc0() throws Throwable {
        final ThreadLocal<Boolean> mainFunctionCalled = ThreadLocal.withInitial(()-> false);
        final ThreadLocal<Boolean> thenFunctionCalled = ThreadLocal.withInitial(()-> false);

        //given (+ some assertions)
        LObjLongPredicate<Integer> sutO = (a1,a2) -> {
            mainFunctionCalled.set(true);
            Assert.assertEquals(a1, (Object) 80);
            Assert.assertEquals(a2, (Object) 81L);
            return true;
        };

        // maps the predicate's boolean result to an Integer
        LBoolFunction<Integer> thenFunction = p -> {
            thenFunctionCalled.set(true);
            // boolean
            Assert.assertEquals(p, (Object) true);
            // Integer
            return 100;
        };

        //when
        LObjLongFunction<Integer,Integer> function = sutO.boolToObjLongFunc(thenFunction);
        Integer finalValue = function.apply(80,81L);

        //then - finals
        Assert.assertEquals(finalValue, (Object) 100);
        Assert.assertTrue(mainFunctionCalled.get());
        Assert.assertTrue(thenFunctionCalled.get());
    }

    @Test
    public void testBoolToObjLongPred1() throws Throwable {
        final ThreadLocal<Boolean> mainFunctionCalled = ThreadLocal.withInitial(()-> false);
        final ThreadLocal<Boolean> thenFunctionCalled = ThreadLocal.withInitial(()-> false);

        //given (+ some assertions)
        LObjLongPredicate<Integer> sutO = (a1,a2) -> {
            mainFunctionCalled.set(true);
            Assert.assertEquals(a1, (Object) 80);
            Assert.assertEquals(a2, (Object) 81L);
            return true;
        };

        // maps the predicate's boolean result to another boolean
        LLogicalOperator thenFunction = p -> {
            thenFunctionCalled.set(true);
            // boolean
            Assert.assertEquals(p, (Object) true);
            // boolean
            return true;
        };

        //when
        LObjLongPredicate<Integer> function = sutO.boolToObjLongPred(thenFunction);
        boolean finalValue = function.test(80,81L);

        //then - finals
        Assert.assertEquals(finalValue, (Object) true);
        Assert.assertTrue(mainFunctionCalled.get());
        Assert.assertTrue(thenFunctionCalled.get());
    }

    // </editor-fold>

    @Test(expectedExceptions = RuntimeException.class)
    public void testShove() {
        // given
        LObjLongPredicate<Integer> sutThrowing = LObjLongPredicate.objLongPred((a1,a2) -> {
            throw new UnsupportedOperationException();
        });

        // when
        sutThrowing.shovingTest(100,100L);
    }

    @Test
    public void testToString() throws Throwable {
        // lambdas/anonymous classes print as <enclosing class>$...
        Assert.assertTrue(sut.toString().startsWith(this.getClass().getName()+"$"));
        Assert.assertTrue(String.format("%s", sut).contains("LObjLongPredicate: boolean test(T a1,long a2)"));
    }

    @Test
    public void isThrowing() {
        Assert.assertFalse(sut.isThrowing());
    }

    //<editor-fold desc="Variants">

    // argument-swapped variant used by the compiler-substitution test below
    private boolean variantLLongObjPred(long a2,Integer a1) {
        return true;
    }

    @Test
    public void compilerSubstituteVariantLLongObjPred() {
        // the (long, T) variant must be accepted via the longObjPred factory
        LObjLongPredicate lambda = LObjLongPredicate./*<T>*/longObjPred(this::variantLLongObjPred);

        Assert.assertTrue(lambda instanceof LObjLongPredicate.LLongObjPred);
    }

    //</editor-fold>
}
| apache-2.0 |
kreon/jnode | jnode-pointchecker-module/src/org/jpntchk/ndl/Checker.java | 5525 | /*
* Licensed to the jNode FTN Platform Develpoment Team (jNode Team)
* under one or more contributor license agreements.
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
* The jNode Team licenses this file to you under the
* Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.jpntchk.ndl;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import jnode.ftn.FtnTools;
import jnode.ftn.types.FtnAddress;
import jnode.ndl.FtnNdlAddress;
import jnode.ndl.NodelistScanner;
/**
 * Validates a FTN pointlist segment (boss and point lines) and mails the
 * result back to every boss node found in the segment.
 *
 * @author kreon
 *
 */
public enum Checker {
    INSTANCE;

    // accumulated error report for the segment currently being checked
    private StringBuffer errors = new StringBuffer();
    // boss nodes encountered in the segment, in order of appearance
    private List<FtnNdlAddress> bosses = new ArrayList<>();
    private String currentFileName = "";

    public String getCurrentFileName() {
        return currentFileName;
    }

    public void setCurrentFileName(String currentFileName) {
        this.currentFileName = currentFileName;
    }

    public String getErrors() {
        return errors.toString();
    }

    /** Clears the error report so the checker can be reused for another segment. */
    public void clean() {
        errors.delete(0, errors.length());
    }

    /**
     * Appends one error line (terminated with "\n") to the report.
     *
     * @param linenum 1-based line number inside the checked segment
     * @param msg description of the problem
     */
    public void addError(int linenum, String msg) {
        String error = "Line: " + linenum + " error : " + msg + "\n";
        errors.append(error);
    }

    /**
     * Checks one pointlist segment.
     *
     * @param data raw segment content; lines are expected to be CR-terminated
     * @param multi whether the segment may contain more than one boss
     * @param name sender name used when writing the result netmail
     * @return true if no errors were found
     */
    public boolean check(byte[] data, boolean multi, String name) {
        bosses.clear();
        List<Long> points = new ArrayList<>();
        // FIX: track already-seen boss addresses by their textual form; the old
        // code did bosses.contains(m.group(1)), comparing a String against a
        // List<FtnNdlAddress>, which could never match.
        Set<String> seenBosses = new HashSet<>();
        Pattern pBoss = Pattern.compile("^Boss,([0-9]:\\d{1,5}/\\d{1,5})$");
        Pattern pPoint = Pattern
                .compile("^Point,(\\d+),(\\S+),(\\S+),(\\S+),(\\S+),(\\d+),(\\S*)$");
        String[] lines = new String(data).replaceAll("\n", "").split("\r");
        int linenum = 0;
        int _points = 0;
        boolean bossnotfound = false;
        for (String line : lines) {
            linenum++;
            if (line.startsWith(";")) {
                // comments are only allowed before the first boss unless the
                // segment is declared as a multi-boss pointlist
                if (!multi && !bosses.isEmpty()) {
                    addError(linenum,
                            "No multi pointlist, comment after boss string");
                }
                continue;
            }
            Matcher m = pBoss.matcher(line);
            if (m.matches()) {
                FtnNdlAddress boss = NodelistScanner.getInstance().isExists(
                        new FtnAddress(m.group(1)));
                if (boss == null) {
                    // addError() already appends the line break
                    addError(linenum, line + " not found in nodelist");
                    bossnotfound = true;
                } else if (multi || bosses.isEmpty()) {
                    if (!seenBosses.add(m.group(1))) {
                        addError(linenum, line + " already exists in pointlist");
                        bossnotfound = true;
                    } else {
                        bosses.add(boss);
                        points.clear();
                        bossnotfound = false;
                    }
                } else {
                    addError(linenum, "Not multi pointlist, next boss found");
                }
                // FIX: a single continue here; previously the point handling
                // below was (due to a misplaced brace) nested inside this
                // boss branch, so non-boss lines were never checked at all.
                continue;
            }
            m = pPoint.matcher(line);
            if (m.matches()) {
                if (bosses.isEmpty()) {
                    addError(linenum,
                            "Point string present, but no boss present before");
                } else {
                    Long point = Long.valueOf(m.group(1));
                    if (points.contains(point)) {
                        if (bossnotfound) {
                            addError(linenum,
                                    "Point for boss, thats not found in nodelist");
                        } else {
                            addError(linenum,
                                    "Point " + point
                                            + " already exists for "
                                            + bosses.get(bosses.size() - 1));
                        }
                    } else {
                        String flags = m.group(7);
                        // count the point only if its flag column validates
                        if (flags != null && checkflags(flags, linenum)) {
                            points.add(point);
                            _points++;
                        }
                    }
                }
                continue;
            }
            addError(linenum, "Unknown line: " + line);
        }
        boolean isReg = false;
        boolean isNet = false;
        if (multi && bosses.size() > 1) {
            // TODO
        }
        // create netmail :-)
        boolean success = (errors.length() == 0);
        String subject = (success) ? "Segment checked : OK"
                : "Segment checked: Errors";
        String text = "File: " + currentFileName + "\nDate: "
                + new Date().toString() + "\n" + "Lines: " + linenum + "\n"
                + "Flags: "
                + ((isReg) ? "regional" : (isNet) ? "net" : "local") + "\n"
                + "Boss lines: " + bosses.size() + "\n" + "Point lines: "
                + _points + "\n";
        if (!success)
            text += errors.toString();
        for (FtnNdlAddress boss : bosses) {
            FtnTools.writeNetmail(FtnTools.getPrimaryFtnAddress(), boss, name,
                    boss.getLine().split(",")[4], subject, text);
        }
        return success;
    }

    /**
     * Validates the flag column of a point line against the set of known
     * nodelist flags. Everything after a "U" marker is treated as a user flag
     * and accepted without validation.
     *
     * @param flagline comma separated flags; an empty string is valid
     * @param linenum line number used for error reporting
     * @return true if every flag before the "U" marker is recognized
     */
    public boolean checkflags(String flagline, int linenum) {
        if (flagline.length() == 0)
            return true;
        String regex = "^(CM|MO|LO|V21|V22|V29|V32|V32B|V32T|V33|V34|HST|"
                + "H14|H16|H96|MAX|PEP|CSP|ZYX|VFC|Z19|V90C|V90S|X2C|X2S|MNP|V42|"
                + "MN|V42B|XA|XB|XC|XP|XR|XW|XX|V110L|V110H|V120L|V120H|X75|ISDN|"
                + "IBN|IFC|ITN|IVM|IFT|ITX|IUC|IMI|ISE|IP|IEM|#\\d{2}|T[a-zA-Z]{2}|"
                + "I(EM|NA|MI|MA|TN|FT):([-a-zA-Z0-9\\.@]+))$";
        boolean uflag = false;
        boolean status = true;
        Pattern p = Pattern.compile(regex);
        for (String flag : flagline.split(",")) {
            if (p.matcher(flag).matches()) {
                continue;
            }
            if (flag.equals("U")) {
                uflag = true;
                continue;
            }
            if (uflag) {
                continue;
            }
            addError(linenum, "unknown flag: " + flag);
            status = false;
        }
        return status;
    }
}
| apache-2.0 |
ichigotake/Android-flow-playground | app/src/main/java/net/ichigotake/playground/screenstructure/flow/AppFlow.java | 3020 | package net.ichigotake.playground.screenstructure.flow;
import android.content.Context;
import android.content.ContextWrapper;
import android.support.annotation.Nullable;
import android.view.LayoutInflater;
import flow.Flow;
/**
 * Wrapper around the {@link Flow} instance provided by square/flow.
 *
 * Copied from the official sample:
 *
 * https://github.com/square/flow/blob/master/flow-sample/src/main/java/com/example/flow/appflow/AppFlow.java
 */
public class AppFlow {

    // name under which an AppFlow instance is registered as an Android "system service"
    private static final String APP_FLOW_SERVICE = "app_flow";

    public static ScreenContextFactory contextFactory() {
        return new ContextFactory();
    }

    public static ScreenContextFactory contextFactory(ScreenContextFactory delegate) {
        return new ContextFactory(delegate);
    }

    /** Returns true if the given system-service name refers to the AppFlow service. */
    public static boolean isAppFlowSystemService(String name) {
        return APP_FLOW_SERVICE.equals(name);
    }

    /**
     * Retrieves the {@link Flow} from a context whose getSystemService
     * answers the {@link #APP_FLOW_SERVICE} lookup with an AppFlow instance.
     */
    public static Flow get(Context context) {
        AppFlow appFlow = (AppFlow) context.getSystemService(APP_FLOW_SERVICE);
        return appFlow.flow;
    }

    /**
     * Returns the screen associated with the given context by
     * {@link ContextFactory#setUpContext}.
     */
    public static <T> T getScreen(Context context) {
        // If this blows up, it's on the caller. We hide the cast as a convenience.
        //noinspection unchecked
        return (T) LocalScreenWrapper.get(context).localScreen;
    }

    /** Resets the flow to the screen currently on top of its backstack. */
    public static void loadInitialScreen(Context context) {
        Flow flow = get(context);
        Object screen = get(context).getBackstack().current().getScreen();
        flow.resetTo(screen);
    }

    private final Flow flow;

    protected AppFlow(Flow flow) {
        this.flow = flow;
    }

    /**
     * ContextWrapper that attaches a screen object to a context and serves a
     * LayoutInflater cloned into this wrapper, so views inflated from it can
     * look the screen up again.
     */
    private static final class LocalScreenWrapper extends ContextWrapper {
        static final String LOCAL_WRAPPER_SERVICE = "flow_local_screen_context_wrapper";
        // lazily created inflater cloned into this context
        private LayoutInflater inflater;

        static LocalScreenWrapper get(Context context) {
            //noinspection ResourceType
            return (LocalScreenWrapper) context.getSystemService(LOCAL_WRAPPER_SERVICE);
        }

        // the screen associated with this context
        final Object localScreen;

        LocalScreenWrapper(Context base, Object localScreen) {
            super(base);
            this.localScreen = localScreen;
        }

        @Override
        public Object getSystemService(String name) {
            // answer the local-wrapper lookup with this wrapper itself
            if (LOCAL_WRAPPER_SERVICE.equals(name)) {
                return this;
            }
            if (Context.LAYOUT_INFLATER_SERVICE.equals(name)) {
                if (inflater == null) {
                    // clone the inflater into this context so inflated views
                    // see this wrapper (and thus the local screen)
                    inflater = LayoutInflater.from(getBaseContext()).cloneInContext(this);
                }
                return inflater;
            }
            return super.getSystemService(name);
        }
    }

    /**
     * Default {@link ScreenContextFactory}: wraps the parent context in a
     * {@link LocalScreenWrapper}, optionally after an inner delegate has
     * prepared the context.
     */
    private static final class ContextFactory implements ScreenContextFactory {
        @Nullable
        private final ScreenContextFactory delegate;

        public ContextFactory() {
            delegate = null;
        }

        public ContextFactory(ScreenContextFactory delegate) {
            this.delegate = delegate;
        }

        @Override
        public Context setUpContext(Screen screen, Context parentContext) {
            if (delegate != null) {
                parentContext = delegate.setUpContext(screen, parentContext);
            }
            return new LocalScreenWrapper(parentContext, screen);
        }

        @Override
        public void tearDownContext(Context context) {
            if (delegate != null) {
                delegate.tearDownContext(context);
            }
        }
    }
}
| apache-2.0 |
ivstuart/tea-mud | src/main/java/com/ivstuart/tmud/person/statistics/affects/DamageOverTime.java | 1307 | /*
* Copyright 2016. Ivan Stuart
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ivstuart.tmud.person.statistics.affects;
import com.ivstuart.tmud.common.DiceRoll;
import com.ivstuart.tmud.state.Mob;
public class DamageOverTime extends Affect {

    // dice rolled on every tick to determine the damage dealt
    protected DiceRoll _damageRoll = null;

    /**
     * Creates a damage-over-time affect.
     *
     * @param mob_ the mob taking damage each tick
     * @param desc_ affect description, also shown in the damage message
     * @param duration_ affect duration, handled by the base class
     * @param damage_ dice roll used to compute per-tick damage
     */
    public DamageOverTime(Mob mob_, String desc_, int duration_,
            DiceRoll damage_) {
        super(mob_, desc_, duration_);
        _damageRoll = damage_;
    }

    /**
     * Rolls the damage dice, applies the damage to the mob's hit points and
     * tells the mob about it.
     */
    @Override
    public boolean tick() {
        // NOTE(review): the return value of super.tick() is discarded and this
        // method always returns false — confirm that affect expiry is handled
        // by the base class itself rather than via this return value.
        super.tick();
        int damage = _damageRoll.roll();
        _mob.getHp().decrease(damage);
        String msg = this._desc + " deals you " + damage + " damage";
        _mob.out(msg);
        return false;
    }

    @Override
    public String toString() {
        return super.toString() + " " + _damageRoll;
    }
}
| apache-2.0 |
jmarranz/itsnat_droid | itsnatdroid/src/main/java/org/itsnat/droid/impl/dom/values/DOMElemValuesResources.java | 283 | package org.itsnat.droid.impl.dom.values;
/**
 * Created by jmarranz on 02/02/2016.
 */
public class DOMElemValuesResources extends DOMElemValues // extends DOMElemValuesWithChildElem
{
    /**
     * Creates the element with the fixed tag name "resources" and no second
     * constructor argument (null), as passed to the DOMElemValues superclass.
     */
    public DOMElemValuesResources()
    {
        super("resources", null);
    }
}
| apache-2.0 |
griffon/griffon | tutorials/reactive/src/integration-test/java/org/example/ReactiveIntegrationTest.java | 3605 | /*
* Copyright 2016-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.example;
import griffon.core.injection.Module;
import griffon.core.mvc.MVCGroup;
import griffon.core.mvc.MVCGroupManager;
import griffon.core.test.GriffonUnitRule;
import griffon.core.test.TestModuleOverrides;
import griffon.inject.BindTo;
import org.codehaus.griffon.runtime.core.injection.AbstractTestingModule;
import org.example.api.Github;
import org.example.api.Repository;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import rx.Observable;
import griffon.annotations.core.Nonnull;
import javax.inject.Inject;
import java.util.List;
import static java.util.Arrays.asList;
import static org.awaitility.Awaitility.await;
import static org.hamcrest.CoreMatchers.hasItem;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Integration test for the "reactive" MVC group: drives the controller
 * against a mocked Github API and asserts the model is populated.
 */
public class ReactiveIntegrationTest {
    private static final String ORGANIZATION = "griffon";

    static {
        // force initialization JavaFX Toolkit
        new javafx.embed.swing.JFXPanel();
    }

    @Rule
    public final GriffonUnitRule griffon = new GriffonUnitRule();

    @Inject private MVCGroupManager mvcGroupManager;
    // MVC group under test; created fresh before and destroyed after every test
    private MVCGroup group;

    @Before
    public void setup() {
        group = mvcGroupManager.createMVCGroup("reactive");
    }

    @After
    public void cleanup() {
        if (group != null) {
            group.destroy();
        }
    }

    /**
     * Loads the repositories of an organization through the controller and
     * verifies that the mocked Github result ends up in the model.
     */
    @Test
    public void happyPath() {
        // given:
        ReactiveModel model = mvcGroupManager.findModel("reactive", ReactiveModel.class);
        ReactiveController controller = mvcGroupManager.findController("reactive", ReactiveController.class);
        Repository repository = Repository.builder()
            .description("griffon")
            .fullName("griffon/griffon")
            .htmlUrl("https://github.com/griffon/griffon")
            .build();

        // expectations
        when(github.repositories(ORGANIZATION)).thenReturn(Observable.just(repository));

        // expect:
        assertThat(model.getRepositories().size(), is(0));

        // when:
        model.setOrganization(ORGANIZATION);
        controller.load();
        // the controller works asynchronously; wait until it reports READY
        await().until(() -> model.getState() == State.READY);

        // then:
        assertThat(model.getRepositories().size(), is(1));
        assertThat(model.getRepositories(), hasItem(repository));
        verify(github).repositories(ORGANIZATION);
    }

    // mocked Github API, bound into the application's injector via @BindTo
    @BindTo(Github.class)
    private final Github github = mock(Github.class);

    @Nonnull
    @TestModuleOverrides
    public List<Module> lazyInitialization() {
        // replace the real view with a mock so no UI is created during the test
        return asList(new AbstractTestingModule() {
            @Override
            protected void doConfigure() {
                bind(ReactiveView.class)
                    .toProvider(() -> mock(ReactiveView.class));
            }
        });
    }
}
| apache-2.0 |
spring-cloud/spring-cloud-gcp | spring-cloud-gcp-data-datastore/src/test/java/org/springframework/cloud/gcp/data/datastore/core/mapping/DatastorePersistentEntityImplTests.java | 8333 | /*
* Copyright 2017-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.gcp.data.datastore.core.mapping;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.springframework.context.ApplicationContext;
import org.springframework.data.annotation.Id;
import org.springframework.data.annotation.Transient;
import org.springframework.data.mapping.PersistentPropertyAccessor;
import org.springframework.data.mapping.SimplePropertyHandler;
import org.springframework.data.util.ClassTypeInformation;
import org.springframework.expression.spel.SpelEvaluationException;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Tests for the Datastore Persistent Entity: kind-name resolution (explicit,
 * defaulted, and SpEL-based), ID property lookup, {@code @Transient} handling,
 * and discrimination (shared-kind inheritance) metadata.
 *
 * @author Chengyuan Zhao
 */
public class DatastorePersistentEntityImplTests {

    /**
     * used to check exception messages and types.
     */
    @Rule
    public ExpectedException expectedException = ExpectedException.none();

    private final DatastoreMappingContext datastoreMappingContext = new DatastoreMappingContext();

    @Test
    public void testTableName() {
        // An explicit @Entity(name = ...) value is used verbatim as the kind name.
        DatastorePersistentEntityImpl<TestEntity> entity = new DatastorePersistentEntityImpl<>(
                ClassTypeInformation.from(TestEntity.class), null);
        assertThat(entity.kindName()).isEqualTo("custom_test_kind");
    }

    @Test
    public void testRawTableName() {
        // Without @Entity, the kind defaults to the uncapitalized simple class name.
        DatastorePersistentEntityImpl<EntityNoCustomName> entity = new DatastorePersistentEntityImpl<>(
                ClassTypeInformation.from(EntityNoCustomName.class), null);
        assertThat(entity.kindName()).isEqualTo("entityNoCustomName");
    }

    @Test
    public void testEmptyCustomTableName() {
        // @Entity with no name also falls back to the uncapitalized class name.
        DatastorePersistentEntityImpl<EntityEmptyCustomName> entity = new DatastorePersistentEntityImpl<>(
                ClassTypeInformation.from(EntityEmptyCustomName.class), null);
        assertThat(entity.kindName()).isEqualTo("entityEmptyCustomName");
    }

    @Test
    public void testExpressionResolutionWithoutApplicationContext() {
        // A SpEL kind-name expression cannot be evaluated without an ApplicationContext.
        this.expectedException.expect(SpelEvaluationException.class);
        this.expectedException.expectMessage("Property or field 'kindPostfix' cannot be found on null");
        DatastorePersistentEntityImpl<EntityWithExpression> entity = new DatastorePersistentEntityImpl<>(
                ClassTypeInformation.from(EntityWithExpression.class), null);
        entity.kindName();
    }

    @Test
    public void testExpressionResolutionFromApplicationContext() {
        // The SpEL expression resolves bean "kindPostfix" from the supplied context.
        DatastorePersistentEntityImpl<EntityWithExpression> entity = new DatastorePersistentEntityImpl<>(
                ClassTypeInformation.from(EntityWithExpression.class), null);
        ApplicationContext applicationContext = mock(ApplicationContext.class);
        when(applicationContext.getBean("kindPostfix")).thenReturn("something");
        when(applicationContext.containsBean("kindPostfix")).thenReturn(true);
        entity.setApplicationContext(applicationContext);
        assertThat(entity.kindName()).isEqualTo("kind_something");
    }

    @Test
    public void testHasIdProperty() {
        assertThat(new DatastoreMappingContext().getPersistentEntity(TestEntity.class)
                .hasIdProperty()).isTrue();
    }

    @Test
    public void testHasNoIdProperty() {
        // EntityWithNoId's "id" field is not annotated with @Id.
        assertThat(new DatastoreMappingContext().getPersistentEntity(EntityWithNoId.class).hasIdProperty()).isFalse();
    }

    @Test
    public void testGetIdPropertyOrFail() {
        // Requesting an ID from an entity without one must fail with a descriptive message.
        this.expectedException.expect(DatastoreDataException.class);
        this.expectedException.expectMessage("An ID property was required but does not exist for the type: " +
                "class org.springframework.cloud.gcp.data.datastore.core.mapping." +
                "DatastorePersistentEntityImplTests$EntityWithNoId");
        new DatastoreMappingContext().getPersistentEntity(EntityWithNoId.class)
                .getIdPropertyOrFail();
    }

    @Test
    public void testIgnoredProperty() {
        // The @Transient field ("notMapped" = "b") must not be visited as a
        // persistent property, so no visited property value may equal "b".
        TestEntity t = new TestEntity();
        t.id = "a";
        t.something = "a";
        t.notMapped = "b";
        DatastorePersistentEntity p = new DatastoreMappingContext()
                .getPersistentEntity(TestEntity.class);
        PersistentPropertyAccessor accessor = p.getPropertyAccessor(t);
        p.doWithProperties(
                (SimplePropertyHandler) (property) -> assertThat(accessor.getProperty(property)).isNotEqualTo("b"));
    }

    @Test
    public void testDiscriminationMetadata() {
        // All classes in a discrimination hierarchy share the root's kind and
        // discrimination field; each subclass carries its own discriminator value.
        DatastorePersistentEntity base = this.datastoreMappingContext.getPersistentEntity(TestEntity.class);
        DatastorePersistentEntity a1 = this.datastoreMappingContext.getPersistentEntity(SubA1TestEntity.class);
        DatastorePersistentEntity a2 = this.datastoreMappingContext.getPersistentEntity(SubA2TestEntity.class);
        assertThat(base.kindName()).isEqualTo("custom_test_kind");
        assertThat(a1.kindName()).isEqualTo("custom_test_kind");
        assertThat(a2.kindName()).isEqualTo("custom_test_kind");
        assertThat(base.getDiscriminationFieldName()).isEqualTo("type_disc_col");
        assertThat(a1.getDiscriminationFieldName()).isEqualTo("type_disc_col");
        assertThat(a2.getDiscriminationFieldName()).isEqualTo("type_disc_col");
        assertThat(base.getDiscriminatorValue()).isNull();
        assertThat(a1.getDiscriminatorValue()).isEqualTo("A1");
        assertThat(a2.getDiscriminatorValue()).isEqualTo("A2");
        // The "discrimination family" of a class is its set of known subclasses,
        // not including the class itself.
        assertThat(this.datastoreMappingContext.getDiscriminationFamily(TestEntity.class))
                .containsExactlyInAnyOrder(SubA1TestEntity.class, SubA2TestEntity.class);
        assertThat(this.datastoreMappingContext.getDiscriminationFamily(SubA1TestEntity.class))
                .containsExactlyInAnyOrder(SubA2TestEntity.class);
        assertThat(this.datastoreMappingContext.getDiscriminationFamily(SubA2TestEntity.class)).isEmpty();
        // Separate hierarchies must not share a family.
        assertThat(this.datastoreMappingContext.getDiscriminationFamily(SubA1TestEntity.class))
                .isNotEqualTo(this.datastoreMappingContext.getDiscriminationFamily(DiscrimEntityA.class));
    }

    @Test
    public void testConflictingDiscriminationFieldNames() {
        // DiscrimEntityB declares field "colB" while its superclass uses "colA".
        this.expectedException.expect(DatastoreDataException.class);
        this.expectedException.expectMessage("This class and its super class both have " +
                "discrimination fields but they are different fields: ");
        this.datastoreMappingContext.getPersistentEntity(DiscrimEntityB.class);
    }

    @Test
    public void testEntityMissingDiscriminationSuperclass() {
        // A @DiscriminatorValue without any @DiscriminatorField in the hierarchy is invalid.
        this.expectedException.expect(DatastoreDataException.class);
        this.expectedException.expectMessage("This class expects a discrimination field but none are designated");
        this.datastoreMappingContext.getPersistentEntity(TestEntityNoSuperclass.class).kindName();
    }

    // Root of a second discrimination hierarchy (field "colA").
    @Entity
    @DiscriminatorField(field = "colA")
    @DiscriminatorValue("a")
    private static class DiscrimEntityA {
    }

    // Valid subclass: redeclares the same discrimination field as its superclass.
    @Entity
    @DiscriminatorField(field = "colA")
    @DiscriminatorValue("c")
    private static class DiscrimEntityC extends DiscrimEntityA {
    }

    // Invalid subclass: conflicting discrimination field ("colB" vs "colA").
    @Entity
    @DiscriminatorField(field = "colB")
    @DiscriminatorValue("b")
    private static class DiscrimEntityB extends DiscrimEntityA {
    }

    // Root entity with a custom kind, a discrimination field and a transient field.
    @Entity(name = "custom_test_kind")
    @DiscriminatorField(field = "type_disc_col")
    private static class TestEntity {
        @Id
        String id;

        @Field(name = "custom_col")
        String something;

        @Transient
        String notMapped;
    }

    @Entity
    @DiscriminatorValue("A1")
    private static class SubA1TestEntity extends TestEntity {
        @Field(name = "type_disc_col")
        String discValue;
    }

    @Entity
    @DiscriminatorValue("A2")
    private static class SubA2TestEntity extends SubA1TestEntity {
    }

    // Has a discriminator value but no @DiscriminatorField anywhere in its hierarchy.
    @Entity
    @DiscriminatorValue("N/A")
    private static class TestEntityNoSuperclass {
        @Id
        String id;
    }

    // No @Entity annotation: kind name defaults to the class name.
    private static class EntityNoCustomName {
        @Id
        String id;

        String something;
    }

    @Entity
    private static class EntityEmptyCustomName {
        @Id
        String id;

        String something;
    }

    // Kind name computed from a SpEL expression referencing bean "kindPostfix".
    @Entity(name = "#{'kind_'.concat(kindPostfix)}")
    private static class EntityWithExpression {
        @Id
        String id;

        String something;
    }

    private static class EntityWithNoId {
        String id;
    }
}
| apache-2.0 |
wujiazhong/SpringFrameworkLearning | SourceCode/highlight_spring4/src/main/java/com/wisely/highlight_spring4/ch3/aware/AwareConfig.java | 273 | package com.wisely.highlight_spring4.ch3.aware;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Configuration;
/**
 * Spring Java configuration that enables classpath scanning for the "aware" demo beans.
 */
@Configuration
// NOTE(review): this class lives in the ch3.aware package but scans
// "com.wisely.highlight_spring4.ch2.aware" — confirm the scan base package is intentional.
@ComponentScan("com.wisely.highlight_spring4.ch2.aware")
public class AwareConfig {
}
| apache-2.0 |
jgrades/jgrades | jg-backend/interface/jg-rest-ws-client/src/main/java/org/jgrades/rest/client/admin/general/WorkingDaysServiceClient.java | 1902 | /*
* Copyright (C) 2016 the original author or authors.
*
* This file is part of jGrades Application Project.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*/
package org.jgrades.rest.client.admin.general;
import org.jgrades.rest.api.admin.general.IWorkingDaysService;
import org.jgrades.rest.client.CoreRestClient;
import org.jgrades.rest.client.StatefullRestTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.ParameterizedTypeReference;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpMethod;
import org.springframework.http.ResponseEntity;
import org.springframework.stereotype.Component;
import java.time.DayOfWeek;
import java.util.Set;
@Component
public class WorkingDaysServiceClient extends CoreRestClient implements IWorkingDaysService {

    /** Relative path of the working-days resource on the backend. */
    private static final String WORKING_DAYS_PATH = "/workingdays";

    /**
     * Creates the client.
     *
     * @param backendBaseUrl base URL of the backend REST API
     * @param restTemplate   session-aware template used for all backend calls
     */
    @Autowired
    public WorkingDaysServiceClient(@Value("${rest.backend.base.url}") String backendBaseUrl,
                                    StatefullRestTemplate restTemplate) {
        super(backendBaseUrl, restTemplate);
    }

    /**
     * Fetches the configured working days from the backend.
     *
     * @return the set of days currently configured as working days
     */
    @Override
    public Set<DayOfWeek> getWorkingDays() {
        ResponseEntity<Set<DayOfWeek>> response = restTemplate.exchange(workingDaysUrl(),
                HttpMethod.GET, HttpEntity.EMPTY, new ParameterizedTypeReference<Set<DayOfWeek>>() {
                });
        return response.getBody();
    }

    /**
     * Replaces the configured working days on the backend.
     *
     * @param days the new set of working days to store
     */
    @Override
    public void setWorkingDays(Set<DayOfWeek> days) {
        HttpEntity<Set<DayOfWeek>> entity = new HttpEntity<>(days);
        restTemplate.exchange(workingDaysUrl(), HttpMethod.POST, entity, Void.class);
    }

    // Both endpoints target the same resource; build the URL in one place
    // (previously duplicated in each method).
    private String workingDaysUrl() {
        return backendBaseUrl + WORKING_DAYS_PATH;
    }
}
| apache-2.0 |
milg0/onvif-java-lib | src/org/oasis_open/docs/wsn/b_2/UnsupportedPolicyRequestFaultType.java | 2494 | //
// Diese Datei wurde mit der JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.5-2 generiert
// Siehe <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// �nderungen an dieser Datei gehen bei einer Neukompilierung des Quellschemas verloren.
// Generiert: 2014.02.04 um 12:22:03 PM CET
//
package org.oasis_open.docs.wsn.b_2;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlType;
import javax.xml.namespace.QName;
import org.oasis_open.docs.wsrf.bf_2.BaseFaultType;
/**
 * Java class for the {@code UnsupportedPolicyRequestFaultType} complex type.
 *
 * <p>
 * The following schema fragment specifies the expected content contained within this class.
 *
 * <pre>
 * &lt;complexType name="UnsupportedPolicyRequestFaultType"&gt;
 *   &lt;complexContent&gt;
 *     &lt;extension base="{http://docs.oasis-open.org/wsrf/bf-2}BaseFaultType"&gt;
 *       &lt;sequence&gt;
 *         &lt;element name="UnsupportedPolicy" type="{http://www.w3.org/2001/XMLSchema}QName" maxOccurs="unbounded" minOccurs="0"/&gt;
 *       &lt;/sequence&gt;
 *       &lt;anyAttribute processContents='lax' namespace='##other'/&gt;
 *     &lt;/extension&gt;
 *   &lt;/complexContent&gt;
 * &lt;/complexType&gt;
 * </pre>
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "UnsupportedPolicyRequestFaultType", propOrder = { "unsupportedPolicy" })
public class UnsupportedPolicyRequestFaultType extends BaseFaultType {

    /** Lazily created; always access through {@link #getUnsupportedPolicy()}. */
    @XmlElement(name = "UnsupportedPolicy")
    protected List<QName> unsupportedPolicy;

    /**
     * Returns the live, lazily created list of unsupported policy names.
     *
     * <p>
     * Following JAXB convention, this accessor returns the internal list itself
     * rather than a snapshot, so any modification made by the caller is reflected
     * inside this object — which is also why no setter exists for this property.
     * To add an entry, call {@code getUnsupportedPolicy().add(newItem)}.
     *
     * @return the modifiable list of {@link QName} values, never {@code null}
     */
    public List<QName> getUnsupportedPolicy() {
        if (this.unsupportedPolicy == null) {
            this.unsupportedPolicy = new ArrayList<QName>();
        }
        return this.unsupportedPolicy;
    }

}
| apache-2.0 |
Autonomiccs/autonomiccs-platform | autonomic-allocation-algorithms/src/test/java/br/com/autonomiccs/autonomic/allocation/algorithms/impl/ClusterUpwardComparatorTest.java | 2604 |
/*
* This program is part of Autonomiccs "autonomic-platform",
* an open source autonomic cloud computing management platform.
* Copyright (C) 2016 Autonomiccs, Inc.
*
* Licensed to the Autonomiccs, Inc. under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The Autonomiccs, Inc. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package br.com.autonomiccs.autonomic.allocation.algorithms.impl;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.mockito.runners.MockitoJUnitRunner;
import br.com.autonomiccs.autonomic.algorithms.commons.beans.ClusterResources;
import br.com.autonomiccs.autonomic.allocation.algorithms.impl.ScoredClustersAllocationAlgorithmPreferenceForSmallHosts.ClusterUpwardComparator;
@RunWith(MockitoJUnitRunner.class)
public class ClusterUpwardComparatorTest {

    /** Comparator under test: orders clusters by ascending score. */
    private ClusterUpwardComparator clusterComparator = new ClusterUpwardComparator();

    @Test
    public void compareTestH1BiggerThanH2() {
        // A higher-scored cluster must sort after a lower-scored one.
        ClusterResources higherScored = mockClusterWithScore(1d);
        ClusterResources lowerScored = mockClusterWithScore(0d);
        int comparison = clusterComparator.compare(higherScored, lowerScored);
        assertScoresWereQueried(higherScored, lowerScored);
        Assert.assertEquals(1, comparison);
    }

    @Test
    public void compareTestH1SmallerThanH2() {
        // A lower-scored cluster must sort before a higher-scored one.
        ClusterResources lowerScored = mockClusterWithScore(0d);
        ClusterResources higherScored = mockClusterWithScore(1d);
        int comparison = clusterComparator.compare(lowerScored, higherScored);
        assertScoresWereQueried(lowerScored, higherScored);
        Assert.assertEquals(-1, comparison);
    }

    // Checks that the comparator actually consulted both cluster scores.
    private void assertScoresWereQueried(ClusterResources first, ClusterResources second) {
        Mockito.verify(first).getScore();
        Mockito.verify(second).getScore();
    }

    // Builds a Mockito mock whose getScore() reports the given value.
    private ClusterResources mockClusterWithScore(double score) {
        ClusterResources cluster = Mockito.mock(ClusterResources.class);
        Mockito.when(cluster.getScore()).thenReturn(score);
        return cluster;
    }
}
| apache-2.0 |
aws/aws-sdk-java | aws-java-sdk-cloudsearch/src/main/java/com/amazonaws/services/cloudsearchv2/model/AnalysisScheme.java | 6768 | /*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.cloudsearchv2.model;
import java.io.Serializable;
import java.util.Objects;

import javax.annotation.Generated;
/**
 * <p>
 * Configuration information for an analysis scheme. Each analysis scheme has a unique name and specifies the language
 * of the text to be processed. The following options can be configured for an analysis scheme: <code>Synonyms</code>,
 * <code>Stopwords</code>, <code>StemmingDictionary</code>, <code>JapaneseTokenizationDictionary</code> and
 * <code>AlgorithmicStemming</code>.
 * </p>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class AnalysisScheme implements Serializable, Cloneable {

    /** Unique name of this analysis scheme. */
    private String analysisSchemeName;
    /** Language of the text to be processed; see {@link AnalysisSchemeLanguage}. */
    private String analysisSchemeLanguage;
    /** Optional analysis tuning (synonyms, stopwords, stemming, ...). */
    private AnalysisOptions analysisOptions;

    /**
     * @param analysisSchemeName
     *        the unique name of this analysis scheme
     */
    public void setAnalysisSchemeName(String analysisSchemeName) {
        this.analysisSchemeName = analysisSchemeName;
    }

    /**
     * @return the unique name of this analysis scheme
     */
    public String getAnalysisSchemeName() {
        return this.analysisSchemeName;
    }

    /**
     * @param analysisSchemeName
     *        the unique name of this analysis scheme
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public AnalysisScheme withAnalysisSchemeName(String analysisSchemeName) {
        setAnalysisSchemeName(analysisSchemeName);
        return this;
    }

    /**
     * @param analysisSchemeLanguage
     *        the language code of the text to be processed
     * @see AnalysisSchemeLanguage
     */
    public void setAnalysisSchemeLanguage(String analysisSchemeLanguage) {
        this.analysisSchemeLanguage = analysisSchemeLanguage;
    }

    /**
     * @return the language code of the text to be processed
     * @see AnalysisSchemeLanguage
     */
    public String getAnalysisSchemeLanguage() {
        return this.analysisSchemeLanguage;
    }

    /**
     * @param analysisSchemeLanguage
     *        the language code of the text to be processed
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see AnalysisSchemeLanguage
     */
    public AnalysisScheme withAnalysisSchemeLanguage(String analysisSchemeLanguage) {
        setAnalysisSchemeLanguage(analysisSchemeLanguage);
        return this;
    }

    /**
     * @param analysisSchemeLanguage
     *        the language of the text to be processed
     * @see AnalysisSchemeLanguage
     */
    public void setAnalysisSchemeLanguage(AnalysisSchemeLanguage analysisSchemeLanguage) {
        withAnalysisSchemeLanguage(analysisSchemeLanguage);
    }

    /**
     * @param analysisSchemeLanguage
     *        the language of the text to be processed
     * @return Returns a reference to this object so that method calls can be chained together.
     * @see AnalysisSchemeLanguage
     */
    public AnalysisScheme withAnalysisSchemeLanguage(AnalysisSchemeLanguage analysisSchemeLanguage) {
        this.analysisSchemeLanguage = analysisSchemeLanguage.toString();
        return this;
    }

    /**
     * @param analysisOptions
     *        optional analysis tuning for this scheme
     */
    public void setAnalysisOptions(AnalysisOptions analysisOptions) {
        this.analysisOptions = analysisOptions;
    }

    /**
     * @return the optional analysis tuning for this scheme
     */
    public AnalysisOptions getAnalysisOptions() {
        return this.analysisOptions;
    }

    /**
     * @param analysisOptions
     *        optional analysis tuning for this scheme
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public AnalysisScheme withAnalysisOptions(AnalysisOptions analysisOptions) {
        setAnalysisOptions(analysisOptions);
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
     * redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getAnalysisSchemeName() != null)
            sb.append("AnalysisSchemeName: ").append(getAnalysisSchemeName()).append(",");
        if (getAnalysisSchemeLanguage() != null)
            sb.append("AnalysisSchemeLanguage: ").append(getAnalysisSchemeLanguage()).append(",");
        if (getAnalysisOptions() != null)
            sb.append("AnalysisOptions: ").append(getAnalysisOptions());
        sb.append("}");
        return sb.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (!(obj instanceof AnalysisScheme))
            return false;
        AnalysisScheme other = (AnalysisScheme) obj;
        // Objects.equals replaces the previous hand-rolled null-XOR checks;
        // semantics are identical, field by field.
        return Objects.equals(getAnalysisSchemeName(), other.getAnalysisSchemeName())
                && Objects.equals(getAnalysisSchemeLanguage(), other.getAnalysisSchemeLanguage())
                && Objects.equals(getAnalysisOptions(), other.getAnalysisOptions());
    }

    @Override
    public int hashCode() {
        // Objects.hash uses the same 31-based algorithm (initial value 1, null -> 0)
        // as the previous hand-rolled loop, so the produced values are unchanged.
        return Objects.hash(getAnalysisSchemeName(), getAnalysisSchemeLanguage(), getAnalysisOptions());
    }

    @Override
    public AnalysisScheme clone() {
        try {
            return (AnalysisScheme) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }

}
| apache-2.0 |
iam346/dejavu | src/main/java/hu/unideb/inf/dejavu/objects/HighScoreTable.java | 1366 | package hu.unideb.inf.dejavu.objects;
import java.util.List;
import java.util.stream.Collectors;
/**
 * Represents the high-score table: a list of {@link HighScoreRecord} entries that
 * can be filtered by game size and sorted by completion time or click count.
 *
 * @author gergo
 */
public class HighScoreTable {

    // List containing the score records; replaced wholesale by the sort methods.
    List<HighScoreRecord> table;

    /**
     * Creates a high-score table backed by the given record list.
     *
     * @param table
     *            list containing the score records
     */
    public HighScoreTable(List<HighScoreRecord> table) {
        super();
        this.table = table;
    }

    /**
     * Returns the list of records.
     *
     * @return the list of records
     */
    public List<HighScoreRecord> getTable() {
        return table;
    }

    /**
     * Keeps only records of the given game size and sorts them by completion time.
     *
     * @param dim
     *            game size, e.g. {@code "4x4"}
     */
    public void sortByTime(String dim) {
        int dimension = parseDimension(dim);
        // Filtering first means only the surviving records are sorted.
        table = table.stream()
                .filter(p -> p.getDimension() == dimension)
                .sorted((o1, o2) -> o1.getTime().compareTo(o2.getTime()))
                .collect(Collectors.toList());
    }

    /**
     * Keeps only records of the given game size and sorts them by click count.
     *
     * @param dim
     *            game size, e.g. {@code "4x4"}
     */
    public void sortByClick(String dim) {
        int dimension = parseDimension(dim);
        table = table.stream()
                .filter(p -> p.getDimension() == dimension)
                // Integer.compare avoids the overflow risk of subtracting ints.
                .sorted((o1, o2) -> Integer.compare(o1.getClicks(), o2.getClicks()))
                .collect(Collectors.toList());
    }

    // Extracts the leading numeric part of the size string ("12x12" -> 12).
    // The previous substring(0, 1) parsing only supported single-digit sizes;
    // single-digit inputs such as "4x4" still parse exactly as before.
    private static int parseDimension(String dim) {
        int digits = 0;
        while (digits < dim.length() && Character.isDigit(dim.charAt(digits))) {
            digits++;
        }
        return Integer.parseInt(dim.substring(0, digits));
    }
}
| apache-2.0 |
dahlstrom-g/intellij-community | platform/core-api/src/com/intellij/openapi/editor/markup/AttributesFlyweight.java | 13357 | // Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.openapi.editor.markup;
import com.intellij.openapi.util.Comparing;
import com.intellij.openapi.util.DefaultJDOMExternalizer;
import com.intellij.openapi.util.InvalidDataException;
import com.intellij.openapi.util.JDOMExternalizerUtil;
import com.intellij.util.ConcurrencyUtil;
import org.intellij.lang.annotations.JdkConstants;
import org.jdom.Element;
import org.jetbrains.annotations.ApiStatus;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.annotations.NotNull;
import java.awt.*;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
public final class AttributesFlyweight {
private static final ConcurrentMap<FlyweightKey, AttributesFlyweight> entries = new ConcurrentHashMap<>();
private static final ThreadLocal<FlyweightKey> ourKey = new ThreadLocal<>();
private final int myHashCode;
private final Color myForeground;
private final Color myBackground;
@JdkConstants.FontStyle
private final int myFontType;
private final Color myEffectColor;
private final EffectType myEffectType;
private final @NotNull Map<EffectType, Color> myAdditionalEffects;
private final Color myErrorStripeColor;
private static final class FlyweightKey implements Cloneable {
private Color foreground;
private Color background;
@JdkConstants.FontStyle
private int fontType;
private Color effectColor;
private EffectType effectType;
private Color errorStripeColor;
private @NotNull Map<EffectType, Color> myAdditionalEffects = Collections.emptyMap();
private FlyweightKey() {
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof FlyweightKey)) return false;
FlyweightKey key = (FlyweightKey)o;
if (fontType != key.fontType) return false;
if (!Objects.equals(background, key.background)) return false;
if (!Objects.equals(effectColor, key.effectColor)) return false;
if (effectType != key.effectType) return false;
if (!Objects.equals(errorStripeColor, key.errorStripeColor)) return false;
if (!Objects.equals(foreground, key.foreground)) return false;
if (!myAdditionalEffects.equals(key.myAdditionalEffects)) return false;
return true;
}
@Override
public int hashCode() {
int result = foreground != null ? foreground.hashCode() : 0;
result = 31 * result + (background != null ? background.hashCode() : 0);
result = 31 * result + fontType;
result = 31 * result + (effectColor != null ? effectColor.hashCode() : 0);
result = 31 * result + (effectType != null ? effectType.hashCode() : 0);
result = 31 * result + (errorStripeColor != null ? errorStripeColor.hashCode() : 0);
result = 31 * result + myAdditionalEffects.hashCode();
return result;
}
@Override
protected FlyweightKey clone() {
try {
return (FlyweightKey)super.clone();
}
catch (CloneNotSupportedException e) {
throw new RuntimeException(e);
}
}
}
public static @NotNull AttributesFlyweight create(Color foreground,
Color background,
@JdkConstants.FontStyle int fontType,
Color effectColor,
EffectType effectType,
Color errorStripeColor) {
return create(foreground, background, fontType, effectColor, effectType, Collections.emptyMap(), errorStripeColor);
}
@ApiStatus.Experimental
public static @NotNull AttributesFlyweight create(Color foreground,
Color background,
@JdkConstants.FontStyle int fontType,
Color effectColor,
EffectType effectType,
@NotNull Map<EffectType, Color> additionalEffects,
Color errorStripeColor) {
FlyweightKey key = ourKey.get();
if (key == null) {
ourKey.set(key = new FlyweightKey());
}
key.foreground = foreground;
key.background = background;
key.fontType = fontType;
key.effectColor = effectColor;
key.effectType = effectType;
key.myAdditionalEffects = additionalEffects.isEmpty() ? Collections.emptyMap() : new HashMap<>(additionalEffects);
key.errorStripeColor = errorStripeColor;
AttributesFlyweight flyweight = entries.get(key);
if (flyweight != null) {
return flyweight;
}
return ConcurrencyUtil.cacheOrGet(entries, key.clone(), new AttributesFlyweight(key));
}
private AttributesFlyweight(@NotNull FlyweightKey key) {
myForeground = key.foreground;
myBackground = key.background;
myFontType = key.fontType;
myEffectColor = key.effectColor;
myEffectType = key.effectType;
myErrorStripeColor = key.errorStripeColor;
myAdditionalEffects = key.myAdditionalEffects;
myHashCode = key.hashCode();
}
public static @NotNull AttributesFlyweight create(@NotNull Element element) throws InvalidDataException {
Color FOREGROUND = DefaultJDOMExternalizer.toColor(JDOMExternalizerUtil.readField(element, "FOREGROUND"));
Color BACKGROUND = DefaultJDOMExternalizer.toColor(JDOMExternalizerUtil.readField(element, "BACKGROUND"));
Color EFFECT_COLOR = DefaultJDOMExternalizer.toColor(JDOMExternalizerUtil.readField(element, "EFFECT_COLOR"));
Color ERROR_STRIPE_COLOR = DefaultJDOMExternalizer.toColor(JDOMExternalizerUtil.readField(element, "ERROR_STRIPE_COLOR"));
int fontType = DefaultJDOMExternalizer.toInt(JDOMExternalizerUtil.readField(element, "FONT_TYPE", "0"));
if (fontType < 0 || fontType > 3) {
fontType = 0;
}
int FONT_TYPE = fontType;
int EFFECT_TYPE = DefaultJDOMExternalizer.toInt(JDOMExternalizerUtil.readField(element, "EFFECT_TYPE", "0"));
// todo additionalEffects are not serialized yet, we have no user-controlled additional effects
return create(FOREGROUND, BACKGROUND, FONT_TYPE, EFFECT_COLOR, toEffectType(EFFECT_TYPE), Collections.emptyMap(), ERROR_STRIPE_COLOR);
}
private static void writeColor(Element element, String fieldName, Color color) {
if (color != null) {
String string = Integer.toString(color.getRGB() & 0xFFFFFF, 16);
JDOMExternalizerUtil.writeField(element, fieldName, string);
}
}
void writeExternal(@NotNull Element element) {
writeColor(element, "FOREGROUND", getForeground());
writeColor(element, "BACKGROUND", getBackground());
int fontType = getFontType();
if (fontType != 0) {
JDOMExternalizerUtil.writeField(element, "FONT_TYPE", String.valueOf(fontType));
}
writeColor(element, "EFFECT_COLOR", getEffectColor());
writeColor(element, "ERROR_STRIPE_COLOR", getErrorStripeColor());
int effectType = fromEffectType(getEffectType());
if (effectType != 0) {
JDOMExternalizerUtil.writeField(element, "EFFECT_TYPE", String.valueOf(effectType));
}
// todo additionalEffects are not serialized yet, we have no user-controlled additional effects
}
private static final int EFFECT_BORDER = 0;
private static final int EFFECT_LINE = 1;
private static final int EFFECT_WAVE = 2;
private static final int EFFECT_STRIKEOUT = 3;
private static final int EFFECT_BOLD_LINE = 4;
private static final int EFFECT_BOLD_DOTTED_LINE = 5;
private static int fromEffectType(EffectType effectType) {
if (effectType == null) return -1;
switch (effectType) {
case BOXED: return EFFECT_BORDER;
case LINE_UNDERSCORE: return EFFECT_LINE;
case BOLD_LINE_UNDERSCORE: return EFFECT_BOLD_LINE;
case STRIKEOUT: return EFFECT_STRIKEOUT;
case WAVE_UNDERSCORE: return EFFECT_WAVE;
case BOLD_DOTTED_LINE: return EFFECT_BOLD_DOTTED_LINE;
default: return -1;
}
}
private static EffectType toEffectType(int effectType) {
switch (effectType) {
case EFFECT_BORDER: return EffectType.BOXED;
case EFFECT_BOLD_LINE: return EffectType.BOLD_LINE_UNDERSCORE;
case EFFECT_LINE: return EffectType.LINE_UNDERSCORE;
case EFFECT_STRIKEOUT: return EffectType.STRIKEOUT;
case EFFECT_WAVE: return EffectType.WAVE_UNDERSCORE;
case EFFECT_BOLD_DOTTED_LINE: return EffectType.BOLD_DOTTED_LINE;
default: return null;
}
}
public Color getForeground() {
return myForeground;
}
public Color getBackground() {
return myBackground;
}
@JdkConstants.FontStyle
public int getFontType() {
return myFontType;
}
public Color getEffectColor() {
return myEffectColor;
}
public EffectType getEffectType() {
return myEffectType;
}
@NotNull
Map<EffectType, Color> getAdditionalEffects() {
return myAdditionalEffects;
}
/**
* @return true iff there are effects to draw in this attributes
*/
@ApiStatus.Experimental
public boolean hasEffects() {
return myEffectColor != null && myEffectType != null || !myAdditionalEffects.isEmpty();
}
/**
* @return all attributes effects, main and additional ones
*/
@NotNull
Map<EffectType, Color> getAllEffects() {
if (myAdditionalEffects.isEmpty()) {
return myEffectType == null || myEffectColor == null ? Collections.emptyMap() : Collections.singletonMap(myEffectType, myEffectColor);
}
TextAttributesEffectsBuilder builder = TextAttributesEffectsBuilder.create();
myAdditionalEffects.forEach(builder::coverWith);
builder.coverWith(myEffectType, myEffectColor);
return builder.getEffectsMap();
}
public Color getErrorStripeColor() {
return myErrorStripeColor;
}
public @NotNull AttributesFlyweight withForeground(Color foreground) {
return Comparing.equal(foreground, myForeground)
? this
: create(foreground, myBackground, myFontType, myEffectColor, myEffectType, myAdditionalEffects, myErrorStripeColor);
}
public @NotNull AttributesFlyweight withBackground(Color background) {
return Comparing.equal(background, myBackground)
? this
: create(myForeground, background, myFontType, myEffectColor, myEffectType, myAdditionalEffects, myErrorStripeColor);
}
public @NotNull AttributesFlyweight withFontType(@JdkConstants.FontStyle int fontType) {
return fontType == myFontType
? this
: create(myForeground, myBackground, fontType, myEffectColor, myEffectType, myAdditionalEffects, myErrorStripeColor);
}
public @NotNull AttributesFlyweight withEffectColor(Color effectColor) {
return Comparing.equal(effectColor, myEffectColor)
? this
: create(myForeground, myBackground, myFontType, effectColor, myEffectType, myAdditionalEffects, myErrorStripeColor);
}
/** Returns a flyweight identical to this one except for the given main-effect type; returns {@code this} when the value is unchanged. */
public @NotNull AttributesFlyweight withEffectType(EffectType effectType) {
  if (Comparing.equal(effectType, myEffectType)) {
    return this;
  }
  return create(myForeground, myBackground, myFontType, myEffectColor, effectType, myAdditionalEffects, myErrorStripeColor);
}
/** Returns a flyweight identical to this one except for the given error-stripe color; returns {@code this} when the value is unchanged. */
public @NotNull AttributesFlyweight withErrorStripeColor(Color stripeColor) {
  if (Comparing.equal(stripeColor, myErrorStripeColor)) {
    return this;
  }
  return create(myForeground, myBackground, myFontType, myEffectColor, myEffectType, myAdditionalEffects, stripeColor);
}
/**
 * Returns a flyweight identical to this one except for the given additional
 * effects; returns {@code this} when the map is unchanged.
 *
 * @see TextAttributes#setAdditionalEffects(Map)
 */
@ApiStatus.Experimental
public @NotNull AttributesFlyweight withAdditionalEffects(@NotNull Map<EffectType, Color> additionalEffects) {
  if (Comparing.equal(additionalEffects, myAdditionalEffects)) {
    return this;
  }
  return create(myForeground, myBackground, myFontType, myEffectColor, myEffectType, additionalEffects, myErrorStripeColor);
}
@Override
public boolean equals(Object o) {
  // Identity fast path, then exact-class check (subclasses never compare equal).
  if (this == o) return true;
  if (o == null || getClass() != o.getClass()) return false;
  AttributesFlyweight that = (AttributesFlyweight)o;
  // Field-by-field comparison folded into one expression; && short-circuits
  // in the same order the original early returns did.
  return myFontType == that.myFontType
         && Objects.equals(myBackground, that.myBackground)
         && Objects.equals(myEffectColor, that.myEffectColor)
         && myEffectType == that.myEffectType
         && Objects.equals(myErrorStripeColor, that.myErrorStripeColor)
         && Objects.equals(myForeground, that.myForeground)
         && myAdditionalEffects.equals(that.myAdditionalEffects);
}
@Override
public int hashCode() {
  // Returns the precomputed hash stored in myHashCode — presumably filled in at
  // construction over the same fields equals() compares; verify at the call site
  // that initializes the field.
  return myHashCode;
}
@NonNls
@Override
public String toString() {
  // Debug representation. Note: myAdditionalEffects is intentionally (?) absent
  // from this string — confirm whether it should be included.
  return "AttributesFlyweight{myForeground=" + myForeground + ", myBackground=" + myBackground + ", myFontType=" + myFontType +
         ", myEffectColor=" + myEffectColor + ", myEffectType=" + myEffectType + ", myErrorStripeColor=" + myErrorStripeColor + '}';
}
}
| apache-2.0 |
GEOINT/saasy | saasy-api/src/main/java/org/geoint/saasy/authz/Constrained.java | 990 | /*
* Copyright 2016 geoint.org.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.geoint.saasy.authz;
import java.util.Set;
/**
* An artifact that is constrained by one or more
* {@link SecurityConstraint constraints}.
*
* @author steve_siebert
*/
public interface Constrained {
    /**
     * Returns the security constraints that apply to this artifact.
     * <p>
     * Mutability and possible emptiness of the returned set are not specified
     * by this interface; implementations should document both.
     *
     * @return resource security constraints
     */
    Set<SecurityConstraint> getConstraints();
}
| apache-2.0 |
aws/aws-lambda-java-libs | aws-lambda-java-events/src/main/java/com/amazonaws/services/lambda/runtime/events/CognitoUserPoolPreTokenGenerationEvent.java | 4107 | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.lambda.runtime.events;
import lombok.*;
import java.util.Map;
/**
* Represent the class for the Cognito User Pool Pre Token Generation Lambda Trigger
*
* See <a href="https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-pre-token-generation.html">Pre Token Generation Lambda Trigger</a>
*
* @author jvdl <jvdl@amazon.com>
*/
// Lombok generates accessors/equals/hashCode (including the superclass fields
// via callSuper) and the no-arg constructor; the @Builder constructors below
// provide the fluent "withXxx" builders.
@Data
@EqualsAndHashCode(callSuper = true)
@NoArgsConstructor
public class CognitoUserPoolPreTokenGenerationEvent extends CognitoUserPoolEvent {
    /**
     * The request from the Amazon Cognito service.
     */
    private Request request;

    /**
     * The response from your Lambda trigger.
     */
    private Response response;

    @Builder(setterPrefix = "with")
    public CognitoUserPoolPreTokenGenerationEvent(
            String version,
            String triggerSource,
            String region,
            String userPoolId,
            String userName,
            CallerContext callerContext,
            Request request,
            Response response) {
        super(version, triggerSource, region, userPoolId, userName, callerContext);
        this.request = request;
        this.response = response;
    }

    /** Request payload: user attributes (inherited), client metadata, and current group configuration. */
    @Data
    @EqualsAndHashCode(callSuper = true)
    @NoArgsConstructor
    public static class Request extends CognitoUserPoolEvent.Request {
        /**
         * One or more key-value pairs that you can provide as custom input to the Lambda function that you specify for the pre token generation trigger.
         */
        private Map<String, String> clientMetadata;

        /**
         * The input object containing the current group configuration.
         */
        private GroupConfiguration groupConfiguration;

        @Builder(setterPrefix = "with")
        public Request(Map<String, String> userAttributes, Map<String, String> clientMetadata, GroupConfiguration groupConfiguration) {
            super(userAttributes);
            this.clientMetadata = clientMetadata;
            this.groupConfiguration = groupConfiguration;
        }
    }

    /** Group membership and IAM role information associated with the user the token is issued for. */
    @Data
    @AllArgsConstructor
    @Builder(setterPrefix = "with")
    @NoArgsConstructor
    public static class GroupConfiguration {
        /**
         * A list of the group names that are associated with the user that the identity token is issued for.
         */
        private String[] groupsToOverride;

        /**
         * A list of the current IAM roles associated with these groups.
         */
        private String[] iamRolesToOverride;

        /**
         * Indicates the preferred IAM role.
         */
        private String preferredRole;
    }

    /** Response payload wrapper; populate claimsOverrideDetails to modify the issued token. */
    @Data
    @AllArgsConstructor
    @Builder(setterPrefix = "with")
    @NoArgsConstructor
    public static class Response {
        private ClaimsOverrideDetails claimsOverrideDetails;
    }

    /** Claim additions, suppressions, and group overrides to apply to the token being generated. */
    @Data
    @AllArgsConstructor
    @Builder(setterPrefix = "with")
    @NoArgsConstructor
    public static class ClaimsOverrideDetails {
        /**
         * A map of one or more key-value pairs of claims to add or override.
         * For group related claims, use groupOverrideDetails instead.
         */
        private Map<String, String> claimsToAddOrOverride;

        /**
         * A list that contains claims to be suppressed from the identity token.
         */
        private String[] claimsToSuppress;

        /**
         * The output object containing the current group configuration.
         */
        private GroupConfiguration groupOverrideDetails;
    }
}
| apache-2.0 |
kris-davison/capricoinj | core/src/test/java/com/capricoinj/net/NetworkAbstractionTests.java | 33418 | /*
* Copyright 2013 Google Inc.
* Copyright 2014 Andreas Schildbach
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.capricoinj.net;
import com.capricoinj.core.Utils;
import com.capricoinj.net.BlockingClient;
import com.capricoinj.net.BlockingClientManager;
import com.capricoinj.net.ClientConnectionManager;
import com.capricoinj.net.MessageWriteTarget;
import com.capricoinj.net.NioClient;
import com.capricoinj.net.NioClientManager;
import com.capricoinj.net.NioServer;
import com.capricoinj.net.ProtobufParser;
import com.capricoinj.net.StreamParserFactory;
import com.capricoinj.paymentchannel.Protos;
import com.capricoinj.paymentchannel.Protos.TwoWayChannelMessage;
import com.google.common.util.concurrent.SettableFuture;
import com.google.protobuf.ByteString;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import javax.net.SocketFactory;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.Arrays;
import java.util.Collection;
import java.util.concurrent.atomic.AtomicBoolean;
import static com.google.common.base.Preconditions.checkState;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
/**
 * Integration tests for the client/server networking abstraction, run once per
 * client flavor via JUnit's Parameterized runner:
 * 0 = NioClientManager, 1 = BlockingClientManager, 2 = NioClient, 3 = BlockingClient.
 * Each test stands up a local NioServer on port 4243 that echoes protobuf
 * messages and uses SettableFutures to sequence/observe open, message and
 * close events on both ends.
 */
@RunWith(value = Parameterized.class)
public class NetworkAbstractionTests {
    private static final int CLIENT_MAJOR_VERSION = 1;
    // Set by listener callbacks when an unexpected event occurs; checked after each test.
    private AtomicBoolean fail;
    private final int clientType;
    // Non-null only for the manager-based flavors (0 and 1).
    private final ClientConnectionManager channels;

    /** One run per client flavor; see class comment for the meaning of each value. */
    @Parameterized.Parameters
    public static Collection<Integer[]> parameters() {
        return Arrays.asList(new Integer[]{0}, new Integer[]{1}, new Integer[]{2}, new Integer[]{3});
    }

    public NetworkAbstractionTests(Integer clientType) throws Exception {
        this.clientType = clientType;
        // NOTE(review): startAsync() is not followed by awaitRunning() here — the
        // manager may still be starting when the first test runs; confirm this is
        // tolerated by openConnection's sleep below.
        if (clientType == 0) {
            channels = new NioClientManager();
            channels.startAsync();
        } else if (clientType == 1) {
            channels = new BlockingClientManager();
            channels.startAsync();
        } else
            channels = null;
    }

    /**
     * Opens a connection to addr using the flavor under test and returns the
     * write target for pushing raw bytes/messages.
     */
    private MessageWriteTarget openConnection(SocketAddress addr, ProtobufParser<Protos.TwoWayChannelMessage> parser) throws Exception {
        if (clientType == 0 || clientType == 1) {
            channels.openConnection(addr, parser);
            // Crude wait for the manager to wire up the parser's write target.
            if (parser.writeTarget.get() == null)
                Thread.sleep(100);
            return (MessageWriteTarget) parser.writeTarget.get();
        } else if (clientType == 2)
            return new NioClient(addr, parser, 100);
        else if (clientType == 3)
            return new BlockingClient(addr, parser, 100, SocketFactory.getDefault(), null);
        else
            throw new RuntimeException();
    }

    @Before
    public void setUp() {
        fail = new AtomicBoolean(false);
    }

    /** Fails the test if any listener flagged an unexpected event during the run. */
    @After
    public void checkFail() {
        assertFalse(fail.get());
    }

    /**
     * When the server-side factory returns null for the first connection, that
     * connection must be closed without delivering any message; the next
     * connection (factory returns a real parser) must work normally.
     */
    @Test
    public void testNullGetNewParser() throws Exception {
        final SettableFuture<Void> client1ConnectionOpened = SettableFuture.create();
        final SettableFuture<Void> client1Disconnected = SettableFuture.create();
        final SettableFuture<Protos.TwoWayChannelMessage> client2MessageReceived = SettableFuture.create();
        final SettableFuture<Void> serverConnectionOpen = SettableFuture.create();
        final SettableFuture<Void> client2ConnectionOpened = SettableFuture.create();
        final SettableFuture<Void> serverConnectionClosed = SettableFuture.create();
        final SettableFuture<Void> client2Disconnected = SettableFuture.create();
        NioServer server = new NioServer(new StreamParserFactory() {
            boolean finishedFirst = false;
            @Override
            public ProtobufParser<TwoWayChannelMessage> getNewParser(InetAddress inetAddress, int port) {
                // First caller gets null (connection should be refused/closed);
                // subsequent callers get an echoing parser.
                if (!finishedFirst) {
                    finishedFirst = true;
                    return null;
                }
                return new ProtobufParser<Protos.TwoWayChannelMessage>(new ProtobufParser.Listener<Protos.TwoWayChannelMessage>() {
                    @Override
                    public void messageReceived(ProtobufParser<Protos.TwoWayChannelMessage> handler, Protos.TwoWayChannelMessage msg) {
                        handler.write(msg);
                    }

                    @Override
                    public void connectionOpen(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        serverConnectionOpen.set(null);
                    }

                    @Override
                    public void connectionClosed(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        serverConnectionClosed.set(null);
                    }
                }, Protos.TwoWayChannelMessage.getDefaultInstance(), 1000, 0);
            }
        }, new InetSocketAddress("localhost", 4243));
        server.startAsync();
        server.awaitRunning();
        ProtobufParser<Protos.TwoWayChannelMessage> clientHandler = new ProtobufParser<Protos.TwoWayChannelMessage>(
                new ProtobufParser.Listener<Protos.TwoWayChannelMessage>() {
                    @Override
                    public synchronized void messageReceived(ProtobufParser<Protos.TwoWayChannelMessage> handler, Protos.TwoWayChannelMessage msg) {
                        // No message may arrive on the rejected connection.
                        fail.set(true);
                    }

                    @Override
                    public void connectionOpen(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        client1ConnectionOpened.set(null);
                    }

                    @Override
                    public void connectionClosed(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        client1Disconnected.set(null);
                    }
                }, Protos.TwoWayChannelMessage.getDefaultInstance(), 1000, 0);
        openConnection(new InetSocketAddress("localhost", 4243), clientHandler);
        client1ConnectionOpened.get();
        // The server must drop the first connection because the factory returned null.
        client1Disconnected.get();
        clientHandler = new ProtobufParser<Protos.TwoWayChannelMessage>(
                new ProtobufParser.Listener<Protos.TwoWayChannelMessage>() {
                    @Override
                    public synchronized void messageReceived(ProtobufParser<Protos.TwoWayChannelMessage> handler, Protos.TwoWayChannelMessage msg) {
                        if (client2MessageReceived.isDone())
                            fail.set(true);
                        client2MessageReceived.set(msg);
                    }

                    @Override
                    public void connectionOpen(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        client2ConnectionOpened.set(null);
                    }

                    @Override
                    public void connectionClosed(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        client2Disconnected.set(null);
                    }
                }, Protos.TwoWayChannelMessage.getDefaultInstance(), 1000, 0);
        MessageWriteTarget client = openConnection(new InetSocketAddress("localhost", 4243), clientHandler);
        serverConnectionOpen.get();
        client2ConnectionOpened.get();
        Protos.TwoWayChannelMessage msg = Protos.TwoWayChannelMessage.newBuilder().setType(Protos.TwoWayChannelMessage.MessageType.CHANNEL_OPEN).build();
        clientHandler.write(msg);
        // Second connection echoes normally.
        assertEquals(msg, client2MessageReceived.get());
        client.closeConnection();
        serverConnectionClosed.get();
        client2Disconnected.get();
        server.stopAsync().awaitTerminated();
    }

    /** Tests creating a basic server, opening a client connection and sending a few messages. */
    @Test
    public void basicClientServerTest() throws Exception {
        // Tests creating a basic server, opening a client connection and sending a few messages
        final SettableFuture<Void> serverConnectionOpen = SettableFuture.create();
        final SettableFuture<Void> clientConnectionOpen = SettableFuture.create();
        final SettableFuture<Void> serverConnectionClosed = SettableFuture.create();
        final SettableFuture<Void> clientConnectionClosed = SettableFuture.create();
        final SettableFuture<Protos.TwoWayChannelMessage> clientMessage1Received = SettableFuture.create();
        final SettableFuture<Protos.TwoWayChannelMessage> clientMessage2Received = SettableFuture.create();
        NioServer server = new NioServer(new StreamParserFactory() {
            @Override
            public ProtobufParser<TwoWayChannelMessage> getNewParser(InetAddress inetAddress, int port) {
                return new ProtobufParser<Protos.TwoWayChannelMessage>(new ProtobufParser.Listener<Protos.TwoWayChannelMessage>() {
                    @Override
                    public void messageReceived(ProtobufParser<Protos.TwoWayChannelMessage> handler, Protos.TwoWayChannelMessage msg) {
                        // Echo every incoming message back twice.
                        handler.write(msg);
                        handler.write(msg);
                    }

                    @Override
                    public void connectionOpen(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        serverConnectionOpen.set(null);
                    }

                    @Override
                    public void connectionClosed(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        serverConnectionClosed.set(null);
                    }
                }, Protos.TwoWayChannelMessage.getDefaultInstance(), 1000, 0);
            }
        }, new InetSocketAddress("localhost", 4243));
        server.startAsync();
        server.awaitRunning();
        ProtobufParser<Protos.TwoWayChannelMessage> clientHandler = new ProtobufParser<Protos.TwoWayChannelMessage>(
                new ProtobufParser.Listener<Protos.TwoWayChannelMessage>() {
                    @Override
                    public synchronized void messageReceived(ProtobufParser<Protos.TwoWayChannelMessage> handler, Protos.TwoWayChannelMessage msg) {
                        if (clientMessage1Received.isDone())
                            clientMessage2Received.set(msg);
                        else
                            clientMessage1Received.set(msg);
                    }

                    @Override
                    public void connectionOpen(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        clientConnectionOpen.set(null);
                    }

                    @Override
                    public void connectionClosed(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        clientConnectionClosed.set(null);
                    }
                }, Protos.TwoWayChannelMessage.getDefaultInstance(), 1000, 0);
        MessageWriteTarget client = openConnection(new InetSocketAddress("localhost", 4243), clientHandler);
        clientConnectionOpen.get();
        serverConnectionOpen.get();
        Protos.TwoWayChannelMessage msg = Protos.TwoWayChannelMessage.newBuilder().setType(Protos.TwoWayChannelMessage.MessageType.CHANNEL_OPEN).build();
        clientHandler.write(msg);
        // The server echoes twice, so we expect the same message back twice.
        assertEquals(msg, clientMessage1Received.get());
        assertEquals(msg, clientMessage2Received.get());
        client.closeConnection();
        serverConnectionClosed.get();
        clientConnectionClosed.get();
        server.stopAsync();
        server.awaitTerminated();
        assertFalse(server.isRunning());
    }

    /**
     * Tests socket timeouts: a server-side 10ms timeout closes an idle
     * connection; disabling the timeout (setSocketTimeout(0)) keeps the second
     * connection alive until the client re-enables a timeout.
     */
    @Test
    public void basicTimeoutTest() throws Exception {
        // Tests various timeout scenarios
        final SettableFuture<Void> serverConnection1Open = SettableFuture.create();
        final SettableFuture<Void> clientConnection1Open = SettableFuture.create();
        final SettableFuture<Void> serverConnection1Closed = SettableFuture.create();
        final SettableFuture<Void> clientConnection1Closed = SettableFuture.create();
        final SettableFuture<Void> serverConnection2Open = SettableFuture.create();
        final SettableFuture<Void> clientConnection2Open = SettableFuture.create();
        final SettableFuture<Void> serverConnection2Closed = SettableFuture.create();
        final SettableFuture<Void> clientConnection2Closed = SettableFuture.create();
        NioServer server = new NioServer(new StreamParserFactory() {
            @Override
            public ProtobufParser<Protos.TwoWayChannelMessage> getNewParser(InetAddress inetAddress, int port) {
                return new ProtobufParser<Protos.TwoWayChannelMessage>(new ProtobufParser.Listener<Protos.TwoWayChannelMessage>() {
                    @Override
                    public void messageReceived(ProtobufParser<Protos.TwoWayChannelMessage> handler, Protos.TwoWayChannelMessage msg) {
                        fail.set(true);
                    }

                    @Override
                    public synchronized void connectionOpen(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        // For the second connection, disable the server-side timeout so
                        // only the client-side timeout can close it.
                        if (serverConnection1Open.isDone()) {
                            handler.setSocketTimeout(0);
                            serverConnection2Open.set(null);
                        } else
                            serverConnection1Open.set(null);
                    }

                    @Override
                    public synchronized void connectionClosed(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        if (serverConnection1Closed.isDone()) {
                            serverConnection2Closed.set(null);
                        } else
                            serverConnection1Closed.set(null);
                    }
                }, Protos.TwoWayChannelMessage.getDefaultInstance(), 1000, 10);
            }
        }, new InetSocketAddress("localhost", 4243));
        server.startAsync();
        server.awaitRunning();
        openConnection(new InetSocketAddress("localhost", 4243), new ProtobufParser<Protos.TwoWayChannelMessage>(
                new ProtobufParser.Listener<Protos.TwoWayChannelMessage>() {
                    @Override
                    public void messageReceived(ProtobufParser<Protos.TwoWayChannelMessage> handler, Protos.TwoWayChannelMessage msg) {
                        fail.set(true);
                    }

                    @Override
                    public void connectionOpen(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        clientConnection1Open.set(null);
                    }

                    @Override
                    public void connectionClosed(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        clientConnection1Closed.set(null);
                    }
                }, Protos.TwoWayChannelMessage.getDefaultInstance(), 1000, 0));
        clientConnection1Open.get();
        serverConnection1Open.get();
        // Measure how long the server's 10ms timeout takes to fire...
        long closeDelayStart = System.currentTimeMillis();
        clientConnection1Closed.get();
        serverConnection1Closed.get();
        long closeDelayFinish = System.currentTimeMillis();
        ProtobufParser<Protos.TwoWayChannelMessage> client2Handler = new ProtobufParser<Protos.TwoWayChannelMessage>(
                new ProtobufParser.Listener<Protos.TwoWayChannelMessage>() {
                    @Override
                    public void messageReceived(ProtobufParser<Protos.TwoWayChannelMessage> handler, Protos.TwoWayChannelMessage msg) {
                        fail.set(true);
                    }

                    @Override
                    public void connectionOpen(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        clientConnection2Open.set(null);
                    }

                    @Override
                    public void connectionClosed(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        clientConnection2Closed.set(null);
                    }
                }, Protos.TwoWayChannelMessage.getDefaultInstance(), 1000, 0);
        openConnection(new InetSocketAddress("localhost", 4243), client2Handler);
        clientConnection2Open.get();
        serverConnection2Open.get();
        // ...then wait 10x that long to gain confidence the second connection
        // (with timeouts disabled) stays open. NOTE(review): timing-based and
        // potentially flaky on loaded machines.
        Thread.sleep((closeDelayFinish - closeDelayStart) * 10);
        assertFalse(clientConnection2Closed.isDone() || serverConnection2Closed.isDone());
        // Re-enabling a client-side timeout must close the connection on both ends.
        client2Handler.setSocketTimeout(10);
        clientConnection2Closed.get();
        serverConnection2Closed.get();
        server.stopAsync();
        server.awaitTerminated();
    }

    @Test
    public void largeDataTest() throws Exception {
        /** Test various large-data handling, essentially testing {@link ProtobufParser#receiveBytes(java.nio.ByteBuffer)} */
        final SettableFuture<Void> serverConnectionOpen = SettableFuture.create();
        final SettableFuture<Void> clientConnectionOpen = SettableFuture.create();
        final SettableFuture<Void> serverConnectionClosed = SettableFuture.create();
        final SettableFuture<Void> clientConnectionClosed = SettableFuture.create();
        final SettableFuture<Protos.TwoWayChannelMessage> clientMessage1Received = SettableFuture.create();
        final SettableFuture<Protos.TwoWayChannelMessage> clientMessage2Received = SettableFuture.create();
        final SettableFuture<Protos.TwoWayChannelMessage> clientMessage3Received = SettableFuture.create();
        final SettableFuture<Protos.TwoWayChannelMessage> clientMessage4Received = SettableFuture.create();
        NioServer server = new NioServer(new StreamParserFactory() {
            @Override
            public ProtobufParser<Protos.TwoWayChannelMessage> getNewParser(InetAddress inetAddress, int port) {
                return new ProtobufParser<Protos.TwoWayChannelMessage>(new ProtobufParser.Listener<Protos.TwoWayChannelMessage>() {
                    @Override
                    public void messageReceived(ProtobufParser<Protos.TwoWayChannelMessage> handler, Protos.TwoWayChannelMessage msg) {
                        handler.write(msg);
                    }

                    @Override
                    public void connectionOpen(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        serverConnectionOpen.set(null);
                    }

                    @Override
                    public void connectionClosed(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        serverConnectionClosed.set(null);
                    }
                }, Protos.TwoWayChannelMessage.getDefaultInstance(), 0x10000, 0);
            }
        }, new InetSocketAddress("localhost", 4243));
        server.startAsync();
        server.awaitRunning();
        ProtobufParser<Protos.TwoWayChannelMessage> clientHandler = new ProtobufParser<Protos.TwoWayChannelMessage>(
                new ProtobufParser.Listener<Protos.TwoWayChannelMessage>() {
                    @Override
                    public synchronized void messageReceived(ProtobufParser<Protos.TwoWayChannelMessage> handler, Protos.TwoWayChannelMessage msg) {
                        // Capture up to four echoed messages in order; a fifth is an error.
                        if (clientMessage1Received.isDone()) {
                            if (clientMessage2Received.isDone()) {
                                if (clientMessage3Received.isDone()) {
                                    if (clientMessage4Received.isDone())
                                        fail.set(true);
                                    clientMessage4Received.set(msg);
                                } else
                                    clientMessage3Received.set(msg);
                            } else
                                clientMessage2Received.set(msg);
                        } else
                            clientMessage1Received.set(msg);
                    }

                    @Override
                    public void connectionOpen(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        clientConnectionOpen.set(null);
                    }

                    @Override
                    public void connectionClosed(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        clientConnectionClosed.set(null);
                    }
                }, Protos.TwoWayChannelMessage.getDefaultInstance(), 0x10000, 0);
        MessageWriteTarget client = openConnection(new InetSocketAddress("localhost", 4243), clientHandler);
        clientConnectionOpen.get();
        serverConnectionOpen.get();
        // Large message that is larger than buffer and equal to maximum message size
        Protos.TwoWayChannelMessage msg = Protos.TwoWayChannelMessage.newBuilder()
                .setType(Protos.TwoWayChannelMessage.MessageType.CHANNEL_OPEN)
                .setClientVersion(Protos.ClientVersion.newBuilder()
                        .setMajor(CLIENT_MAJOR_VERSION)
                        .setPreviousChannelContractHash(ByteString.copyFrom(new byte[0x10000 - 12])))
                .build();
        // Small message that fits in the buffer
        Protos.TwoWayChannelMessage msg2 = Protos.TwoWayChannelMessage.newBuilder()
                .setType(Protos.TwoWayChannelMessage.MessageType.CHANNEL_OPEN)
                .setClientVersion(Protos.ClientVersion.newBuilder()
                        .setMajor(CLIENT_MAJOR_VERSION)
                        .setPreviousChannelContractHash(ByteString.copyFrom(new byte[1])))
                .build();
        // Break up the message into chunks to simulate packet network (with strange MTUs...)
        // Wire format: 4-byte big-endian length prefix followed by the serialized message.
        byte[] messageBytes = msg.toByteArray();
        byte[] messageLength = new byte[4];
        Utils.uint32ToByteArrayBE(messageBytes.length, messageLength, 0);
        client.writeBytes(new byte[]{messageLength[0], messageLength[1]});
        Thread.sleep(10);
        client.writeBytes(new byte[]{messageLength[2], messageLength[3]});
        Thread.sleep(10);
        client.writeBytes(new byte[]{messageBytes[0], messageBytes[1]});
        Thread.sleep(10);
        client.writeBytes(Arrays.copyOfRange(messageBytes, 2, messageBytes.length - 1));
        Thread.sleep(10);
        // Now send the end of msg + msg2 + msg3 all at once
        byte[] messageBytes2 = msg2.toByteArray();
        byte[] messageLength2 = new byte[4];
        Utils.uint32ToByteArrayBE(messageBytes2.length, messageLength2, 0);
        byte[] sendBytes = Arrays.copyOf(new byte[] {messageBytes[messageBytes.length-1]}, 1 + messageBytes2.length*2 + messageLength2.length*2);
        System.arraycopy(messageLength2, 0, sendBytes, 1, 4);
        System.arraycopy(messageBytes2, 0, sendBytes, 5, messageBytes2.length);
        System.arraycopy(messageLength2, 0, sendBytes, 5 + messageBytes2.length, 4);
        System.arraycopy(messageBytes2, 0, sendBytes, 9 + messageBytes2.length, messageBytes2.length);
        client.writeBytes(sendBytes);
        assertEquals(msg, clientMessage1Received.get());
        assertEquals(msg2, clientMessage2Received.get());
        assertEquals(msg2, clientMessage3Received.get());
        // Now resent msg2 in chunks, by itself
        Utils.uint32ToByteArrayBE(messageBytes2.length, messageLength2, 0);
        client.writeBytes(new byte[]{messageLength2[0], messageLength2[1]});
        Thread.sleep(10);
        client.writeBytes(new byte[]{messageLength2[2], messageLength2[3]});
        Thread.sleep(10);
        client.writeBytes(new byte[]{messageBytes2[0], messageBytes2[1]});
        Thread.sleep(10);
        client.writeBytes(new byte[]{messageBytes2[2], messageBytes2[3]});
        Thread.sleep(10);
        client.writeBytes(Arrays.copyOfRange(messageBytes2, 4, messageBytes2.length));
        assertEquals(msg2, clientMessage4Received.get());
        // One byte over the maximum message size: the parser must refuse to write it...
        Protos.TwoWayChannelMessage msg5 = Protos.TwoWayChannelMessage.newBuilder()
                .setType(Protos.TwoWayChannelMessage.MessageType.CHANNEL_OPEN)
                .setClientVersion(Protos.ClientVersion.newBuilder()
                        .setMajor(CLIENT_MAJOR_VERSION)
                        .setPreviousChannelContractHash(ByteString.copyFrom(new byte[0x10000 - 11])))
                .build();
        try {
            clientHandler.write(msg5);
        } catch (IllegalStateException e) {}
        // NOTE(review): the empty catch above deliberately ignores the expected
        // oversize rejection, but nothing asserts the exception actually fired.
        // Override max size and make sure the server drops our connection
        byte[] messageLength5 = new byte[4];
        Utils.uint32ToByteArrayBE(msg5.toByteArray().length, messageLength5, 0);
        client.writeBytes(messageLength5);
        serverConnectionClosed.get();
        clientConnectionClosed.get();
        server.stopAsync();
        server.awaitTerminated();
    }

    /**
     * Opens three concurrent client connections against one server, verifies
     * each echoes independently, and that closing one connection does not
     * disturb the others — including a deliberate shutdown race (see below).
     */
    @Test
    public void testConnectionEventHandlers() throws Exception {
        final SettableFuture<Void> serverConnection1Open = SettableFuture.create();
        final SettableFuture<Void> serverConnection2Open = SettableFuture.create();
        final SettableFuture<Void> serverConnection3Open = SettableFuture.create();
        final SettableFuture<Void> client1ConnectionOpen = SettableFuture.create();
        final SettableFuture<Void> client2ConnectionOpen = SettableFuture.create();
        final SettableFuture<Void> client3ConnectionOpen = SettableFuture.create();
        final SettableFuture<Void> serverConnectionClosed1 = SettableFuture.create();
        final SettableFuture<Void> serverConnectionClosed2 = SettableFuture.create();
        final SettableFuture<Void> serverConnectionClosed3 = SettableFuture.create();
        final SettableFuture<Void> client1ConnectionClosed = SettableFuture.create();
        final SettableFuture<Void> client2ConnectionClosed = SettableFuture.create();
        final SettableFuture<Void> client3ConnectionClosed = SettableFuture.create();
        final SettableFuture<Protos.TwoWayChannelMessage> client1MessageReceived = SettableFuture.create();
        final SettableFuture<Protos.TwoWayChannelMessage> client2MessageReceived = SettableFuture.create();
        final SettableFuture<Protos.TwoWayChannelMessage> client3MessageReceived = SettableFuture.create();
        NioServer server = new NioServer(new StreamParserFactory() {
            @Override
            public ProtobufParser<Protos.TwoWayChannelMessage> getNewParser(InetAddress inetAddress, int port) {
                return new ProtobufParser<Protos.TwoWayChannelMessage>(new ProtobufParser.Listener<Protos.TwoWayChannelMessage>() {
                    @Override
                    public void messageReceived(ProtobufParser<Protos.TwoWayChannelMessage> handler, Protos.TwoWayChannelMessage msg) {
                        handler.write(msg);
                    }

                    @Override
                    public synchronized void connectionOpen(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        if (serverConnection1Open.isDone()) {
                            if (serverConnection2Open.isDone())
                                serverConnection3Open.set(null);
                            else
                                serverConnection2Open.set(null);
                        } else
                            serverConnection1Open.set(null);
                    }

                    @Override
                    public synchronized void connectionClosed(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        if (serverConnectionClosed1.isDone()) {
                            if (serverConnectionClosed2.isDone()) {
                                // Each close event must fire exactly once.
                                checkState(!serverConnectionClosed3.isDone());
                                serverConnectionClosed3.set(null);
                            } else
                                serverConnectionClosed2.set(null);
                        } else
                            serverConnectionClosed1.set(null);
                    }
                }, Protos.TwoWayChannelMessage.getDefaultInstance(), 1000, 0);
            }
        }, new InetSocketAddress("localhost", 4243));
        server.startAsync();
        server.awaitRunning();
        ProtobufParser<Protos.TwoWayChannelMessage> client1Handler = new ProtobufParser<Protos.TwoWayChannelMessage>(
                new ProtobufParser.Listener<Protos.TwoWayChannelMessage>() {
                    @Override
                    public void messageReceived(ProtobufParser<Protos.TwoWayChannelMessage> handler, Protos.TwoWayChannelMessage msg) {
                        client1MessageReceived.set(msg);
                    }

                    @Override
                    public void connectionOpen(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        client1ConnectionOpen.set(null);
                    }

                    @Override
                    public void connectionClosed(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        client1ConnectionClosed.set(null);
                    }
                }, Protos.TwoWayChannelMessage.getDefaultInstance(), 1000, 0);
        MessageWriteTarget client1 = openConnection(new InetSocketAddress("localhost", 4243), client1Handler);
        client1ConnectionOpen.get();
        serverConnection1Open.get();
        ProtobufParser<Protos.TwoWayChannelMessage> client2Handler = new ProtobufParser<Protos.TwoWayChannelMessage>(
                new ProtobufParser.Listener<Protos.TwoWayChannelMessage>() {
                    @Override
                    public void messageReceived(ProtobufParser<Protos.TwoWayChannelMessage> handler, Protos.TwoWayChannelMessage msg) {
                        client2MessageReceived.set(msg);
                    }

                    @Override
                    public void connectionOpen(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        client2ConnectionOpen.set(null);
                    }

                    @Override
                    public void connectionClosed(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        client2ConnectionClosed.set(null);
                    }
                }, Protos.TwoWayChannelMessage.getDefaultInstance(), 1000, 0);
        openConnection(new InetSocketAddress("localhost", 4243), client2Handler);
        client2ConnectionOpen.get();
        serverConnection2Open.get();
        ProtobufParser<Protos.TwoWayChannelMessage> client3Handler = new ProtobufParser<Protos.TwoWayChannelMessage>(
                new ProtobufParser.Listener<Protos.TwoWayChannelMessage>() {
                    @Override
                    public void messageReceived(ProtobufParser<Protos.TwoWayChannelMessage> handler, Protos.TwoWayChannelMessage msg) {
                        client3MessageReceived.set(msg);
                    }

                    @Override
                    public void connectionOpen(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        client3ConnectionOpen.set(null);
                    }

                    @Override
                    public synchronized void connectionClosed(ProtobufParser<Protos.TwoWayChannelMessage> handler) {
                        checkState(!client3ConnectionClosed.isDone());
                        client3ConnectionClosed.set(null);
                    }
                }, Protos.TwoWayChannelMessage.getDefaultInstance(), 1000, 0);
        // Client 3 is always a direct NioClient so the test can race its close
        // against the server's selector shutdown below.
        NioClient client3 = new NioClient(new InetSocketAddress("localhost", 4243), client3Handler, 0);
        client3ConnectionOpen.get();
        serverConnection3Open.get();
        Protos.TwoWayChannelMessage msg = Protos.TwoWayChannelMessage.newBuilder().setType(Protos.TwoWayChannelMessage.MessageType.CHANNEL_OPEN).build();
        client1Handler.write(msg);
        assertEquals(msg, client1MessageReceived.get());
        Protos.TwoWayChannelMessage msg2 = Protos.TwoWayChannelMessage.newBuilder().setType(Protos.TwoWayChannelMessage.MessageType.INITIATE).build();
        client2Handler.write(msg2);
        assertEquals(msg2, client2MessageReceived.get());
        client1.closeConnection();
        serverConnectionClosed1.get();
        client1ConnectionClosed.get();
        Protos.TwoWayChannelMessage msg3 = Protos.TwoWayChannelMessage.newBuilder().setType(Protos.TwoWayChannelMessage.MessageType.CLOSE).build();
        client3Handler.write(msg3);
        assertEquals(msg3, client3MessageReceived.get());
        // Try to create a race condition by triggering handlerThread closing and client3 closing at the same time
        // This often triggers ClosedByInterruptException in handleKey
        server.stopAsync();
        server.selector.wakeup();
        client3.closeConnection();
        client3ConnectionClosed.get();
        serverConnectionClosed3.get();
        // NOTE(review): stopAsync()/awaitTerminated() are invoked repeatedly below;
        // Guava Service tolerates repeated stop requests, but confirm the
        // duplication is intentional rather than a copy/paste leftover.
        server.stopAsync();
        server.awaitTerminated();
        client2ConnectionClosed.get();
        serverConnectionClosed2.get();
        server.stopAsync();
        server.awaitTerminated();
    }
}
| apache-2.0 |
fuliang/leetcode | src/RemoveElement.java | 713 | /**
* Created by Administrator on 2015/4/5.
*/
public class RemoveElement {
public int removeElement(int[] A, int elem) {
int eIndex = 0;
int i = 0;
while (i < A.length) {
while (i < A.length && A[i] != elem) {
A[eIndex++] = A[i++];
}
while (i < A.length && A[i] == elem) {
++i;
}
}
return eIndex;
}
public static void main(String[] args) {
RemoveElement sol = new RemoveElement();
int[] a = {};
int len = sol.removeElement(a, 1);
for (int i = 0; i < len; i++) {
System.out.println(a[i]);
}
}
} | apache-2.0 |
jmochel/c3 | cli/src/main/java/org/saltations/c3/cli/OutputDirValidation.java | 578 | package org.saltations.c3.cli;
import com.beust.jcommander.IValueValidator;
import com.beust.jcommander.ParameterException;
import java.io.File;
import static java.lang.String.format;
/*
 * Validates that the supplied output directory exists and is a folder.
 */
public class OutputDirValidation implements IValueValidator<File>
{
    /**
     * @param name  name of the command-line parameter being validated
     * @param value the candidate output directory
     * @throws ParameterException if {@code value} does not exist or is not a directory
     */
    @Override
    public void validate(String name, File value) throws ParameterException
    {
        // BUG FIX: the original condition was inverted — it rejected exactly the
        // valid case (an existing folder) and accepted everything else, contradicting
        // the class comment "Verify that it is a folder and it exists".
        if (!value.exists() || !value.isDirectory())
        {
            throw new ParameterException(format("File <%s> is not an existing folder.", value.getName()));
        }
    }
}
| apache-2.0 |
sonatype/sisu-guice | core/src/com/google/inject/internal/DeclaredMembers.java | 3462 | /*
* Copyright (C) 2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.inject.internal;
import com.google.common.collect.ComparisonChain;
import com.google.common.collect.Ordering;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.util.Arrays;
/**
* Utility class for retrieving declared fields or methods in a particular order, because the JVM
* doesn't guarantee ordering for listing declared methods. We don't externally guarantee an
* ordering, but having a consistent ordering allows deterministic behavior and simpler tests.
*/
public final class DeclaredMembers {

  private DeclaredMembers() {}

  /** Returns {@code type}'s declared fields, sorted into a deterministic order. */
  public static Field[] getDeclaredFields(Class<?> type) {
    Field[] fields = type.getDeclaredFields();
    Arrays.sort(fields, FIELD_ORDERING);
    return fields;
  }

  /** Returns {@code type}'s declared methods, sorted into a deterministic order. */
  public static Method[] getDeclaredMethods(Class<?> type) {
    Method[] methods = type.getDeclaredMethods();
    Arrays.sort(methods, METHOD_ORDERING);
    return methods;
  }

  /**
   * An ordering suitable for comparing two classes if they are loaded by the same classloader.
   *
   * <p>Within a single classloader there can only be one class with a given name, so comparing
   * fully-qualified names is sufficient.
   */
  private static final Ordering<Class<?>> CLASS_ORDERING =
      new Ordering<Class<?>>() {
        @Override
        public int compare(Class<?> left, Class<?> right) {
          return left.getName().compareTo(right.getName());
        }
      };

  /**
   * An ordering suitable for comparing two fields if they are owned by the same class.
   *
   * <p>Within a single class the non-generic field signature — name plus type — is unique.
   */
  private static final Ordering<Field> FIELD_ORDERING =
      new Ordering<Field>() {
        @Override
        public int compare(Field left, Field right) {
          int byName = left.getName().compareTo(right.getName());
          if (byName != 0) {
            return byName;
          }
          return CLASS_ORDERING.compare(left.getType(), right.getType());
        }
      };

  /**
   * An ordering suitable for comparing two methods if they are owned by the same class.
   *
   * <p>Within a single class the non-generic method signature — name, return type and parameter
   * types — is unique.
   */
  private static final Ordering<Method> METHOD_ORDERING =
      new Ordering<Method>() {
        @Override
        public int compare(Method left, Method right) {
          int result = left.getName().compareTo(right.getName());
          if (result != 0) {
            return result;
          }
          result = CLASS_ORDERING.compare(left.getReturnType(), right.getReturnType());
          if (result != 0) {
            return result;
          }
          // Compare parameter lists element by element, shorter list first on ties.
          return CLASS_ORDERING
              .lexicographical()
              .compare(
                  Arrays.asList(left.getParameterTypes()),
                  Arrays.asList(right.getParameterTypes()));
        }
      };
}
| apache-2.0 |
IHTSDO/snow-owl | snomed/com.b2international.snowowl.snomed.semanticengine.simpleast/src/com/b2international/snowowl/semanticengine/simpleast/normalform/AttributeClauseList.java | 1347 | /*
* Copyright 2011-2015 B2i Healthcare Pte Ltd, http://b2i.sg
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.b2international.snowowl.semanticengine.simpleast.normalform;
import java.util.List;
import com.b2international.snowowl.snomed.dsl.query.queryast.AttributeClause;
import com.google.common.collect.Lists;
/**
* Simple class to represent an attribute group.
*/
public class AttributeClauseList {
private final List<AttributeClause> attributeClauses = Lists.newArrayList();
public List<AttributeClause> getAttributeClauses() {
return attributeClauses;
}
@Override
public String toString() {
StringBuffer buf = new StringBuffer();
buf.append('{');
for (AttributeClause attribute : attributeClauses) {
buf.append(attribute.toString() );
}
buf.append('}');
return buf.toString();
}
} | apache-2.0 |
ckwen/je | je-spring-boot/je-spring-boot-web-exception/src/main/java/com/github/ckwen/je/spring/boot/web/exception/dto/ErrorInfo.java | 989 | package com.github.ckwen.je.spring.boot.web.exception.dto;
/**
 * Generic envelope for error responses produced by web endpoints. Carries a
 * numeric status code, a human-readable message, the originating request URL
 * and an optional payload of type {@code T}.
 */
public class ErrorInfo<T> {

    /** Status code meaning the request succeeded. */
    public static final Integer OK = 0;

    /** Status code meaning the request failed. */
    public static final Integer ERROR = 100;

    // numeric outcome code for this response
    private Integer code;
    // human-readable description of the outcome
    private String message;
    // URL of the request this response describes
    private String url;
    // optional payload attached to the response
    private T data;

    public static Integer getOK() {
        return OK;
    }

    public static Integer getERROR() {
        return ERROR;
    }

    public Integer getCode() {
        return code;
    }

    public void setCode(Integer code) {
        this.code = code;
    }

    public String getMessage() {
        return message;
    }

    public void setMessage(String message) {
        this.message = message;
    }

    public String getUrl() {
        return url;
    }

    public void setUrl(String url) {
        this.url = url;
    }

    public T getData() {
        return data;
    }

    public void setData(T data) {
        this.data = data;
    }
}
Imran-C/cassandra | src/java/org/apache/cassandra/utils/btree/BTree.java | 41053 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cassandra.utils.btree;
import java.util.*;
import com.google.common.base.Function;
import com.google.common.collect.Iterators;
import com.google.common.collect.Ordering;
import io.netty.util.Recycler;
import org.apache.cassandra.utils.ObjectSizes;
import static com.google.common.collect.Iterables.concat;
import static com.google.common.collect.Iterables.filter;
import static com.google.common.collect.Iterables.transform;
import static java.lang.Math.max;
import static java.lang.Math.min;
public class BTree
{
/**
* Leaf Nodes are a raw array of values: Object[V1, V1, ...,].
*
* Branch Nodes: Object[V1, V2, ..., child[<V1.key], child[<V2.key], ..., child[< Inf], size], where
* each child is another node, i.e., an Object[]. Thus, the value elements in a branch node are the
* first half of the array (minus one). In our implementation, each value must include its own key;
* we access these via Comparator, rather than directly.
*
* So we can quickly distinguish between leaves and branches, we require that leaf nodes are always an odd number
* of elements (padded with a null, if necessary), and branches are always an even number of elements.
*
* BTrees are immutable; updating one returns a new tree that reuses unmodified nodes.
*
* There are no references back to a parent node from its children. (This would make it impossible to re-use
* subtrees when modifying the tree, since the modified tree would need new parent references.)
* Instead, we store these references in a Path as needed when navigating the tree.
*/
    // The maximum fan factor used for B-Trees
    static final int FAN_SHIFT;
    static
    {
        // default fan factor is 32, overridable via -Dcassandra.btree.fanfactor;
        // we store it as a shift, rounding the requested value up to a power of two
        int fanfactor = 32;
        if (System.getProperty("cassandra.btree.fanfactor") != null)
            fanfactor = Integer.parseInt(System.getProperty("cassandra.btree.fanfactor"));
        int shift = 1;
        while (1 << shift < fanfactor)
            shift += 1;
        FAN_SHIFT = shift;
    }
    // NB we encode Path indexes as Bytes, so this needs to be less than Byte.MAX_VALUE / 2
    static final int FAN_FACTOR = 1 << FAN_SHIFT;
    // half the fan factor; presumably the minimum occupancy of a node — confirm against TreeBuilder
    static final int MINIMAL_NODE_SIZE = FAN_FACTOR >> 1;
    // An empty BTree Leaf - which is the same as an empty BTree
    static final Object[] EMPTY_LEAF = new Object[1];
    // An empty BTree branch - used only for internal purposes in Modifier
    static final Object[] EMPTY_BRANCH = new Object[] { null, new int[0] };
// direction of iteration
public static enum Dir
{
ASC, DESC;
public Dir invert() { return this == ASC ? DESC : ASC; }
public static Dir asc(boolean asc) { return asc ? ASC : DESC; }
public static Dir desc(boolean desc) { return desc ? DESC : ASC; }
}
    /** @return the canonical empty tree (a shared, zero-item leaf; do not mutate) */
    public static Object[] empty()
    {
        return EMPTY_LEAF;
    }
public static Object[] singleton(Object value)
{
return new Object[] { value };
}
    /** Builds a tree from {@code source}, which MUST BE IN STRICTLY ASCENDING ORDER. */
    public static <C, K extends C, V extends C> Object[] build(Collection<K> source, UpdateFunction<K, V> updateF)
    {
        return buildInternal(source, source.size(), updateF);
    }
    /** As build(Collection, UpdateFunction), for sources whose size is not known up front. */
    public static <C, K extends C, V extends C> Object[] build(Iterable<K> source, UpdateFunction<K, V> updateF)
    {
        // -1 signals "size unknown" to buildInternal
        return buildInternal(source, -1, updateF);
    }
    /**
     * Creates a BTree containing all of the objects in the provided collection
     *
     * @param source the items to build the tree with. MUST BE IN STRICTLY ASCENDING ORDER.
     * @param size the size of the source iterable; must be non-negative
     * @return a btree representing the contents of the provided iterable
     */
    public static <C, K extends C, V extends C> Object[] build(Iterable<K> source, int size, UpdateFunction<K, V> updateF)
    {
        if (size < 0)
            throw new IllegalArgumentException(Integer.toString(size));
        return buildInternal(source, size, updateF);
    }
    /**
     * As build(), except:
     * @param size < 0 if size is unknown
     */
    private static <C, K extends C, V extends C> Object[] buildInternal(Iterable<K> source, int size, UpdateFunction<K, V> updateF)
    {
        // small and of known size: fits in a single leaf (non-short-circuit '&' is deliberate here)
        if ((size >= 0) & (size < FAN_FACTOR))
        {
            if (size == 0)
                return EMPTY_LEAF;
            // pad to odd length to match contract that all leaf nodes are odd
            V[] values = (V[]) new Object[size | 1];
            {
                int i = 0;
                for (K k : source)
                    values[i++] = updateF.apply(k);
            }
            // report the heap space consumed by the new leaf
            updateF.allocated(ObjectSizes.sizeOfArray(values));
            return values;
        }
        // larger (or unknown-size) inputs are delegated to TreeBuilder
        TreeBuilder builder = TreeBuilder.newInstance();
        Object[] btree = builder.build(source, updateF, size);
        return btree;
    }
    /** As update(Object[], Comparator, Iterable, int, UpdateFunction), using the collection's size. */
    public static <C, K extends C, V extends C> Object[] update(Object[] btree,
                                                                Comparator<C> comparator,
                                                                Collection<K> updateWith,
                                                                UpdateFunction<K, V> updateF)
    {
        return update(btree, comparator, updateWith, updateWith.size(), updateF);
    }
    /**
     * Returns a new BTree with the provided collection inserting/replacing as necessary any equal items
     *
     * @param btree the tree to update
     * @param comparator the comparator that defines the ordering over the items in the tree
     * @param updateWith the items to either insert / update. MUST BE IN STRICTLY ASCENDING ORDER.
     * @param updateWithLength the number of elements in updateWith
     * @param updateF the update function to apply to any pairs we are swapping, and maybe abort early
     * @param <V>
     * @return a new tree; the input tree is unmodified
     */
    public static <C, K extends C, V extends C> Object[] update(Object[] btree,
                                                                Comparator<C> comparator,
                                                                Iterable<K> updateWith,
                                                                int updateWithLength,
                                                                UpdateFunction<K, V> updateF)
    {
        // updating an empty tree is simply a build of the new items
        if (isEmpty(btree))
            return build(updateWith, updateWithLength, updateF);
        TreeBuilder builder = TreeBuilder.newInstance();
        btree = builder.update(btree, comparator, updateWith, updateF);
        return btree;
    }
public static <K> Object[] merge(Object[] tree1, Object[] tree2, Comparator<? super K> comparator, UpdateFunction<K, K> updateF)
{
if (size(tree1) < size(tree2))
{
Object[] tmp = tree1;
tree1 = tree2;
tree2 = tmp;
}
return update(tree1, comparator, new BTreeSet<K>(tree2, comparator), updateF);
}
    /** @return an ascending iterator over the whole tree */
    public static <V> Iterator<V> iterator(Object[] btree)
    {
        return iterator(btree, Dir.ASC);
    }
    /** @return an iterator over the whole tree, in the given direction */
    public static <V> Iterator<V> iterator(Object[] btree, Dir dir)
    {
        return new BTreeSearchIterator<>(btree, null, dir);
    }
    /** @return an iterator over the inclusive tree-index range [lb..ub], in the given direction */
    public static <V> Iterator<V> iterator(Object[] btree, int lb, int ub, Dir dir)
    {
        return new BTreeSearchIterator<>(btree, null, dir, lb, ub);
    }
    /** @return an Iterable view over the whole tree, ascending */
    public static <V> Iterable<V> iterable(Object[] btree)
    {
        return iterable(btree, Dir.ASC);
    }
    /** @return an Iterable view over the whole tree, in the given direction */
    public static <V> Iterable<V> iterable(Object[] btree, Dir dir)
    {
        return () -> iterator(btree, dir);
    }
    /** @return an Iterable view over the inclusive tree-index range [lb..ub], in the given direction */
    public static <V> Iterable<V> iterable(Object[] btree, int lb, int ub, Dir dir)
    {
        return () -> iterator(btree, lb, ub, dir);
    }
    /**
     * Returns an Iterator over the entire tree
     *
     * @param btree the tree to iterate over
     * @param comparator the comparator that defines the ordering over the items in the tree
     * @param dir direction of iteration
     * @param <V> the type of the tree's items
     * @return a search iterator covering the whole tree
     */
    public static <K, V> BTreeSearchIterator<K, V> slice(Object[] btree, Comparator<? super K> comparator, Dir dir)
    {
        return new BTreeSearchIterator<>(btree, comparator, dir);
    }
    /**
     * @param btree the tree to iterate over
     * @param comparator the comparator that defines the ordering over the items in the tree
     * @param start the beginning of the range to return, inclusive (in ascending order)
     * @param end the end of the range to return, exclusive (in ascending order)
     * @param dir if false, the iterator will start at the last item and move backwards
     * @return an Iterator over the defined sub-range of the tree
     */
    public static <K, V extends K> BTreeSearchIterator<K, V> slice(Object[] btree, Comparator<? super K> comparator, K start, K end, Dir dir)
    {
        // delegates with the conventional inclusive-start / exclusive-end bounds
        return slice(btree, comparator, start, true, end, false, dir);
    }
/**
* @param btree the tree to iterate over
* @param comparator the comparator that defines the ordering over the items in the tree
* @param start low bound of the range
* @param startInclusive inclusivity of lower bound
* @param end high bound of the range
* @param endInclusive inclusivity of higher bound
* @param dir direction of iteration
* @return an Iterator over the defined sub-range of the tree
*/
public static <K, V extends K> BTreeSearchIterator<K, V> slice(Object[] btree, Comparator<? super K> comparator, K start, boolean startInclusive, K end, boolean endInclusive, Dir dir)
{
int inclusiveLowerBound = max(0,
start == null ? Integer.MIN_VALUE
: startInclusive ? ceilIndex(btree, comparator, start)
: higherIndex(btree, comparator, start));
int inclusiveUpperBound = min(size(btree) - 1,
end == null ? Integer.MAX_VALUE
: endInclusive ? floorIndex(btree, comparator, end)
: lowerIndex(btree, comparator, end));
return new BTreeSearchIterator<>(btree, comparator, dir, inclusiveLowerBound, inclusiveUpperBound);
}
/**
* @return the item in the tree that sorts as equal to the search argument, or null if no such item
*/
public static <V> V find(Object[] node, Comparator<? super V> comparator, V find)
{
while (true)
{
int keyEnd = getKeyEnd(node);
int i = Arrays.binarySearch((V[]) node, 0, keyEnd, find, comparator);
if (i >= 0)
return (V) node[i];
if (isLeaf(node))
return null;
i = -1 - i;
node = (Object[]) node[keyEnd + i];
}
}
    /**
     * Modifies the provided btree directly. THIS SHOULD NOT BE USED WITHOUT EXTREME CARE as BTrees are meant to be immutable.
     * Finds and replaces the item provided by index in the tree.
     * The replacement should sort as equal to the item it replaces (not enforced).
     *
     * @param tree the tree to mutate in place
     * @param index tree-wide (iteration-order) index of the item to overwrite
     * @param replace the replacement value
     * @throws IndexOutOfBoundsException if index is outside [0, size(tree))
     */
    public static <V> void replaceInSitu(Object[] tree, int index, V replace)
    {
        // WARNING: if semantics change, see also InternalCursor.seekTo, which mirrors this implementation
        if ((index < 0) | (index >= size(tree)))
            throw new IndexOutOfBoundsException(index + " not in range [0.." + size(tree) + ")");
        while (!isLeaf(tree))
        {
            final int[] sizeMap = getSizeMap(tree);
            int boundary = Arrays.binarySearch(sizeMap, index);
            if (boundary >= 0)
            {
                // exact match, in this branch node
                assert boundary < sizeMap.length - 1;
                tree[boundary] = replace;
                return;
            }
            // convert insertion point; re-base index relative to the child we descend into
            boundary = -1 -boundary;
            if (boundary > 0)
            {
                assert boundary < sizeMap.length;
                index -= (1 + sizeMap[boundary - 1]);
            }
            tree = (Object[]) tree[getChildStart(tree) + boundary];
        }
        assert index < getLeafKeyEnd(tree);
        tree[index] = replace;
    }
    /**
     * Modifies the provided btree directly. THIS SHOULD NOT BE USED WITHOUT EXTREME CARE as BTrees are meant to be immutable.
     * Finds and replaces the provided item in the tree. Both should sort as equal to each other (although this is not enforced)
     *
     * @throws NoSuchElementException if {@code find} is not present (by reference-asserted identity) in the tree
     */
    public static <V> void replaceInSitu(Object[] node, Comparator<? super V> comparator, V find, V replace)
    {
        while (true)
        {
            int keyEnd = getKeyEnd(node);
            int i = Arrays.binarySearch((V[]) node, 0, keyEnd, find, comparator);
            if (i >= 0)
            {
                // we require the exact same object instance to be present
                assert find == node[i];
                node[i] = replace;
                return;
            }
            if (isLeaf(node))
                throw new NoSuchElementException();
            // descend into the child covering the insertion point
            i = -1 - i;
            node = (Object[]) node[keyEnd + i];
        }
    }
    /**
     * Honours result semantics of {@link Arrays#binarySearch}, as though it were performed on the tree flattened into an array
     * @return index of item in tree, or <tt>(-(<i>insertion point</i>) - 1)</tt> if not present
     */
    public static <V> int findIndex(Object[] node, Comparator<? super V> comparator, V find)
    {
        // lb accumulates the number of items preceding the current node in the whole tree
        int lb = 0;
        while (true)
        {
            int keyEnd = getKeyEnd(node);
            int i = Arrays.binarySearch((V[]) node, 0, keyEnd, find, comparator);
            boolean exact = i >= 0;
            if (isLeaf(node))
                // on a miss, i is already -(local insertion point) - 1, so i - lb
                // encodes -(tree-wide insertion point) - 1
                return exact ? lb + i : i - lb;
            if (!exact)
                i = -1 - i;
            int[] sizeMap = getSizeMap(node);
            if (exact)
                // key is in this branch node; sizeMap gives its tree-wide index directly
                return lb + sizeMap[i];
            else if (i > 0)
                // account for all items in the children (and keys) we skip over
                lb += sizeMap[i - 1] + 1;
            node = (Object[]) node[keyEnd + i];
        }
    }
    /**
     * @return the value at the index'th position in the tree, in tree order
     * @throws IndexOutOfBoundsException if index is outside [0, size(tree))
     */
    public static <V> V findByIndex(Object[] tree, int index)
    {
        // WARNING: if semantics change, see also InternalCursor.seekTo, which mirrors this implementation
        if ((index < 0) | (index >= size(tree)))
            throw new IndexOutOfBoundsException(index + " not in range [0.." + size(tree) + ")");
        Object[] node = tree;
        while (true)
        {
            if (isLeaf(node))
            {
                int keyEnd = getLeafKeyEnd(node);
                assert index < keyEnd;
                return (V) node[index];
            }
            int[] sizeMap = getSizeMap(node);
            int boundary = Arrays.binarySearch(sizeMap, index);
            if (boundary >= 0)
            {
                // exact match, in this branch node
                assert boundary < sizeMap.length - 1;
                return (V) node[boundary];
            }
            // convert insertion point; re-base index relative to the child we descend into
            boundary = -1 -boundary;
            if (boundary > 0)
            {
                assert boundary < sizeMap.length;
                index -= (1 + sizeMap[boundary - 1]);
            }
            node = (Object[]) node[getChildStart(node) + boundary];
        }
    }
    /* since we have access to binarySearch semantics within indexOf(), we can use this to implement
     * lower/upper/floor/higher very trivially
     *
     * this implementation is *not* optimal; it requires two logarithmic traversals, although the second is much cheaper
     * (having less height, and operating over only primitive arrays), and the clarity is compelling
     */
    /** @return tree index of the greatest item strictly less than {@code find}, or -1 if none */
    public static <V> int lowerIndex(Object[] btree, Comparator<? super V> comparator, V find)
    {
        int i = findIndex(btree, comparator, find);
        if (i < 0)
            i = -1 -i;
        return i - 1;
    }
    /** @return the greatest item strictly less than {@code find}, or null if none */
    public static <V> V lower(Object[] btree, Comparator<? super V> comparator, V find)
    {
        int i = lowerIndex(btree, comparator, find);
        return i >= 0 ? findByIndex(btree, i) : null;
    }
    /** @return tree index of the greatest item less than or equal to {@code find}, or -1 if none */
    public static <V> int floorIndex(Object[] btree, Comparator<? super V> comparator, V find)
    {
        int i = findIndex(btree, comparator, find);
        if (i < 0)
            i = -2 -i;
        return i;
    }
    /** @return the greatest item less than or equal to {@code find}, or null if none */
    public static <V> V floor(Object[] btree, Comparator<? super V> comparator, V find)
    {
        int i = floorIndex(btree, comparator, find);
        return i >= 0 ? findByIndex(btree, i) : null;
    }
    /** @return tree index of the least item strictly greater than {@code find}, or size(btree) if none */
    public static <V> int higherIndex(Object[] btree, Comparator<? super V> comparator, V find)
    {
        int i = findIndex(btree, comparator, find);
        if (i < 0) i = -1 -i;
        else i++;
        return i;
    }
    /** @return the least item strictly greater than {@code find}, or null if none */
    public static <V> V higher(Object[] btree, Comparator<? super V> comparator, V find)
    {
        int i = higherIndex(btree, comparator, find);
        return i < size(btree) ? findByIndex(btree, i) : null;
    }
    /** @return tree index of the least item greater than or equal to {@code find}, or size(btree) if none */
    public static <V> int ceilIndex(Object[] btree, Comparator<? super V> comparator, V find)
    {
        int i = findIndex(btree, comparator, find);
        if (i < 0)
            i = -1 -i;
        return i;
    }
    /** @return the least item greater than or equal to {@code find}, or null if none */
    public static <V> V ceil(Object[] btree, Comparator<? super V> comparator, V find)
    {
        int i = ceilIndex(btree, comparator, find);
        return i < size(btree) ? findByIndex(btree, i) : null;
    }
    // UTILITY METHODS
    // get the upper bound we should search in for keys in the node
    static int getKeyEnd(Object[] node)
    {
        if (isLeaf(node))
            return getLeafKeyEnd(node);
        else
            return getBranchKeyEnd(node);
    }
    // get the last index that is non-null in the leaf node
    // (leaves are padded to odd length, so at most the final slot is null)
    static int getLeafKeyEnd(Object[] node)
    {
        int len = node.length;
        return node[len - 1] == null ? len - 1 : len;
    }
    // return the boundary position between keys/children for the branch node
    // == number of keys, as they are indexed from zero
    static int getBranchKeyEnd(Object[] branchNode)
    {
        return (branchNode.length / 2) - 1;
    }
    /**
     * @return first index in a branch node containing child nodes
     */
    static int getChildStart(Object[] branchNode)
    {
        return getBranchKeyEnd(branchNode);
    }
    /**
     * @return last index + 1 in a branch node containing child nodes
     * (the final slot holds the size map, not a child)
     */
    static int getChildEnd(Object[] branchNode)
    {
        return branchNode.length - 1;
    }
    /**
     * @return number of children in a branch node
     */
    static int getChildCount(Object[] branchNode)
    {
        return branchNode.length / 2;
    }
    /**
     * @return the size map for the branch node
     */
    static int[] getSizeMap(Object[] branchNode)
    {
        return (int[]) branchNode[getChildEnd(branchNode)];
    }
    /**
     * @return the entry at {@code index} in the branch node's size map
     */
    static int lookupSizeMap(Object[] branchNode, int index)
    {
        return getSizeMap(branchNode)[index];
    }
    // get the size from the btree's index (fails if not present)
    public static int size(Object[] tree)
    {
        if (isLeaf(tree))
            return getLeafKeyEnd(tree);
        int length = tree.length;
        // length - 1 == getChildEnd == getPositionOfSizeMap
        // (length / 2) - 1 == getChildCount - 1 == position of full tree size
        // hard code this, as will be used often;
        return ((int[]) tree[length - 1])[(length / 2) - 1];
    }
    /** @return the heap space occupied by the tree's node arrays (structure only, not the values) */
    public static long sizeOfStructureOnHeap(Object[] tree)
    {
        long size = ObjectSizes.sizeOfArray(tree);
        if (isLeaf(tree))
            return size;
        // recursively add each child node's structural footprint
        for (int i = getChildStart(tree) ; i < getChildEnd(tree) ; i++)
            size += sizeOfStructureOnHeap((Object[]) tree[i]);
        return size;
    }
// returns true if the provided node is a leaf, false if it is a branch
static boolean isLeaf(Object[] node)
{
return (node.length & 1) == 1;
}
    /** @return true iff the tree is the canonical shared empty leaf */
    public static boolean isEmpty(Object[] tree)
    {
        return tree == EMPTY_LEAF;
    }
    /** @return the height of the tree (1 for a lone leaf) */
    public static int depth(Object[] tree)
    {
        int depth = 1;
        while (!isLeaf(tree))
        {
            depth++;
            // descend via the first child (at index getKeyEnd == getChildStart);
            // in a balanced B-Tree all leaves sit at the same depth
            tree = (Object[]) tree[getKeyEnd(tree)];
        }
        return depth;
    }
    /**
     * Fill the target array with the contents of the provided subtree, in ascending order, starting at targetOffset
     * @param tree source
     * @param target array
     * @param targetOffset offset in target array
     * @return number of items copied (size of tree)
     */
    public static int toArray(Object[] tree, Object[] target, int targetOffset)
    {
        return toArray(tree, 0, size(tree), target, targetOffset)
;    }
    /**
     * As above, but copies only the tree-index range [treeStart, treeEnd) of the subtree.
     */
    public static int toArray(Object[] tree, int treeStart, int treeEnd, Object[] target, int targetOffset)
    {
        if (isLeaf(tree))
        {
            // leaf values are contiguous; a single bulk copy suffices
            int count = treeEnd - treeStart;
            System.arraycopy(tree, treeStart, target, targetOffset, count);
            return count;
        }
        int newTargetOffset = targetOffset;
        int childCount = getChildCount(tree);
        int childOffset = getChildStart(tree);
        for (int i = 0 ; i < childCount ; i++)
        {
            // tree-index bounds of child i within this subtree
            int childStart = treeIndexOffsetOfChild(tree, i);
            int childEnd = treeIndexOfBranchKey(tree, i);
            if (childStart <= treeEnd && childEnd >= treeStart)
            {
                // copy the overlapping slice of the child, then the separating key (if in range)
                newTargetOffset += toArray((Object[]) tree[childOffset + i], max(0, treeStart - childStart), min(childEnd, treeEnd) - childStart,
                                           target, newTargetOffset);
                if (treeStart <= childEnd && treeEnd > childEnd) // this check will always fail for the non-existent key
                    target[newTargetOffset++] = tree[i];
            }
        }
        return newTargetOffset - targetOffset;
    }
// simple class for avoiding duplicate transformation work
private static class FiltrationTracker<V> implements Function<V, V>
{
final Function<? super V, ? extends V> wrapped;
int index;
boolean failed;
private FiltrationTracker(Function<? super V, ? extends V> wrapped)
{
this.wrapped = wrapped;
}
public V apply(V i)
{
V o = wrapped.apply(i);
if (o != null) index++;
else failed = true;
return o;
}
}
    /**
     * Takes a btree and transforms it using the provided function, filtering out any null results.
     * The result of any transformation must sort identically wrt the other results as their originals
     */
    public static <V> Object[] transformAndFilter(Object[] btree, Function<? super V, ? extends V> function)
    {
        if (isEmpty(btree))
            return btree;
        // TODO: can be made more efficient
        FiltrationTracker<V> wrapped = new FiltrationTracker<>(function);
        Object[] result = transformAndFilter(btree, wrapped);
        if (!wrapped.failed)
            // nothing was filtered out: the in-place-style transformed tree is valid as-is
            return result;
        // an item was filtered: rebuild.
        // take the already transformed bits from the head of the partial result
        Iterable<V> head = iterable(result, 0, wrapped.index - 1, Dir.ASC);
        // and concatenate with remainder of original tree, with transformation applied
        Iterable<V> remainder = iterable(btree, wrapped.index + 1, size(btree) - 1, Dir.ASC);
        remainder = filter(transform(remainder, function), (x) -> x != null);
        Iterable<V> build = concat(head, remainder);
        return buildInternal(build, -1, UpdateFunction.<V>noOp());
    }
    /**
     * Recursive worker: transforms nodes in place-on-copy until the tracker reports a
     * filtered (null) result, at which point it unwinds early returning the partial tree.
     */
    private static <V> Object[] transformAndFilter(Object[] btree, FiltrationTracker<V> function)
    {
        Object[] result = btree;
        boolean isLeaf = isLeaf(btree);
        int childOffset = isLeaf ? Integer.MAX_VALUE : getChildStart(btree);
        int limit = isLeaf ? getLeafKeyEnd(btree) : btree.length - 1;
        for (int i = 0 ; i < limit ; i++)
        {
            // we want to visit in iteration order, so we visit our key nodes inbetween our children
            int idx = isLeaf ? i : (i / 2) + (i % 2 == 0 ? childOffset : 0);
            Object current = btree[idx];
            Object updated = idx < childOffset ? function.apply((V) current) : transformAndFilter((Object[]) current, function);
            if (updated != current)
            {
                // copy-on-write: clone this node the first time anything within it changes
                if (result == btree)
                    result = btree.clone();
                result[idx] = updated;
            }
            if (function.failed)
                return result;
        }
        return result;
    }
    /** @return true iff the two trees contain equal elements in the same order (shape-independent) */
    public static boolean equals(Object[] a, Object[] b)
    {
        return size(a) == size(b) && Iterators.elementsEqual(iterator(a), iterator(b));
    }
    /** @return an order-dependent hash of the tree's elements (shape-independent) */
    public static int hashCode(Object[] btree)
    {
        // we can't just delegate to Arrays.deepHashCode(),
        // because two equivalent trees may be represented by differently shaped trees
        int result = 1;
        for (Object v : iterable(btree))
            result = 31 * result + Objects.hashCode(v);
        return result;
    }
    /**
     * tree index => index of key wrt all items in the tree laid out serially
     *
     * This version of the method permits requesting out-of-bounds indexes, -1 and size
     * @param root to calculate tree index within
     * @param keyIndex root-local index of key to calculate tree-index
     * @return the number of items preceding the key in the whole tree of root
     */
    public static int treeIndexOfKey(Object[] root, int keyIndex)
    {
        if (isLeaf(root))
            return keyIndex;
        int[] sizeMap = getSizeMap(root);
        if ((keyIndex >= 0) & (keyIndex < sizeMap.length))
            return sizeMap[keyIndex];
        // we support asking for -1 or size, so that we can easily use this for iterator bounds checking
        if (keyIndex < 0)
            return -1;
        return sizeMap[keyIndex - 1] + 1;
    }
    /**
     * @param keyIndex node-local index of the key to calculate index of
     * @return keyIndex; this method is here only for symmetry and clarity
     */
    public static int treeIndexOfLeafKey(int keyIndex)
    {
        return keyIndex;
    }
    /**
     * @param root to calculate tree-index within
     * @param keyIndex root-local index of key to calculate tree-index of
     * @return the number of items preceding the key in the whole tree of root
     */
    public static int treeIndexOfBranchKey(Object[] root, int keyIndex)
    {
        return lookupSizeMap(root, keyIndex);
    }
    /**
     * @param root to calculate tree-index within
     * @param childIndex root-local index of *child* to calculate tree-index of
     * @return the number of items preceding the child in the whole tree of root
     */
    public static int treeIndexOffsetOfChild(Object[] root, int childIndex)
    {
        if (childIndex == 0)
            return 0;
        // everything in and before the previous child, plus the separating key
        return 1 + lookupSizeMap(root, childIndex - 1);
    }
    // pool of recycled Builder instances, to avoid re-allocating their backing arrays
    final static Recycler<Builder> builderRecycler = new Recycler<Builder>()
    {
        protected Builder newObject(Handle handle)
        {
            return new Builder(handle);
        }
    };
    /** @return a (possibly recycled) Builder reset to use the given comparator */
    public static <V> Builder<V> builder(Comparator<? super V> comparator)
    {
        Builder<V> builder = builderRecycler.get();
        builder.reuse(comparator);
        return builder;
    }
    /**
     * As builder(Comparator).
     * NOTE: initialCapacity is currently ignored — recycled builders keep whatever
     * backing array they already have and grow on demand.
     */
    public static <V> Builder<V> builder(Comparator<? super V> comparator, int initialCapacity)
    {
        return builder(comparator);
    }
// Accumulates values and turns them into a btree via build().
// In "auto" mode (the default) the builder cheaply tracks whether additions arrive
// already sorted and de-duplicated so the final sort/filter pass can be skipped;
// callers that promise sorted, resolved input may switch this off with auto(false).
// Instances are pooled: recycle() returns them to a Recycler and reuse() resets state.
public static class Builder<V>
{
    // a user-defined bulk resolution, to be applied manually via resolve()
    public static interface Resolver
    {
        // can return a different output type to input, so long as sort order is maintained
        // if a resolver is present, this method will be called for every sequence of equal inputs
        // even those with only one item
        Object resolve(Object[] array, int lb, int ub);
    }
    // a user-defined resolver that is applied automatically on encountering two duplicate values
    public static interface QuickResolver<V>
    {
        // can return a different output type to input, so long as sort order is maintained
        // if a resolver is present, this method will be called for every sequence of equal inputs
        // even those with only one item
        V resolve(V a, V b);
    }
    Comparator<? super V> comparator;
    Object[] values; // accumulated values; only indices [0, count) are populated
    int count;
    boolean detected = true; // true if we have managed to cheaply ensure sorted (+ filtered, if resolver == null) as we have added
    boolean auto = true; // false if the user has promised to enforce the sort order and resolve any duplicates
    QuickResolver<V> quickResolver;
    final Recycler.Handle recycleHandle; // handle for returning this builder to the pool; may be null
    private Builder(Recycler.Handle handle)
    {
        this.recycleHandle = handle;
        this.values = new Object[16];
    }
    // installs a resolver applied automatically whenever two equal values meet
    public Builder<V> setQuickResolver(QuickResolver<V> quickResolver)
    {
        this.quickResolver = quickResolver;
        return this;
    }
    // returns this builder to the shared pool (no-op for non-pooled instances)
    public void recycle()
    {
        if (recycleHandle != null)
            builderRecycler.recycle(this, recycleHandle);
    }
    // resets all state so a pooled builder can be used afresh with the given comparator
    private void reuse(Comparator<? super V> comparator)
    {
        this.comparator = comparator;
        quickResolver = null;
        Arrays.fill(values, 0, count, null);
        count = 0;
        detected = true;
        auto = true;
    }
    // toggles automatic sort/de-duplication enforcement (see class comment)
    public Builder<V> auto(boolean auto)
    {
        this.auto = auto;
        return this;
    }
    // appends a single value; in auto mode, consecutive duplicates are resolved
    // eagerly and any out-of-order addition merely clears 'detected' (the order is
    // then restored later by autoEnforce())
    public Builder<V> add(V v)
    {
        if (count == values.length)
            values = Arrays.copyOf(values, count * 2);
        Object[] values = this.values;
        int prevCount = this.count++;
        values[prevCount] = v;
        if (auto && detected && prevCount > 0)
        {
            V prev = (V) values[prevCount - 1];
            int c = comparator.compare(prev, v);
            if (c == 0 && auto)
            {
                // duplicate of the previous value: drop it (or fold via quickResolver)
                count = prevCount;
                if (quickResolver != null)
                    values[prevCount - 1] = quickResolver.resolve(prev, v);
            }
            else if (c > 0)
            {
                detected = false;
            }
        }
        return this;
    }
    // bulk append; sorted sets with a compatible comparator take the merge fast-path
    public Builder<V> addAll(Collection<V> add)
    {
        if (auto && add instanceof SortedSet && equalComparators(comparator, ((SortedSet) add).comparator()))
        {
            // if we're a SortedSet, permit quick order-preserving addition of items
            // if we collect all duplicates, don't bother as merge will necessarily be more expensive than sorting at end
            return mergeAll(add, add.size());
        }
        detected = false;
        if (values.length < count + add.size())
            values = Arrays.copyOf(values, max(count + add.size(), count * 2));
        for (V v : add)
            values[count++] = v;
        return this;
    }
    // true when both comparators impose the same order (identical, or both natural)
    private static boolean equalComparators(Comparator<?> a, Comparator<?> b)
    {
        return a == b || (isNaturalComparator(a) && isNaturalComparator(b));
    }
    private static boolean isNaturalComparator(Comparator<?> a)
    {
        return a == null || a == Comparator.naturalOrder() || a == Ordering.natural();
    }
    // iter must be in sorted order!
    private Builder<V> mergeAll(Iterable<V> add, int addCount)
    {
        assert auto;
        // ensure the existing contents are in order
        autoEnforce();
        int curCount = count;
        // we make room for curCount * 2 + addCount, so that we can copy the current values to the end
        // if necessary for continuing the merge, and have the new values directly after the current value range
        if (values.length < curCount * 2 + addCount)
            values = Arrays.copyOf(values, max(curCount * 2 + addCount, curCount * 3));
        if (add instanceof BTreeSet)
        {
            // use btree set's fast toArray method, to append directly
            ((BTreeSet) add).toArray(values, curCount);
        }
        else
        {
            // consider calling toArray() and System.arraycopy
            int i = curCount;
            for (V v : add)
                values[i++] = v;
        }
        return mergeAll(addCount);
    }
    // merges the sorted run values[count, count+addCount) into values[0, count):
    // first skips any shared prefix in place, then relocates the remaining existing
    // values past the new range and performs a standard two-way merge into the front
    private Builder<V> mergeAll(int addCount)
    {
        Object[] a = values;
        int addOffset = count;
        int i = 0, j = addOffset;
        int curEnd = addOffset, addEnd = addOffset + addCount;
        // save time in cases where we already have a subset, by skipping dir
        while (i < curEnd && j < addEnd)
        {
            V ai = (V) a[i], aj = (V) a[j];
            // in some cases, such as Columns, we may have identity supersets, so perform a cheap object-identity check
            int c = ai == aj ? 0 : comparator.compare(ai, aj);
            if (c > 0)
                break;
            else if (c == 0)
            {
                if (quickResolver != null)
                    a[i] = quickResolver.resolve(ai, aj);
                j++;
            }
            i++;
        }
        if (j == addEnd)
            return this; // already a superset of the new values
        // otherwise, copy the remaining existing values to the very end, freeing up space for merge result
        int newCount = i;
        System.arraycopy(a, i, a, addEnd, count - i);
        curEnd = addEnd + (count - i);
        i = addEnd;
        while (i < curEnd && j < addEnd)
        {
            V ai = (V) a[i];
            V aj = (V) a[j];
            // could avoid one comparison if we cared, but would make this ugly
            int c = comparator.compare(ai, aj);
            if (c == 0)
            {
                Object newValue = quickResolver == null ? ai : quickResolver.resolve(ai, aj);
                a[newCount++] = newValue;
                i++;
                j++;
            }
            else
            {
                a[newCount++] = c < 0 ? a[i++] : a[j++];
            }
        }
        // exhausted one of the inputs; fill in remainder of the other
        if (i < curEnd)
        {
            System.arraycopy(a, i, a, newCount, curEnd - i);
            newCount += curEnd - i;
        }
        else if (j < addEnd)
        {
            if (j != newCount)
                System.arraycopy(a, j, a, newCount, addEnd - j);
            newCount += addEnd - j;
        }
        count = newCount;
        return this;
    }
    public boolean isEmpty()
    {
        return count == 0;
    }
    // reverses accumulated values in place; only legal when the caller manages ordering
    public Builder<V> reverse()
    {
        assert !auto;
        int mid = count / 2;
        for (int i = 0 ; i < mid ; i++)
        {
            Object t = values[i];
            values[i] = values[count - (1 + i)];
            values[count - (1 + i)] = t;
        }
        return this;
    }
    public Builder<V> sort()
    {
        Arrays.sort((V[]) values, 0, count, comparator);
        return this;
    }
    // automatically enforce sorted+filtered
    private void autoEnforce()
    {
        if (!detected && count > 1)
        {
            sort();
            // single pass de-duplication of the now-sorted contents
            int prevIdx = 0;
            V prev = (V) values[0];
            for (int i = 1 ; i < count ; i++)
            {
                V next = (V) values[i];
                if (comparator.compare(prev, next) != 0)
                    values[++prevIdx] = prev = next;
                else if (quickResolver != null)
                    values[prevIdx] = prev = quickResolver.resolve(prev, next);
            }
            count = prevIdx + 1;
        }
        detected = true;
    }
    // applies a bulk Resolver to every run of equal values; contents must be sorted
    public Builder<V> resolve(Resolver resolver)
    {
        if (count > 0)
        {
            int c = 0;
            int prev = 0;
            for (int i = 1 ; i < count ; i++)
            {
                if (comparator.compare((V) values[i], (V) values[prev]) != 0)
                {
                    values[c++] = resolver.resolve((V[]) values, prev, i);
                    prev = i;
                }
            }
            // resolve the final run, which the loop above never closes
            values[c++] = resolver.resolve((V[]) values, prev, count);
            count = c;
        }
        return this;
    }
    // finalizes the contents (sorting/filtering if needed) and builds the btree;
    // the builder is always recycled afterwards, even if build throws
    public Object[] build()
    {
        try
        {
            if (auto)
                autoEnforce();
            return BTree.build(Arrays.asList(values).subList(0, count), UpdateFunction.noOp());
        }
        finally
        {
            this.recycle();
        }
    }
}
/**
 * Compares two keys with the supplied comparator, treating the sentinel objects
 * NEGATIVE_INFINITY / POSITIVE_INFINITY as smaller / larger than every real key.
 * Sentinels never reach the comparator, so cmp may be null for sentinel-only calls.
 */
static <V> int compare(Comparator<V> cmp, Object a, Object b)
{
    if (a == b)
        return 0;
    boolean aIsMin = a == NEGATIVE_INFINITY;
    boolean bIsMax = b == POSITIVE_INFINITY;
    if (aIsMin || bIsMax)
        return -1;
    boolean aIsMax = a == POSITIVE_INFINITY;
    boolean bIsMin = b == NEGATIVE_INFINITY;
    if (aIsMax || bIsMin)
        return 1;
    return cmp.compare((V) a, (V) b);
}
/** sentinel key greater than all real keys */
static Object POSITIVE_INFINITY = new Object();
/** sentinel key smaller than all real keys */
static Object NEGATIVE_INFINITY = new Object();
/**
 * Entry point for structural validation of a whole btree: checks node size and
 * fan-out invariants, size-map consistency, and (when cmp is non-null) global key
 * ordering between the infinity sentinels. Intended for assertions/tests.
 */
public static boolean isWellFormed(Object[] btree, Comparator<? extends Object> cmp)
{
    return isWellFormed(cmp, btree, true, NEGATIVE_INFINITY, POSITIVE_INFINITY);
}
// Recursively validates one node: key ordering within the exclusive bounds (min, max),
// node size limits, size-map consistency with actual subtree sizes, and that the
// children are uniformly either all leaves or all branches.
private static boolean isWellFormed(Comparator<?> cmp, Object[] node, boolean isRoot, Object min, Object max)
{
    // with a comparator we can additionally check the per-node key ordering invariant
    if (cmp != null && !isNodeWellFormed(cmp, node, min, max))
        return false;
    if (isLeaf(node))
    {
        // the root leaf may be arbitrarily small; inner leaves must be at least half full
        if (isRoot)
            return node.length <= FAN_FACTOR + 1;
        return node.length >= FAN_FACTOR / 2 && node.length <= FAN_FACTOR + 1;
    }
    final int keyCount = getBranchKeyEnd(node);
    if ((!isRoot && keyCount < FAN_FACTOR / 2) || keyCount > FAN_FACTOR + 1)
        return false;
    int type = 0;
    int size = -1;
    int[] sizeMap = getSizeMap(node);
    // compare each child node with the branch element at the head of this node it corresponds with
    for (int i = getChildStart(node); i < getChildEnd(node) ; i++)
    {
        Object[] child = (Object[]) node[i];
        size += size(child) + 1;
        // the cumulative size map must match the real subtree sizes
        if (sizeMap[i - getChildStart(node)] != size)
            return false;
        // the separating key above this child bounds it from above; the last child is bounded by max
        Object localmax = i < node.length - 2 ? node[i - getChildStart(node)] : max;
        if (!isWellFormed(cmp, child, false, min, localmax))
            return false;
        type |= isLeaf(child) ? 1 : 2;
        min = localmax;
    }
    return type < 3; // either all leaves or all branches but not a mix
}
/**
 * Verifies that the keys of a single node are strictly increasing and fall strictly
 * between the exclusive bounds min and max, using the sentinel-aware compare().
 */
private static boolean isNodeWellFormed(Comparator<?> cmp, Object[] node, Object min, Object max)
{
    int keyCount = getKeyEnd(node);
    Object prior = min;
    int idx = 0;
    while (idx < keyCount)
    {
        Object key = node[idx];
        // every key must be strictly greater than all keys (and the bound) before it
        if (compare(cmp, prior, key) >= 0)
            return false;
        prior = key;
        idx++;
    }
    // the largest key must still sit strictly below the upper bound
    return compare(cmp, prior, max) < 0;
}
}
| apache-2.0 |
hortonworks/cloudbreak | orchestrator-salt/src/main/java/com/sequenceiq/cloudbreak/orchestrator/salt/poller/join/FingerprintCollector.java | 598 | package com.sequenceiq.cloudbreak.orchestrator.salt.poller.join;
import java.util.List;
import com.sequenceiq.cloudbreak.orchestrator.exception.CloudbreakOrchestratorFailedException;
import com.sequenceiq.cloudbreak.orchestrator.salt.client.SaltConnector;
import com.sequenceiq.cloudbreak.orchestrator.salt.domain.FingerprintsResponse;
import com.sequenceiq.cloudbreak.orchestrator.salt.domain.Minion;
/**
 * Collects key fingerprints from a set of salt minions so their join requests can be
 * verified before acceptance.
 */
public interface FingerprintCollector {
    /**
     * Queries the given minions through the salt connector and returns their reported
     * key fingerprints.
     *
     * @param sc connector used to communicate with the salt master
     * @param minionsToAccept minions whose fingerprints should be collected
     * @return the fingerprints reported for the given minions
     * @throws CloudbreakOrchestratorFailedException if the fingerprints cannot be retrieved
     */
    FingerprintsResponse collectFingerprintFromMinions(SaltConnector sc, List<Minion> minionsToAccept) throws CloudbreakOrchestratorFailedException;
}
| apache-2.0 |
jbonevich/ecs-local | src/main/java/com/bonevich/ecslocal/api/ListClusters.java | 1303 | /*
* Copyright 2017 Jeffrey Bonevich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.bonevich.ecslocal.api;
import com.amazonaws.services.ecs.model.ListClustersRequest;
import com.amazonaws.services.ecs.model.ListClustersResult;
import com.bonevich.ecslocal.application.EcsApplication;
/**
 * Local (in-process) implementation of the ECS ListClusters API action: returns the
 * ARNs of all clusters known to the backing {@link EcsApplication}.
 */
public class ListClusters implements EcsApi<ListClustersRequest> {
    private final EcsApplication application;
    public ListClusters(EcsApplication application) {
        this.application = application;
    }
    /**
     * Builds a result containing every cluster ARN held by the application.
     */
    @Override
    public ListClustersResult execute(ListClustersRequest request) {
        return new ListClustersResult().withClusterArns(application.listClusters());
    }
    /**
     * A ListClusters request carries nothing that requires validation.
     */
    @Override
    public void validate(ListClustersRequest request) {
        // nop
    }
}
| apache-2.0 |
ifnul/ums-backend | is-lnu-rest-api/src/main/java/org/lnu/is/web/rest/processor/resolver/parameters/MultipleParameterRetriever.java | 359 | package org.lnu.is.web.rest.processor.resolver.parameters;
import java.util.List;
/**
 * Strategy for converting a raw multi-valued request parameter (as delivered by the
 * servlet API) into a typed list of values.
 */
public interface MultipleParameterRetriever {
    /**
     * Parses the raw values of a multi-valued parameter into a list.
     *
     * @param values the raw string values of the parameter, in request order
     * @return the parsed values
     */
    List<?> getMultiParameter(String[] values);
}
| apache-2.0 |
11xor6/presto | plugin/trino-tpch/src/main/java/io/trino/plugin/tpch/TpchPageSourceProvider.java | 1804 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.plugin.tpch;
import io.trino.spi.connector.ColumnHandle;
import io.trino.spi.connector.ConnectorPageSource;
import io.trino.spi.connector.ConnectorPageSourceProvider;
import io.trino.spi.connector.ConnectorSession;
import io.trino.spi.connector.ConnectorSplit;
import io.trino.spi.connector.ConnectorTableHandle;
import io.trino.spi.connector.ConnectorTransactionHandle;
import io.trino.spi.connector.DynamicFilter;
import java.util.List;
/**
 * Page source provider for the TPCH connector: adapts the record-set based TPCH data
 * generator to the page source SPI by wrapping each record set in a
 * {@code LazyRecordPageSource} that emits at most {@code maxRowsPerPage} rows per page.
 */
public class TpchPageSourceProvider
        implements ConnectorPageSourceProvider
{
    private final TpchRecordSetProvider tpchRecordSetProvider = new TpchRecordSetProvider();
    // upper bound on the number of rows materialized per page
    private final int maxRowsPerPage;
    TpchPageSourceProvider(int maxRowsPerPage)
    {
        this.maxRowsPerPage = maxRowsPerPage;
    }
    @Override
    public ConnectorPageSource createPageSource(
            ConnectorTransactionHandle transaction,
            ConnectorSession session,
            ConnectorSplit split,
            ConnectorTableHandle table,
            List<ColumnHandle> columns,
            DynamicFilter dynamicFilter)
    {
        // dynamicFilter is not consulted here — the underlying record set provider is
        // handed only the split/table/columns
        return new LazyRecordPageSource(maxRowsPerPage, tpchRecordSetProvider.getRecordSet(transaction, session, split, table, columns));
    }
}
| apache-2.0 |
apache/openejb | container/openejb-core/src/main/java/org/apache/openejb/core/ivm/IntraVmProxy.java | 1008 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openejb.core.ivm;
import java.io.ObjectStreamException;
/**
 * Contract for OpenEJB intra-VM proxies: marks the proxy as serializable and exposes
 * the standard Java serialization {@code writeReplace} hook so a replacement object
 * can be substituted when the proxy is written to an object stream.
 */
public interface IntraVmProxy extends java.io.Serializable {
    /**
     * Returns the object that replaces this proxy in a serialization stream.
     *
     * @return the serialization replacement for this proxy
     * @throws ObjectStreamException if a replacement cannot be produced
     */
    Object writeReplace() throws ObjectStreamException;
}
| apache-2.0 |
Deutsche-Digitale-Bibliothek/ddb-backend | IngestMultiplexerServer/src/main/java/de/fhg/iais/cortex/im/worker/RestCommand.java | 10057 | package de.fhg.iais.cortex.im.worker;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicLong;

import org.json.JSONObject;

import de.fhg.iais.commons.dbc.Check;
public class RestCommand {
public static final AtomicLong sequenceGenerator = new AtomicLong(0);
public static final RestCommand NULL = new RestCommand(RestOp.NULL, " ", " ", " ", " ");
private final RestOp op;
private final long sequenceNumber;
private final long creationDate;
private final String providerId;
private final String ingestId;
private final String revisionId;
private final String productionLine;
private final JSONObject result;
/**
* create an empty RestCommand. Data will be filled in on command of the "master" thread of the {@link IngestMultiplexerServer} (a non-terminating loop executing
* commands from the REST interface and
* sending ingest commands to the worker threads; a loop that must <b>never</b> be blocked),<br>
* when a response is generated for a status question (see {@link #answer(JSONObject)}. <b>This
* constructor should only be used by the master thread</b>
*/
private RestCommand(RestCommand restCommand, JSONObject result) {
this.sequenceNumber = restCommand.sequenceNumber;
this.creationDate = restCommand.creationDate;
this.op = restCommand.op;
this.providerId = restCommand.providerId;
this.ingestId = restCommand.ingestId;
this.revisionId = restCommand.revisionId;
this.productionLine = restCommand.productionLine;
this.result = result;
}
/**
* create a RestCommand to be processed by the IngestMultiplexerServer. Used by factory methods.
*
* @param op defines the command to be executed
* @param providerId
* @param ingestId
* @param revisionId
* @param productionLine may be null, in this case: applies to ALL production lines
*/
public RestCommand(RestOp op, String providerId, String ingestId, String revisionId, String productionLine) {
Check.notNull(op);
Check.notNull(providerId);
Check.notNull(ingestId);
Check.notNull(revisionId);
// productionLine may be null
this.sequenceNumber = sequenceGenerator.incrementAndGet();
this.creationDate = System.currentTimeMillis();
this.op = op;
this.providerId = providerId;
this.ingestId = ingestId;
this.revisionId = revisionId;
this.productionLine = productionLine;
this.result = null;
}
/**
* <b>Factory method:</b> create a RestCommand to be processed by the IngestMultiplexerServer.
*
* @param op defines the command to be executed
* @param providerId
* @param ingestId
* @param revisionId
* @param productionLine may be null, in this case: applies to ALL production lines
*/
public static RestCommand make(RestOp op, String providerId, String ingestId, String revisionId, String productionLine) {
return new RestCommand(op, providerId, ingestId, revisionId, productionLine);
}
/**
* create a INGESTS RestCommand to be processed by the IngestMultiplexerServer. This returns information about
* all ingests that are active or have been active since the last (re-)start of the {@link IngestMultiplexerServer}, for administration use. Used by a REST
* service.<br>
* This is a non-critical command, i.e. a command that may be delayed.
*/
public static RestCommand makeIngests() {
return new RestCommand(RestOp.INGESTS, " ", " ", " ", " ");
}
/**
* create a answer {@link RestCommand}, that matches the identifying key of the request- {@link RestCommand}. The identifying key is the tuple of op,
* providerId, ingestId, revisionId and
* productionLine.<br>
* <br>
* This method is used <b>only</b> for <b>non-critical commands</b>.
*
* @param result the result JSON-object generated by the master thread
* @return the answer object
*/
public RestCommand answer(JSONObject jsonResult) {
return new RestCommand(this, jsonResult);
}
/**
* return the sequence number, that uniquely identifies a command
*
* @return the sequence number
*/
public long getSequenceNumber() {
return this.sequenceNumber;
}
/**
* return the creation data of the request, not the creation data of the answer.
*
* @return the creation data of the request
*/
public long getCreationDate() {
return this.creationDate;
}
/**
* @return the revision identifier, consisting of providerId, ingestId and revisionId
*/
public String[] getRevisionIdentifier() {
String[] resultIdentifier = new String[3];
resultIdentifier[0] = this.providerId;
resultIdentifier[1] = this.ingestId;
resultIdentifier[2] = this.revisionId;
return resultIdentifier;
}
/**
* @return the operation, that defines the command to be executed
*/
public RestOp getOp() {
return this.op;
}
/**
* @return the provider id
*/
public String getProviderId() {
return this.providerId;
}
/**
* @return the ingest id
*/
public String getIngestId() {
return this.ingestId;
}
/**
* @return the revision id
*/
public String getRevisionId() {
return this.revisionId;
}
/**
* @return the name of the production line. May be null.
*/
public String getProductionLine() {
return this.productionLine;
}
/**
* return the result of this command.<br>
* A command is generated by a REST service. The result is generated by the {@link IngestMultiplexerServer}'s
* master thread and stored in the resonse object (see {@link RestCommand#answer(JSONObject)}
*
* @return the result of this command.<br>
*/
public JSONObject getResult() {
return this.result;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + this.ingestId.hashCode();
result = prime * result + this.op.hashCode();
result = prime * result + this.providerId.hashCode();
result = prime * result + this.revisionId.hashCode();
result = prime * result + ((this.productionLine == null) ? 0 : this.productionLine.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if ( this == obj ) {
return true;
}
if ( obj == null ) {
return false;
}
if ( getClass() != obj.getClass() ) {
return false;
}
RestCommand other = (RestCommand) obj;
if ( this.op != other.op ) {
return false;
}
if ( !this.ingestId.equals(other.ingestId) ) {
return false;
}
if ( !this.providerId.equals(other.providerId) ) {
return false;
}
if ( !this.revisionId.equals(other.revisionId) ) {
return false;
}
if ( this.productionLine == null ) {
return other.productionLine == null;
}
return this.productionLine.equals(other.productionLine);
}
@Override
public String toString() {
return "RestCommand [op="
+ this.op
+ ", providerId="
+ this.providerId
+ ", ingestId="
+ this.ingestId
+ ", revisionId="
+ this.revisionId
+ ", productionLine="
+ this.productionLine
+ "]";
}
public static enum RestOp {
/**
* start a revision or resume a revision, that has been paused
*/
START,
/**
* do not allow further SIPs to be ingested. SIPs that are being ingested continue until they finish.
* If the revision isn't started already, do nothing.
*/
PAUSE,
/**
* Abort a revision of an ingest. After a revision of an ingest has been aborted, it is illegal to
* send SIPs to ingestors anymore. The ingest is now available for a new revision to be started. It is
* possible, that SIPs that have been sent to an ingestor, will finish in the future and hit the
* archive. Furthermore, it is legal, that the ASC will sent SIPs to the aborted revision in the
* future. But these SIPs will be never processed.
*/
ABORT,
/**
* a revision is opened by a REST-call "open" by the ASC. Later a administrator will issue a REST-call
* "start". In the time between these calls the IM master thread must not know anything about this
* revision to be worked upon later. But for status commands it is nice to be informed about the
* possibility of a running revision in the future ... The master thread is informed about such a
* revision by this command.
*/
INFORM,
/**
* data describing the state of the (revision of the) ingest.
*/
DATA,
/**
* statistics data describing the (revision of the) ingest.
*/
STAT,
/**
* resynchronize: write all data cached so far into the file system.
*/
RESTART,
/**
* return all ingests, that<br>
* - had not terminated when the IngestMultiplexerServer started, or<br>
* - have been added to the IngestMultiplexerServer (start command)
*/
INGESTS,
/**
* if the revision exists, put it into the central IM map.
*/
MAKE_REVISION_VISIBLE,
/**
* check all SipReaders. Remove SipReaders whose data has been removed from the IM's filesystems.
*/
REMOVE_DELETED_REVISIONS,
/**
* nothing to be done
*/
NULL;
}
} | apache-2.0 |
hfcipriano/AFND_por_AFD | src/main/java/guru/nidi/graphviz/attribute/Arrow.java | 2159 | /*
* Copyright (C) 2015 Stefan Niederhauser (nidin@gmx.ch)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package guru.nidi.graphviz.attribute;
/**
 * Graphviz arrow attribute ("arrowhead" by default, "arrowtail" via {@link #tail()}).
 * The attribute value is a dot arrow-type string such as "normal", "odiamond" or
 * "lvee"; every modifier method returns a new Arrow built from the current value.
 * NOTE(review): the {@code value} field and the {@code key(...)}/{@code value(...)}
 * factory methods come from SimpleAttribute and are assumed to return Arrow — confirm
 * in the superclass.
 */
public class Arrow extends SimpleAttribute<String> {
    private Arrow(String key, String value) {
        super(key, value);
    }
    // arrows target the head of the edge unless switched with tail()
    private Arrow(String value) {
        super("arrowhead", value);
    }
    /** Retargets this arrow shape to the edge tail ("arrowtail"). */
    public Arrow tail() {
        return key("arrowtail");
    }
    /** Prefixes the shape with the open modifier 'o' (idempotent). */
    public Arrow open() {
        return value(value.charAt(0) == 'o' ? value : ("o" + value));
    }
    /** Clips the arrow to the left side of the edge ('l' modifier). */
    public Arrow left() {
        return arrowDir("l");
    }
    /** Clips the arrow to the right side of the edge ('r' modifier). */
    public Arrow right() {
        return arrowDir("r");
    }
    /** Concatenates two arrow shapes: the given arrow's shape is drawn first. */
    public Arrow and(Arrow arrow) {
        return value(arrow.value + value);
    }
    // Applies the direction modifier ('l' or 'r'), replacing any existing direction
    // and keeping a leading open modifier 'o' in front of it.
    private Arrow arrowDir(String dir) {
        switch (value.charAt(0)) {
            case 'l':
            case 'r':
                // already directional: swap the direction prefix
                return value(dir + value.substring(1));
            case 'o':
                // open arrow: keep the 'o', then insert or replace the direction after it
                final char s = value.charAt(1);
                return value("o" + dir + (s == 'r' || s == 'l' ? value.substring(2) : value.substring(1)));
            default:
                return value(dir + value);
        }
    }
    // the primitive arrow shapes defined by the dot language
    public static final Arrow
        BOX = new Arrow("box"),
        CROW = new Arrow("crow"),
        CURVE = new Arrow("curve"),
        DIAMOND = new Arrow("diamond"),
        DOT = new Arrow("dot"),
        ICURVE = new Arrow("icurve"),
        INV = new Arrow("inv"),
        NONE = new Arrow("none"),
        NORMAL = new Arrow("normal"),
        TEE = new Arrow("tee"),
        VEE = new Arrow("vee");
}
| apache-2.0 |
saschalamp/istats | istats/src/main/java/de/lampware/racing/istats/service/GlobalInformationService.java | 2882 | /**
* Copyright © 2016 Sascha Lamp (sascha.lamp@outlook.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.lampware.racing.istats.service;
import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import de.lampware.racing.istats.factory.IracingModelFactory;
import de.lampware.racing.istats.factory.JsonConstants;
import de.lampware.racing.istats.model.GlobalInformation;
import de.lampware.racing.istats.parameters.EmptyParameters;
/**
 * Retrieves global iRacing membersite information (track and car listings) by fetching
 * the Home.do page and extracting the JSON blobs embedded in its inline JavaScript.
 *
 * @author lamp.
 */
public class GlobalInformationService extends HttpGetService<EmptyParameters, GlobalInformation> {
    public GlobalInformationService(IracingModelFactory<GlobalInformation> modelFactory) {
        super(modelFactory);
    }
    @Override
    IracingServiceType getServiceType() {
        return IracingServiceType.MEMBERSITE;
    }
    @Override
    String getServiceLocation() {
        return "Home.do";
    }
    /**
     * Parses the fetched HTML and assembles a JSON object holding the track and car
     * listings (under TRACKS_KEY / CARS_KEY); either listing is omitted when absent.
     */
    @Override
    JsonElement createJson(String output) {
        Document document = Jsoup.parse(output);
        Elements scriptElements = document.getElementsByTag("script");
        // NOTE(review): hard-coded reliance on the listings living in the 26th <script>
        // tag of Home.do — brittle against site layout changes; confirm against the
        // current membersite markup
        Element scriptTag = scriptElements.get(25);
        String html = scriptTag.html();
        JsonObject jsonObject = new JsonObject();
        JsonParser jsonParser = new JsonParser();
        retrieveListingAsJson(html, jsonParser, "Track")
            .ifPresent(element -> jsonObject.add(JsonConstants.TRACKS_KEY, element));
        retrieveListingAsJson(html, jsonParser, "Car")
            .ifPresent(element -> jsonObject.add(JsonConstants.CARS_KEY, element));
        return jsonObject;
    }
    // Extracts the JSON payload passed to extractJSON('...') for the given listing,
    // e.g. listingName "Track" matches: var TrackListing = extractJSON('...');
    private Optional<JsonElement> retrieveListingAsJson(String html, JsonParser jsonParser, String listingName) {
        String stringPattern = "var " + listingName + "Listing = extractJSON\\('(.+)'\\);";
        Pattern pattern = Pattern.compile(stringPattern);
        Matcher matcher = pattern.matcher(html);
        if (matcher.find()) {
            String group = matcher.group(1);
            return Optional.of(jsonParser.parse(group));
        }
        return Optional.empty();
    }
}
| apache-2.0 |
mfpdev/mfp-advanced-adapters-samples | mfp-adapters-spring-integration/src/main/java/com/github/mfpdev/adapters/spring/integration/JAXRSResourcesRegistry.java | 805 | /**
* © Copyright 2016 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.mfpdev.adapters.spring.integration;
/**
 * Registry exposing the JAX-RS resource instances that this adapter contributes to
 * the runtime.
 */
public interface JAXRSResourcesRegistry {
    /**
     * @return the JAX-RS resource objects to be registered
     */
    Object[] getResources();
}
| apache-2.0 |
ieewbbwe/studySpace | coreLib/src/main/java/com/android_mobile/capture/DecodeThread.java | 2515 | /*
* Copyright (C) 2008 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android_mobile.capture;
import android.os.Handler;
import android.os.Looper;
import com.google.zxing.BarcodeFormat;
import com.google.zxing.DecodeHintType;
import com.google.zxing.ResultPointCallback;
import java.util.Hashtable;
import java.util.Vector;
import java.util.concurrent.CountDownLatch;
/**
 * This thread does all the heavy lifting of decoding the images.
 * Decode requests are posted to the handler created in {@link #run()}; construction of
 * that handler is signalled through a latch so callers can obtain it safely.
 *
 * @author dswitkin@google.com (Daniel Switkin)
 */
final class DecodeThread extends Thread {
    public static final String BARCODE_BITMAP = "barcode_bitmap";
    private final CaptureActivity activity;
    // decoding hints handed to the ZXing decoder (formats, charset, result-point callback)
    private final Hashtable<DecodeHintType, Object> hints;
    private Handler handler;
    // released once the handler has been created on this thread's looper
    private final CountDownLatch handlerInitLatch;
    DecodeThread(CaptureActivity activity, Vector<BarcodeFormat> decodeFormats,
            String characterSet, ResultPointCallback resultPointCallback) {
        this.activity = activity;
        handlerInitLatch = new CountDownLatch(1);
        hints = new Hashtable<DecodeHintType, Object>(3);
        // The prefs can't change while the thread is running, so pick them up
        // once here. With no explicit formats, fall back to all supported families.
        if (decodeFormats == null || decodeFormats.isEmpty()) {
            decodeFormats = new Vector<BarcodeFormat>();
            decodeFormats.addAll(DecodeFormatManager.ONE_D_FORMATS);
            decodeFormats.addAll(DecodeFormatManager.QR_CODE_FORMATS);
            decodeFormats.addAll(DecodeFormatManager.DATA_MATRIX_FORMATS);
        }
        hints.put(DecodeHintType.POSSIBLE_FORMATS, decodeFormats);
        if (characterSet != null) {
            hints.put(DecodeHintType.CHARACTER_SET, characterSet);
        }
        hints.put(DecodeHintType.NEED_RESULT_POINT_CALLBACK,
                resultPointCallback);
    }
    /**
     * Blocks until the worker handler has been created on this thread and returns it.
     * If the calling thread is interrupted while waiting, the interrupt status is
     * restored and the handler (possibly still null) is returned.
     */
    Handler getHandler() {
        try {
            handlerInitLatch.await();
        } catch (InterruptedException ie) {
            // Fix: restore the interrupt status instead of silently swallowing it,
            // so callers further up the stack can observe the interruption.
            Thread.currentThread().interrupt();
        }
        return handler;
    }
    @Override
    public void run() {
        Looper.prepare();
        handler = new DecodeHandler(activity, hints);
        // signal getHandler() that the handler is now available
        handlerInitLatch.countDown();
        Looper.loop();
    }
}
| apache-2.0 |
samwelkey/coolweather | app/src/test/java/com/welkey/welkeyever/coolweather/ExampleUnitTest.java | 326 | package com.welkey.welkeyever.coolweather;
import org.junit.Test;
import static org.junit.Assert.*;
/**
 * Template local unit test generated by Android Studio; runs on the host JVM
 * (no device or emulator required).
 */
public class ExampleUnitTest {
    // sanity check that the JVM test harness is wired up
    @Test
    public void addition_isCorrect() throws Exception {
        assertEquals(4, 2 + 2);
    }
}
Swrrt/Samza | samza-core/src/main/java/org/apache/samza/clustermanager/ClusterBasedJobCoordinator.java | 17447 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.clustermanager;
import com.google.common.annotations.VisibleForTesting;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.regex.Pattern;
import org.apache.samza.SamzaException;
import org.apache.samza.config.ClusterManagerConfig;
import org.apache.samza.config.Config;
import org.apache.samza.config.JobConfig;
import org.apache.samza.config.MapConfig;
import org.apache.samza.config.ShellCommandConfig;
import org.apache.samza.config.StorageConfig;
import org.apache.samza.config.TaskConfig;
import org.apache.samza.container.TaskName;
import org.apache.samza.coordinator.InputStreamsDiscoveredException;
import org.apache.samza.coordinator.JobModelManager;
import org.apache.samza.coordinator.MetadataResourceUtil;
import org.apache.samza.coordinator.PartitionChangeException;
import org.apache.samza.coordinator.StreamPartitionCountMonitor;
import org.apache.samza.coordinator.StreamRegexMonitor;
import org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore;
import org.apache.samza.coordinator.metadatastore.NamespaceAwareCoordinatorStreamStore;
import org.apache.samza.coordinator.stream.messages.SetChangelogMapping;
import org.apache.samza.job.model.ContainerModel;
import org.apache.samza.job.model.JobModel;
import org.apache.samza.job.model.JobModelUtil;
import org.apache.samza.job.model.TaskModel;
import org.apache.samza.metrics.JmxServer;
import org.apache.samza.metrics.MetricsRegistryMap;
import org.apache.samza.serializers.model.SamzaObjectMapper;
import org.apache.samza.startpoint.StartpointManager;
import org.apache.samza.storage.ChangelogStreamManager;
import org.apache.samza.system.StreamMetadataCache;
import org.apache.samza.system.SystemAdmins;
import org.apache.samza.system.SystemStream;
import org.apache.samza.util.CoordinatorStreamUtil;
import org.apache.samza.util.DiagnosticsUtil;
import org.apache.samza.util.SystemClock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Implements a JobCoordinator that is completely independent of the underlying cluster
 * manager system. This {@link ClusterBasedJobCoordinator} handles functionality common
 * to both Yarn and Mesos. It takes care of
 * 1. Requesting resources from an underlying {@link ClusterResourceManager}.
 * 2. Ensuring that placement of processors to resources happens (as per whether host affinity
 * is configured or not).
 *
 * Any offer based cluster management system that must integrate with Samza will merely
 * implement a {@link ResourceManagerFactory} and a {@link ClusterResourceManager}.
 *
 * This class is not thread-safe. For safe access in multi-threaded context, invocations
 * should be synchronized by the callers.
 *
 * TODO:
 * 1. Refactor ClusterResourceManager to also handle process liveness, process start
 * callbacks
 * 2. Refactor the JobModelReader to be an interface.
 * 3. Make ClusterBasedJobCoordinator implement the JobCoordinator API as in SAMZA-881.
 * 4. Refactor UI state variables.
 * 5. Unit tests.
 * 6. Document newly added configs.
 */
public class ClusterBasedJobCoordinator {
  private static final Logger LOG = LoggerFactory.getLogger(ClusterBasedJobCoordinator.class);
  private final static String METRICS_SOURCE_NAME = "ApplicationMaster";
  // Full job config, read back from the coordinator stream in the constructor.
  private final Config config;
  private final ClusterManagerConfig clusterManagerConfig;
  /**
   * State to track container failures, host-processor mappings
   */
  private final SamzaApplicationState state;
  //even though some of these can be converted to local variables, it will not be the case
  //as we add more methods to the JobCoordinator and completely implement SAMZA-881.
  /**
   * Handles callback for allocated containers, failed containers.
   */
  private final ContainerProcessManager containerProcessManager;
  /**
   * A JobModelManager to return and refresh the {@link org.apache.samza.job.model.JobModel} when required.
   */
  private final JobModelManager jobModelManager;
  /**
   * A ChangelogStreamManager to handle creation of changelog stream and map changelog stream partitions
   */
  private final ChangelogStreamManager changelogStreamManager;
  /*
   * The interval for polling the Task Manager for shutdown.
   */
  private final long jobCoordinatorSleepInterval;
  /*
   * Config specifies if a Jmx server should be started on this Job Coordinator
   */
  private final boolean isJmxEnabled;
  /**
   * Internal boolean to check if the job coordinator has already been started.
   */
  private final AtomicBoolean isStarted = new AtomicBoolean(false);
  /**
   * A boolean variable indicating whether the job has durable state stores in the configuration
   */
  private final boolean hasDurableStores;
  /**
   * The input topic partition count monitor
   */
  private final Optional<StreamPartitionCountMonitor> partitionMonitor;
  /**
   * The input stream regex monitor
   */
  private final Optional<StreamRegexMonitor> inputStreamRegexMonitor;
  /**
   * Metrics to track stats around container failures, needed containers etc.
   */
  private final MetricsRegistryMap metrics;
  private final CoordinatorStreamStore coordinatorStreamStore;
  private final SystemAdmins systemAdmins;
  /**
   * Internal variable for the instance of {@link JmxServer}
   */
  private JmxServer jmxServer;
  /**
   * Variable to keep the callback exception
   */
  // Written by monitor callbacks (other threads), read by the run() loop; hence volatile.
  volatile private Exception coordinatorException = null;
  /**
   * Creates a new ClusterBasedJobCoordinator instance from a config. Invoke run() to actually
   * run the jobcoordinator.
   *
   * @param coordinatorSystemConfig the coordinator stream config that can be used to read the
   *                                {@link org.apache.samza.job.model.JobModel} from.
   */
  public ClusterBasedJobCoordinator(Config coordinatorSystemConfig) {
    metrics = new MetricsRegistryMap();
    // The coordinator stream store must be initialized before the full job config can be read from it.
    coordinatorStreamStore = new CoordinatorStreamStore(coordinatorSystemConfig, metrics);
    coordinatorStreamStore.init();
    config = CoordinatorStreamUtil.readConfigFromCoordinatorStream(coordinatorStreamStore);
    // build a JobModelManager and ChangelogStreamManager and perform partition assignments.
    changelogStreamManager = new ChangelogStreamManager(new NamespaceAwareCoordinatorStreamStore(coordinatorStreamStore, SetChangelogMapping.TYPE));
    ClassLoader classLoader = getClass().getClassLoader();
    jobModelManager =
        JobModelManager.apply(config, changelogStreamManager.readPartitionMapping(), coordinatorStreamStore,
            classLoader, metrics);
    hasDurableStores = new StorageConfig(config).hasDurableStores();
    state = new SamzaApplicationState(jobModelManager);
    // The systemAdmins should be started before partitionMonitor can be used. And it should be stopped when this coordinator is stopped.
    systemAdmins = new SystemAdmins(config);
    partitionMonitor = getPartitionCountMonitor(config, systemAdmins);
    Set<SystemStream> inputSystemStreams = JobModelUtil.getSystemStreams(jobModelManager.jobModel());
    inputStreamRegexMonitor = getInputRegexMonitor(config, systemAdmins, inputSystemStreams);
    clusterManagerConfig = new ClusterManagerConfig(config);
    isJmxEnabled = clusterManagerConfig.getJmxEnabledOnJobCoordinator();
    jobCoordinatorSleepInterval = clusterManagerConfig.getJobCoordinatorSleepInterval();
    // build a container process Manager
    containerProcessManager = createContainerProcessManager(classLoader);
  }
  /**
   * Starts the JobCoordinator.
   */
  public void run() {
    // compareAndSet guards against a second invocation racing with the first.
    if (!isStarted.compareAndSet(false, true)) {
      LOG.warn("Attempting to start an already started job coordinator. ");
      return;
    }
    // set up JmxServer (if jmx is enabled)
    if (isJmxEnabled) {
      jmxServer = new JmxServer();
      state.jmxUrl = jmxServer.getJmxUrl();
      state.jmxTunnelingUrl = jmxServer.getTunnelingJmxUrl();
    } else {
      jmxServer = null;
    }
    try {
      // initialize JobCoordinator state
      LOG.info("Starting cluster based job coordinator");
      // write the diagnostics metadata file
      String jobName = new JobConfig(config).getName().get();
      String jobId = new JobConfig(config).getJobId();
      Optional<String> execEnvContainerId = Optional.ofNullable(System.getenv("CONTAINER_ID"));
      DiagnosticsUtil.writeMetadataFile(jobName, jobId, METRICS_SOURCE_NAME, execEnvContainerId, config);
      //create necessary checkpoint and changelog streams, if not created
      JobModel jobModel = jobModelManager.jobModel();
      MetadataResourceUtil metadataResourceUtil =
          new MetadataResourceUtil(jobModel, this.metrics, getClass().getClassLoader());
      metadataResourceUtil.createResources();
      // fan out the startpoints
      StartpointManager startpointManager = createStartpointManager();
      startpointManager.start();
      try {
        startpointManager.fanOut(JobModelUtil.getTaskToSystemStreamPartitions(jobModel));
      } finally {
        startpointManager.stop();
      }
      // Remap changelog partitions to tasks
      Map<TaskName, Integer> prevPartitionMappings = changelogStreamManager.readPartitionMapping();
      Map<TaskName, Integer> taskPartitionMappings = new HashMap<>();
      Map<String, ContainerModel> containers = jobModel.getContainers();
      for (ContainerModel containerModel : containers.values()) {
        for (TaskModel taskModel : containerModel.getTasks().values()) {
          taskPartitionMappings.put(taskModel.getTaskName(), taskModel.getChangelogPartition().getPartitionId());
        }
      }
      changelogStreamManager.updatePartitionMapping(prevPartitionMappings, taskPartitionMappings);
      containerProcessManager.start();
      systemAdmins.start();
      partitionMonitor.ifPresent(StreamPartitionCountMonitor::start);
      inputStreamRegexMonitor.ifPresent(StreamRegexMonitor::start);
      // Poll until the container manager asks for shutdown, a monitor callback stored an
      // exception (checkAndThrowException rethrows it), or this thread is interrupted.
      boolean isInterrupted = false;
      while (!containerProcessManager.shouldShutdown() && !checkAndThrowException() && !isInterrupted) {
        try {
          Thread.sleep(jobCoordinatorSleepInterval);
        } catch (InterruptedException e) {
          isInterrupted = true;
          LOG.error("Interrupted in job coordinator loop", e);
          Thread.currentThread().interrupt();
        }
      }
    } catch (Throwable e) {
      LOG.error("Exception thrown in the JobCoordinator loop", e);
      throw new SamzaException(e);
    } finally {
      onShutDown();
    }
  }
  // Never returns true: either rethrows the exception stored by a monitor callback, or
  // returns false so the run() loop condition reads naturally.
  private boolean checkAndThrowException() throws Exception {
    if (coordinatorException != null) {
      throw coordinatorException;
    }
    return false;
  }
  /**
   * Stops all components of the JobCoordinator.
   */
  private void onShutDown() {
    try {
      // Stop in roughly the reverse order of startup; each failure is logged, not rethrown,
      // so one failing component does not block the rest of the shutdown.
      partitionMonitor.ifPresent(StreamPartitionCountMonitor::stop);
      inputStreamRegexMonitor.ifPresent(StreamRegexMonitor::stop);
      systemAdmins.stop();
      containerProcessManager.stop();
      coordinatorStreamStore.close();
    } catch (Throwable e) {
      LOG.error("Exception while stopping cluster based job coordinator", e);
    }
    LOG.info("Stopped cluster based job coordinator");
    if (jmxServer != null) {
      try {
        jmxServer.stop();
        LOG.info("Stopped Jmx Server");
      } catch (Throwable e) {
        LOG.error("Exception while stopping jmx server", e);
      }
    }
  }
  private Optional<StreamPartitionCountMonitor> getPartitionCountMonitor(Config config, SystemAdmins systemAdmins) {
    StreamMetadataCache streamMetadata = new StreamMetadataCache(systemAdmins, 0, SystemClock.instance());
    Set<SystemStream> inputStreamsToMonitor = new TaskConfig(config).getAllInputStreams();
    if (inputStreamsToMonitor.isEmpty()) {
      throw new SamzaException("Input streams to a job can not be empty.");
    }
    return Optional.of(new StreamPartitionCountMonitor(inputStreamsToMonitor, streamMetadata, metrics,
        new JobConfig(config).getMonitorPartitionChangeFrequency(), streamsChanged -> {
        // Fail the jobs with durable state store. Otherwise, application state.status remains UNDEFINED s.t. YARN job will be restarted
        if (hasDurableStores) {
          LOG.error("Input topic partition count changed in a job with durable state. Failing the job. " +
              "Changed topics: {}", streamsChanged.toString());
          state.status = SamzaApplicationState.SamzaAppStatus.FAILED;
        }
        // Stored here and rethrown by checkAndThrowException() from the run() loop.
        coordinatorException = new PartitionChangeException("Input topic partition count changes detected for topics: " + streamsChanged.toString());
      }));
  }
  private Optional<StreamRegexMonitor> getInputRegexMonitor(Config config, SystemAdmins systemAdmins, Set<SystemStream> inputStreamsToMonitor) {
    JobConfig jobConfig = new JobConfig(config);
    // if input regex monitor is not enabled return empty
    if (jobConfig.getMonitorRegexDisabled()) {
      LOG.info("StreamRegexMonitor is disabled.");
      return Optional.empty();
    }
    StreamMetadataCache streamMetadata = new StreamMetadataCache(systemAdmins, 0, SystemClock.instance());
    if (inputStreamsToMonitor.isEmpty()) {
      throw new SamzaException("Input streams to a job can not be empty.");
    }
    // First list all rewriters
    Optional<String> rewritersList = jobConfig.getConfigRewriters();
    // if no rewriter is defined, there is nothing to monitor
    if (!rewritersList.isPresent()) {
      LOG.warn("No config rewriters are defined. No StreamRegexMonitor created.");
      return Optional.empty();
    }
    // Compile a map of each input-system to its corresponding input-monitor-regex patterns
    Map<String, Pattern> inputRegexesToMonitor = jobConfig.getMonitorRegexPatternMap(rewritersList.get());
    // if there are no regexes to monitor
    if (inputRegexesToMonitor.isEmpty()) {
      LOG.info("No input regexes are defined. No StreamRegexMonitor created.");
      return Optional.empty();
    }
    return Optional.of(new StreamRegexMonitor(inputStreamsToMonitor, inputRegexesToMonitor, streamMetadata, metrics,
        jobConfig.getMonitorRegexFrequency(), new StreamRegexMonitor.Callback() {
        @Override
        public void onInputStreamsChanged(Set<SystemStream> initialInputSet, Set<SystemStream> newInputStreams,
            Map<String, Pattern> regexesMonitored) {
          if (hasDurableStores) {
            LOG.error("New input system-streams discovered. Failing the job. New input streams: {}" +
                " Existing input streams: {}", newInputStreams, inputStreamsToMonitor);
            state.status = SamzaApplicationState.SamzaAppStatus.FAILED;
          }
          // Stored here and rethrown by checkAndThrowException() from the run() loop.
          coordinatorException = new InputStreamsDiscoveredException("New input streams discovered: " + newInputStreams);
        }
      }));
  }
  // The following two methods are package-private and for testing only
  @VisibleForTesting
  SamzaApplicationState.SamzaAppStatus getAppStatus() {
    // make sure to only return a unmodifiable copy of the status variable
    final SamzaApplicationState.SamzaAppStatus copy = state.status;
    return copy;
  }
  @VisibleForTesting
  StreamPartitionCountMonitor getPartitionMonitor() {
    return partitionMonitor.get();
  }
  @VisibleForTesting
  StartpointManager createStartpointManager() {
    return new StartpointManager(coordinatorStreamStore);
  }
  @VisibleForTesting
  ContainerProcessManager createContainerProcessManager(ClassLoader classLoader) {
    return new ContainerProcessManager(config, state, metrics, classLoader);
  }
  /**
   * The entry point for the {@link ClusterBasedJobCoordinator}
   * @param args args
   */
  public static void main(String[] args) {
    Config coordinatorSystemConfig;
    final String coordinatorSystemEnv = System.getenv(ShellCommandConfig.ENV_COORDINATOR_SYSTEM_CONFIG());
    try {
      //Read and parse the coordinator system config.
      LOG.info("Parsing coordinator system config {}", coordinatorSystemEnv);
      coordinatorSystemConfig =
          new MapConfig(SamzaObjectMapper.getObjectMapper().readValue(coordinatorSystemEnv, Config.class));
      LOG.info("Using the coordinator system config: {}.", coordinatorSystemConfig);
    } catch (IOException e) {
      LOG.error("Exception while reading coordinator stream config", e);
      throw new SamzaException(e);
    }
    ClusterBasedJobCoordinator jc = new ClusterBasedJobCoordinator(coordinatorSystemConfig);
    jc.run();
    LOG.info("Finished running ClusterBasedJobCoordinator");
  }
}
| apache-2.0 |
kuangrewawa/onos | core/net/src/test/java/org/onosproject/net/intent/impl/phase/InstallingTest.java | 5415 | /*
* Copyright 2015 Open Networking Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.net.intent.impl.phase;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.onosproject.TestApplicationId;
import org.onosproject.core.ApplicationId;
import org.onosproject.core.IdGenerator;
import org.onosproject.net.ConnectPoint;
import org.onosproject.net.DefaultLink;
import org.onosproject.net.DefaultPath;
import org.onosproject.net.Link;
import org.onosproject.net.Path;
import org.onosproject.net.flow.DefaultTrafficSelector;
import org.onosproject.net.flow.DefaultTrafficTreatment;
import org.onosproject.net.flow.FlowRuleOperations;
import org.onosproject.net.flow.TrafficSelector;
import org.onosproject.net.flow.TrafficTreatment;
import org.onosproject.net.intent.Intent;
import org.onosproject.net.intent.IntentData;
import org.onosproject.net.intent.MockIdGenerator;
import org.onosproject.net.intent.PathIntent;
import org.onosproject.net.intent.PointToPointIntent;
import org.onosproject.net.intent.impl.IntentInstallationException;
import org.onosproject.net.intent.impl.IntentProcessor;
import org.onosproject.net.provider.ProviderId;
import org.onosproject.store.Timestamp;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expectLastCall;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertThat;
import static org.onosproject.net.DeviceId.deviceId;
import static org.onosproject.net.Link.Type.DIRECT;
import static org.onosproject.net.PortNumber.portNumber;
import static org.onosproject.net.intent.IntentState.INSTALL_REQ;
/**
 * Unit tests for Installing phase.
 */
public class InstallingTest {
    private final ApplicationId appId = new TestApplicationId("test");
    private final ProviderId pid = new ProviderId("of", "test");
    private final TrafficSelector selector = DefaultTrafficSelector.emptySelector();
    private final TrafficTreatment treatment = DefaultTrafficTreatment.emptyTreatment();
    // Two devices with two ports each; a single direct link connects them.
    private final ConnectPoint cp1 = new ConnectPoint(deviceId("1"), portNumber(1));
    private final ConnectPoint cp2 = new ConnectPoint(deviceId("1"), portNumber(2));
    private final ConnectPoint cp3 = new ConnectPoint(deviceId("2"), portNumber(1));
    private final ConnectPoint cp4 = new ConnectPoint(deviceId("2"), portNumber(2));
    private final List<Link> links = Arrays.asList(new DefaultLink(pid, cp2, cp4, DIRECT));
    private final Path path = new DefaultPath(pid, links, 10);
    // input: the submitted point-to-point intent; compiled: its installable path intent.
    private PointToPointIntent input;
    private PathIntent compiled;
    private IdGenerator idGenerator;
    private IntentProcessor processor;
    private Timestamp version;
    @Before
    public void setUp() {
        processor = createMock(IntentProcessor.class);
        version = createMock(Timestamp.class);
        idGenerator = new MockIdGenerator();
        Intent.bindIdGenerator(idGenerator);
        // Intent creation should be placed after binding an ID generator
        input = new PointToPointIntent(appId, selector, treatment, cp1, cp3);
        compiled = new PathIntent(appId, selector, treatment, path);
    }
    @After
    public void tearDown() {
        // Unbind so other test classes can bind their own generator.
        Intent.unbindIdGenerator(idGenerator);
    }
    /**
     * Tests a next phase when no exception occurs.
     */
    @Test
    public void testMoveToNextPhaseWithoutError() {
        IntentData pending = new IntentData(input, INSTALL_REQ, version);
        pending.setInstallables(Arrays.asList(compiled));
        // Expect exactly one applyFlowRules() call with these operations.
        FlowRuleOperations operations = createMock(FlowRuleOperations.class);
        processor.applyFlowRules(operations);
        replay(processor);
        Installing sut = new Installing(processor, pending, operations);
        Optional<IntentProcessPhase> executed = sut.execute();
        verify(processor);
        // Successful installation transitions to the Installed phase.
        assertThat(executed.get(), is(instanceOf(Installed.class)));
    }
    /**
     * Tests the next phase when IntentInstallationException occurs.
     */
    @Test
    public void testWhenIntentInstallationExceptionOccurs() {
        IntentData pending = new IntentData(input, INSTALL_REQ, version);
        pending.setInstallables(Arrays.asList(compiled));
        FlowRuleOperations operations = createMock(FlowRuleOperations.class);
        processor.applyFlowRules(operations);
        // Make the mocked installation fail with the domain exception under test.
        expectLastCall().andThrow(new IntentInstallationException());
        replay(processor);
        Installing sut = new Installing(processor, pending, operations);
        Optional<IntentProcessPhase> executed = sut.execute();
        verify(processor);
        // A failed installation transitions to the InstallingFailed phase.
        assertThat(executed.get(), is(instanceOf(InstallingFailed.class)));
    }
}
| apache-2.0 |
Huck/worldcoinj | core/src/main/java/com/google/worldcoin/core/Block.java | 46212 | /**
* Copyright 2011 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.worldcoin.core;
import com.google.worldcoin.script.Script;
import com.google.worldcoin.script.ScriptBuilder;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nullable;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.OutputStream;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Date;
import java.util.LinkedList;
import java.util.List;
import static com.google.worldcoin.core.Utils.doubleDigest;
import static com.google.worldcoin.core.Utils.doubleDigestTwoBuffers;
import static com.google.worldcoin.core.Utils.scryptDigest;
/**
* <p>A block is a group of transactions, and is one of the fundamental data structures of the Bitcoin system.
* It records a set of {@link Transaction}s together with some data that links it into a place in the global block
* chain, and proves that a difficult calculation was done over its contents. See
* <a href="http://www.worldcoin.org/worldcoin.pdf">the Bitcoin technical paper</a> for
* more detail on blocks. <p/>
*
* To get a block, you can either build one from the raw bytes you can get from another implementation, or request one
* specifically using {@link Peer#getBlock(Sha256Hash)}, or grab one from a downloaded {@link BlockChain}.
*/
public class Block extends Message {
    private static final Logger log = LoggerFactory.getLogger(Block.class);
    private static final long serialVersionUID = 2738848929966035281L;
    /** How many bytes are required to represent a block header WITHOUT the trailing 00 length byte. */
    public static final int HEADER_SIZE = 80;
    static final long ALLOWED_TIME_DRIFT = 2 * 60 * 60; // Same value as official client.
    /**
     * A constant shared by the entire network: how large in bytes a block is allowed to be. One day we may have to
     * upgrade everyone to change this, so Bitcoin can continue to grow. For now it exists as an anti-DoS measure to
     * avoid somebody creating a titanically huge but valid block and forcing everyone to download/store it forever.
     */
    public static final int MAX_BLOCK_SIZE = 1 * 1000 * 1000;
    /**
     * A "sigop" is a signature verification operation. Because they're expensive we also impose a separate limit on
     * the number in a block to prevent somebody mining a huge block that has way more sigops than normal, so is very
     * expensive/slow to verify.
     */
    public static final int MAX_BLOCK_SIGOPS = MAX_BLOCK_SIZE / 50;
    /** A value for difficultyTarget (nBits) that allows half of all possible hash solutions. Used in unit testing. */
    public static final long EASIEST_DIFFICULTY_TARGET = 0x207fFFFFL;
    // Fields defined as part of the protocol format.
    private long version;
    private Sha256Hash prevBlockHash;
    private Sha256Hash merkleRoot;
    private long time; // UNIX timestamp ("nTime" in the protocol).
    private long difficultyTarget; // "nBits"
    private long nonce;
    /** If null, it means this object holds only the headers. */
    List<Transaction> transactions;
    /** Stores the hash of the block. If null, getHash() will recalculate it. */
    private transient Sha256Hash hash;
    // Cached scrypt proof-of-work hash; presumably computed lazily like 'hash' — not set in this chunk.
    private transient Sha256Hash scryptHash;
    // Lazy-parsing bookkeeping: which parts have been decoded, and whether the backing
    // byte array still matches the field values (so serialization can reuse it).
    private transient boolean headerParsed;
    private transient boolean transactionsParsed;
    private transient boolean headerBytesValid;
    private transient boolean transactionBytesValid;
    // Blocks can be encoded in a way that will use more bytes than is optimal (due to VarInts having multiple encodings)
    // MAX_BLOCK_SIZE must be compared to the optimal encoding, not the actual encoding, so when parsing, we keep track
    // of the size of the ideal encoding in addition to the actual message size (which Message needs)
    private transient int optimalEncodingMessageSize;
    /** Special case constructor, used for the genesis node, cloneAsHeader and unit tests. */
    Block(NetworkParameters params) {
        super(params);
        // Set up a few basic things. We are not complete after this though.
        version = 1;
        // Default nBits for a fresh block; NOTE(review): presumably the network's easiest
        // scrypt target — confirm against NetworkParameters.
        difficultyTarget = 0x1e0ffff0L;
        time = System.currentTimeMillis() / 1000; // "now" in UNIX seconds
        prevBlockHash = Sha256Hash.ZERO_HASH;
        length = 80; // header-only, see HEADER_SIZE
    }
    /**
     * Constructs a block object from the Bitcoin wire format, parsing eagerly and without
     * retaining the backing byte array.
     *
     * @throws ProtocolException if the bytes cannot be decoded as a block
     */
    public Block(NetworkParameters params, byte[] payloadBytes) throws ProtocolException {
        super(params, payloadBytes, 0, false, false, payloadBytes.length);
    }
    /**
     * Constructs a block object from the Bitcoin wire format.
     * @param params NetworkParameters object.
     * @param parseLazy Whether to perform a full parse immediately or delay until a read is requested.
     * @param parseRetain Whether to retain the backing byte array for quick reserialization.
     * If true and the backing byte array is invalidated due to modification of a field then
     * the cached bytes may be repopulated and retained if the message is serialized again in the future.
     * @param length The length of message if known. Usually this is provided when deserializing of the wire
     * as the length will be provided as part of the header. If unknown then set to Message.UNKNOWN_LENGTH
     * @throws ProtocolException if the bytes cannot be decoded as a block
     */
    public Block(NetworkParameters params, byte[] payloadBytes, boolean parseLazy, boolean parseRetain, int length)
            throws ProtocolException {
        super(params, payloadBytes, 0, parseLazy, parseRetain, length);
    }
/**
* Construct a block initialized with all the given fields.
* @param params Which network the block is for.
* @param version This should usually be set to 1 or 2, depending on if the height is in the coinbase input.
* @param prevBlockHash Reference to previous block in the chain or {@link Sha256Hash#ZERO_HASH} if genesis.
* @param merkleRoot The root of the merkle tree formed by the transactions.
* @param time UNIX time when the block was mined.
* @param difficultyTarget Number which this block hashes lower than.
* @param nonce Arbitrary number to make the block hash lower than the target.
* @param transactions List of transactions including the coinbase.
*/
public Block(NetworkParameters params, long version, Sha256Hash prevBlockHash, Sha256Hash merkleRoot, long time,
long difficultyTarget, long nonce, List<Transaction> transactions) {
super(params);
this.version = version;
this.prevBlockHash = prevBlockHash;
this.merkleRoot = merkleRoot;
this.time = time;
this.difficultyTarget = difficultyTarget;
this.nonce = nonce;
this.transactions = new LinkedList<Transaction>();
this.transactions.addAll(transactions);
}
/**
* <p>A utility method that calculates how much new Bitcoin would be created by the block at the given height.
* The inflation of Bitcoin is predictable and drops roughly every 4 years (210,000 blocks). At the dawn of
* the system it was 50 coins per block, in late 2012 it went to 25 coins per block, and so on. The size of
* a coinbase transaction is inflation plus fees.</p>
*
* <p>The half-life is controlled by {@link com.google.worldcoin.core.NetworkParameters#getSubsidyDecreaseBlockCount()}.
* </p>
*/
public BigInteger getBlockInflation(int height) {
return Utils.toNanoCoins(50, 0).shiftRight(height / params.getSubsidyDecreaseBlockCount());
}
    // Custom Java-serialization hook: after default deserialization, reset the cached hash
    // so getHash() recomputes it on demand.
    private void readObject(ObjectInputStream ois) throws ClassNotFoundException, IOException {
        ois.defaultReadObject();
        // This code is not actually necessary, as transient fields are initialized to the default value which is in
        // this case null. However it clears out a FindBugs warning and makes it explicit what we're doing.
        hash = null;
    }
    // Decodes the 80-byte header starting at 'offset' and caches the block hash.
    // The read* calls advance 'cursor', so the field order here must match the wire format exactly.
    private void parseHeader() throws ProtocolException {
        if (headerParsed)
            return;
        cursor = offset;
        version = readUint32();
        prevBlockHash = readHash();
        merkleRoot = readHash();
        time = readUint32();
        difficultyTarget = readUint32();
        nonce = readUint32();
        // The block hash is the double-SHA256 of the raw header bytes, byte-reversed into display order.
        hash = new Sha256Hash(Utils.reverseBytes(Utils.doubleDigest(bytes, offset, cursor)));
        headerParsed = true;
        // The raw header bytes remain a valid serialization only if we are retaining them.
        headerBytesValid = parseRetain;
    }
    // Decodes the transaction list that follows the 80-byte header, tracking both the actual
    // message size (via 'cursor') and the optimal (minimal VarInt) encoding size.
    private void parseTransactions() throws ProtocolException {
        if (transactionsParsed)
            return;
        cursor = offset + HEADER_SIZE;
        optimalEncodingMessageSize = HEADER_SIZE;
        if (bytes.length == cursor) {
            // This message is just a header, it has no transactions.
            transactionsParsed = true;
            transactionBytesValid = false;
            return;
        }
        int numTransactions = (int) readVarInt();
        optimalEncodingMessageSize += VarInt.sizeOf(numTransactions);
        transactions = new ArrayList<Transaction>(numTransactions);
        for (int i = 0; i < numTransactions; i++) {
            Transaction tx = new Transaction(params, bytes, cursor, this, parseLazy, parseRetain, UNKNOWN_LENGTH);
            // Label the transaction as coming from the P2P network, so code that cares where we first saw it knows.
            tx.getConfidence().setSource(TransactionConfidence.Source.NETWORK);
            transactions.add(tx);
            cursor += tx.getMessageSize();
            optimalEncodingMessageSize += tx.getOptimalEncodingMessageSize();
        }
        // No need to set length here. If length was not provided then it should be set at the end of parseLight().
        // If this is a genuine lazy parse then length must have been provided to the constructor.
        transactionsParsed = true;
        transactionBytesValid = parseRetain;
    }
    // Full eager parse: header first (positions 'cursor'), then transactions, then record
    // the total decoded length. Order matters — both sub-parsers advance 'cursor'.
    void parse() throws ProtocolException {
        parseHeader();
        parseTransactions();
        length = cursor - offset;
    }
public int getOptimalEncodingMessageSize() {
if (optimalEncodingMessageSize != 0)
return optimalEncodingMessageSize;
maybeParseTransactions();
if (optimalEncodingMessageSize != 0)
return optimalEncodingMessageSize;
optimalEncodingMessageSize = getMessageSize();
return optimalEncodingMessageSize;
}
    // Lightweight parse used to establish 'length' and the bytes-valid flags without fully
    // decoding the block unless forced to.
    protected void parseLite() throws ProtocolException {
        // Ignore the header since it has fixed length. If length is not provided we will have to
        // invoke a light parse of transactions to calculate the length.
        if (length == UNKNOWN_LENGTH) {
            Preconditions.checkState(parseLazy,
                    "Performing lite parse of block transaction as block was initialised from byte array " +
                    "without providing length. This should never need to happen.");
            parseTransactions();
            length = cursor - offset;
        } else {
            // '&&' binds tighter than '||': the bytes are valid if the transactions were not
            // parsed yet, or if we retain bytes and they extend beyond the header.
            transactionBytesValid = !transactionsParsed || parseRetain && length > HEADER_SIZE;
        }
        // Same precedence note as above; '>=' here because a header-only block is exactly HEADER_SIZE.
        headerBytesValid = !headerParsed || parseRetain && length >= HEADER_SIZE;
    }
/*
* Block uses some special handling for lazy parsing and retention of cached bytes. Parsing and serializing the
* block header and the transaction list are both non-trivial so there are good efficiency gains to be had by
* separating them. There are many cases where a user may need to access or change one or the other but not both.
*
* With this in mind we ignore the inherited checkParse() and unCache() methods and implement a separate version
* of them for both header and transactions.
*
* Serializing methods are also handled in their own way. Whilst they deal with separate parts of the block structure
* there are some interdependencies. For example altering a tx requires invalidating the Merkle root and therefore
* the cached header bytes.
*/
    // Parses the header on demand, translating ProtocolException into the unchecked
    // LazyParseException expected by lazy-mode getters. Frees the backing bytes once
    // neither the header nor the transactions need them for reserialization.
    private void maybeParseHeader() {
        if (headerParsed || bytes == null)
            return;
        try {
            parseHeader();
            if (!(headerBytesValid || transactionBytesValid))
                bytes = null;
        } catch (ProtocolException e) {
            throw new LazyParseException(
                    "ProtocolException caught during lazy parse. For safe access to fields call ensureParsed before attempting read or write access",
                    e);
        }
    }
    // Parses the transaction list on demand (see maybeParseHeader for the exception
    // translation rationale). When not retaining bytes, invalidates the cached transaction
    // bytes and frees the array once the header has been parsed too.
    private void maybeParseTransactions() {
        if (transactionsParsed || bytes == null)
            return;
        try {
            parseTransactions();
            if (!parseRetain) {
                transactionBytesValid = false;
                if (headerParsed)
                    bytes = null;
            }
        } catch (ProtocolException e) {
            throw new LazyParseException(
                    "ProtocolException caught during lazy parse. For safe access to fields call ensureParsed before attempting read or write access",
                    e);
        }
    }
    /**
     * Always throws. Block splits lazy parsing into separate header and transaction steps,
     * so the single-step parse entry point inherited from the message superclass must never
     * be used; use maybeParseHeader() / maybeParseTransactions() instead.
     */
    protected void maybeParse() {
        throw new LazyParseException(
                "checkParse() should never be called on a Block. Instead use checkParseHeader() and checkParseTransactions()");
    }
/**
* In lazy parsing mode access to getters and setters may throw an unchecked LazyParseException. If guaranteed
* safe access is required this method will force parsing to occur immediately thus ensuring LazyParseExeption will
* never be thrown from this Message. If the Message contains child messages (e.g. a Block containing Transaction
* messages) this will not force child messages to parse.
*
* This method ensures parsing of both headers and transactions.
*
* @throws ProtocolException
*/
public void ensureParsed() throws ProtocolException {
try {
maybeParseHeader();
maybeParseTransactions();
} catch (LazyParseException e) {
if (e.getCause() instanceof ProtocolException)
throw (ProtocolException) e.getCause();
throw new ProtocolException(e);
}
}
/**
* In lazy parsing mode access to getters and setters may throw an unchecked LazyParseException. If guaranteed
* safe access is required this method will force parsing to occur immediately thus ensuring LazyParseExeption
* will never be thrown from this Message. If the Message contains child messages (e.g. a Block containing
* Transaction messages) this will not force child messages to parse.
*
* This method ensures parsing of headers only.
*
* @throws ProtocolException
*/
public void ensureParsedHeader() throws ProtocolException {
try {
maybeParseHeader();
} catch (LazyParseException e) {
if (e.getCause() instanceof ProtocolException)
throw (ProtocolException) e.getCause();
throw new ProtocolException(e);
}
}
/**
* In lazy parsing mode access to getters and setters may throw an unchecked LazyParseException. If guaranteed
* safe access is required this method will force parsing to occur immediately thus ensuring LazyParseExeption will
* never be thrown from this Message. If the Message contains child messages (e.g. a Block containing Transaction
* messages) this will not force child messages to parse.
*
* This method ensures parsing of transactions only.
*
* @throws ProtocolException
*/
public void ensureParsedTransactions() throws ProtocolException {
try {
maybeParseTransactions();
} catch (LazyParseException e) {
if (e.getCause() instanceof ProtocolException)
throw (ProtocolException) e.getCause();
throw new ProtocolException(e);
}
}
    // default for testing
    /**
     * Serializes the block header to the stream, preferring a direct copy of the
     * cached wire bytes when they are still valid, and falling back to
     * field-by-field little-endian encoding otherwise.
     */
    void writeHeader(OutputStream stream) throws IOException {
        // try for cached write first
        if (headerBytesValid && bytes != null && bytes.length >= offset + HEADER_SIZE) {
            stream.write(bytes, offset, HEADER_SIZE);
            return;
        }
        // fall back to manual write
        maybeParseHeader();
        Utils.uint32ToByteStreamLE(version, stream);
        // Hashes are stored big-endian internally but serialized little-endian.
        stream.write(Utils.reverseBytes(prevBlockHash.getBytes()));
        stream.write(Utils.reverseBytes(getMerkleRoot().getBytes()));
        Utils.uint32ToByteStreamLE(time, stream);
        Utils.uint32ToByteStreamLE(difficultyTarget, stream);
        Utils.uint32ToByteStreamLE(nonce, stream);
    }
    /**
     * Serializes the transaction list (VarInt count followed by each transaction),
     * using the cached wire bytes when valid. Writes nothing for a header-only block.
     */
    private void writeTransactions(OutputStream stream) throws IOException {
        // check for no transaction conditions first
        // must be a more efficient way to do this but I'm tired atm.
        if (transactions == null && transactionsParsed) {
            return;
        }
        // confirmed we must have transactions either cached or as objects.
        if (transactionBytesValid && bytes != null && bytes.length >= offset + length) {
            stream.write(bytes, offset + HEADER_SIZE, length - HEADER_SIZE);
            return;
        }
        if (transactions != null) {
            stream.write(new VarInt(transactions.size()).encode());
            for (Transaction tx : transactions) {
                tx.worldcoinSerialize(stream);
            }
        }
    }
    /**
     * Serializes the full block (header plus transactions) to a byte array.
     * Special handling to check if we have a valid byte array for both header
     * and transactions, in which case the cached bytes are returned directly.
     * Never throws: the fallback path writes to an in-memory stream only.
     */
    public byte[] worldcoinSerialize() {
        // we have completely cached byte array.
        if (headerBytesValid && transactionBytesValid) {
            Preconditions.checkNotNull(bytes, "Bytes should never be null if headerBytesValid && transactionBytesValid");
            if (length == bytes.length) {
                return bytes;
            } else {
                // byte array is offset so copy out the correct range.
                byte[] buf = new byte[length];
                System.arraycopy(bytes, offset, buf, 0, length);
                return buf;
            }
        }
        // At least one of the two cacheable components is invalid
        // so fall back to stream write since we can't be sure of the length.
        ByteArrayOutputStream stream = new UnsafeByteArrayOutputStream(length == UNKNOWN_LENGTH ? HEADER_SIZE + guessTransactionsLength() : length);
        try {
            writeHeader(stream);
            writeTransactions(stream);
        } catch (IOException e) {
            // Cannot happen, we are serializing to a memory stream.
        }
        return stream.toByteArray();
    }
    @Override
    protected void worldcoinSerializeToStream(OutputStream stream) throws IOException {
        writeHeader(stream);
        // We may only have enough data to write the header.
        writeTransactions(stream);
    }
/**
* Provides a reasonable guess at the byte length of the transactions part of the block.
* The returned value will be accurate in 99% of cases and in those cases where not will probably slightly
* oversize.
*
* This is used to preallocate the underlying byte array for a ByteArrayOutputStream. If the size is under the
* real value the only penalty is resizing of the underlying byte array.
*/
private int guessTransactionsLength() {
if (transactionBytesValid)
return bytes.length - HEADER_SIZE;
if (transactions == null)
return 0;
int len = VarInt.sizeOf(transactions.size());
for (Transaction tx : transactions) {
// 255 is just a guess at an average tx length
len += tx.length == UNKNOWN_LENGTH ? 255 : tx.length;
}
return len;
}
    /**
     * Invalidates cached serialized bytes. Only ever invoked by a child
     * transaction when it mutates, hence only the transaction cache is touched.
     */
    protected void unCache() {
        // Since we have alternate uncache methods to use internally this will only ever be called by a child
        // transaction so we only need to invalidate that part of the cache.
        unCacheTransactions();
    }
    /**
     * Drops the cached header bytes and the derived hash/checksum after a header
     * field changes. The backing array is released only if the transaction bytes
     * are not still relying on it.
     */
    private void unCacheHeader() {
        maybeParseHeader();
        headerBytesValid = false;
        if (!transactionBytesValid)
            bytes = null;
        hash = null;
        checksum = null;
    }
    /**
     * Drops the cached transaction bytes. Also invalidates the header cache,
     * because any transaction change alters the merkle root stored in the header.
     */
    private void unCacheTransactions() {
        maybeParseTransactions();
        transactionBytesValid = false;
        if (!headerBytesValid)
            bytes = null;
        // Current implementation has to uncache headers as well as any change to a tx will alter the merkle root. In
        // future we can go more granular and cache merkle root separately so rest of the header does not need to be
        // rewritten.
        unCacheHeader();
        // Clear merkleRoot last as it may end up being parsed during unCacheHeader().
        merkleRoot = null;
    }
/**
* Calculates the block hash by serializing the block and hashing the
* resulting bytes.
*/
private Sha256Hash calculateHash() {
try {
ByteArrayOutputStream bos = new UnsafeByteArrayOutputStream(HEADER_SIZE);
writeHeader(bos);
return new Sha256Hash(Utils.reverseBytes(doubleDigest(bos.toByteArray())));
} catch (IOException e) {
throw new RuntimeException(e); // Cannot happen.
}
}
private Sha256Hash calculateScryptHash() {
try {
ByteArrayOutputStream bos = new UnsafeByteArrayOutputStream(HEADER_SIZE);
writeHeader(bos);
return new Sha256Hash(Utils.reverseBytes(scryptDigest(bos.toByteArray())));
} catch (IOException e) {
throw new RuntimeException(e); // Cannot happen.
}
}
/**
* Returns the hash of the block (which for a valid, solved block should be below the target) in the form seen on
* the block explorer. If you call this on block 1 in the production chain
* you will get "00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048".
*/
public String getHashAsString() {
return getHash().toString();
}
public String getScryptHashAsString() {
return getScryptHash().toString();
}
/**
* Returns the hash of the block (which for a valid, solved block should be
* below the target). Big endian.
*/
public Sha256Hash getHash() {
if (hash == null)
hash = calculateHash();
return hash;
}
public Sha256Hash getScryptHash() {
if (scryptHash == null)
scryptHash = calculateScryptHash();
return scryptHash;
}
    /**
     * The number that is one greater than the largest representable SHA-256
     * hash.
     */
    static private BigInteger LARGEST_HASH = BigInteger.ONE.shiftLeft(256);
    /**
     * Returns the work represented by this block.<p>
     *
     * Work is defined as the number of tries needed to solve a block in the
     * average case. Consider a difficulty target that covers 5% of all possible
     * hash values. Then the work of the block will be 20. As the target gets
     * lower, the amount of work goes up.
     *
     * @throws VerificationException if the compact difficulty target decodes to an out-of-range value
     */
    public BigInteger getWork() throws VerificationException {
        BigInteger target = getDifficultyTargetAsInteger();
        // target + 1 guards against division by zero for a (theoretical) zero target.
        return LARGEST_HASH.divide(target.add(BigInteger.ONE));
    }
/** Returns a copy of the block, but without any transactions. */
public Block cloneAsHeader() {
maybeParseHeader();
Block block = new Block(params);
block.nonce = nonce;
block.prevBlockHash = prevBlockHash.duplicate();
block.merkleRoot = getMerkleRoot().duplicate();
block.version = version;
block.time = time;
block.difficultyTarget = difficultyTarget;
block.transactions = null;
block.hash = getHash().duplicate();
return block;
}
/**
* Returns a multi-line string containing a description of the contents of
* the block. Use for debugging purposes only.
*/
@Override
public String toString() {
StringBuilder s = new StringBuilder("v");
s.append(version);
s.append(" block: \n");
s.append(" previous block: ");
s.append(getPrevBlockHash());
s.append("\n");
s.append(" merkle root: ");
s.append(getMerkleRoot());
s.append("\n");
s.append(" time: [");
s.append(time);
s.append("] ");
s.append(new Date(time * 1000));
s.append("\n");
s.append(" difficulty target (nBits): ");
s.append(difficultyTarget);
s.append("\n");
s.append(" nonce: ");
s.append(nonce);
s.append("\n");
if (transactions != null && transactions.size() > 0) {
s.append(" with ").append(transactions.size()).append(" transaction(s):\n");
for (Transaction tx : transactions) {
s.append(tx.toString());
}
}
return s.toString();
}
    /**
     * <p>Finds a value of nonce that makes the blocks hash lower than the difficulty target. This is called mining, but
     * solve() is far too slow to do real mining with. It exists only for unit testing purposes.
     *
     * <p>This can loop forever if a solution cannot be found solely by incrementing nonce. It doesn't change
     * extraNonce.</p>
     */
    public void solve() {
        maybeParseHeader();
        while (true) {
            try {
                // Is our proof of work valid yet?
                if (checkProofOfWork(false))
                    return;
                // No, so increment the nonce and try again.
                // setNonce() also clears the cached hashes, so the next
                // checkProofOfWork() recomputes them from the new header.
                setNonce(getNonce() + 1);
            } catch (VerificationException e) {
                throw new RuntimeException(e); // Cannot happen.
            }
        }
    }
    /**
     * Returns the difficulty target as a 256 bit value that can be compared to a SHA-256 hash. Inside a block the
     * target is represented using a compact form. If this form decodes to a value that is out of bounds, an exception
     * is thrown.
     *
     * @throws VerificationException if the decoded target is non-positive or above the proof-of-work limit
     */
    public BigInteger getDifficultyTargetAsInteger() throws VerificationException {
        maybeParseHeader();
        BigInteger target = Utils.decodeCompactBits(difficultyTarget);
        if (target.compareTo(BigInteger.ZERO) <= 0 || target.compareTo(params.proofOfWorkLimit) > 0)
            throw new VerificationException("Difficulty target is bad: " + target.toString());
        return target;
    }
    /**
     * Returns true if the hash of the block is OK (lower than difficulty target).
     * Note: the comparison uses the scrypt hash, per this coin's proof-of-work rule.
     */
    private boolean checkProofOfWork(boolean throwException) throws VerificationException {
        // This part is key - it is what proves the block was as difficult to make as it claims
        // to be. Note however that in the context of this function, the block can claim to be
        // as difficult as it wants to be .... if somebody was able to take control of our network
        // connection and fork us onto a different chain, they could send us valid blocks with
        // ridiculously easy difficulty and this function would accept them.
        //
        // To prevent this attack from being possible, elsewhere we check that the difficultyTarget
        // field is of the right value. This requires us to have the preceeding blocks.
        BigInteger target = getDifficultyTargetAsInteger();
        BigInteger h = getScryptHash().toBigInteger();
        if (h.compareTo(target) > 0) {
            // Proof of work check failed!
            if (throwException)
                throw new VerificationException("Hash is higher than target: " + getHashAsString() + " vs "
                        + target.toString(16));
            else
                return false;
        }
        return true;
    }
private void checkTimestamp() throws VerificationException {
maybeParseHeader();
// Allow injection of a fake clock to allow unit testing.
long currentTime = Utils.currentTimeMillis()/1000;
if (time > currentTime + ALLOWED_TIME_DRIFT)
throw new VerificationException("Block too far in future");
}
private void checkSigOps() throws VerificationException {
// Check there aren't too many signature verifications in the block. This is an anti-DoS measure, see the
// comments for MAX_BLOCK_SIGOPS.
int sigOps = 0;
for (Transaction tx : transactions) {
sigOps += tx.getSigOpCount();
}
if (sigOps > MAX_BLOCK_SIGOPS)
throw new VerificationException("Block had too many Signature Operations");
}
    /**
     * Verifies that the merkle root recomputed from the transactions matches
     * the value stored in the header.
     */
    private void checkMerkleRoot() throws VerificationException {
        Sha256Hash calculatedRoot = calculateMerkleRoot();
        if (!calculatedRoot.equals(merkleRoot)) {
            log.error("Merkle tree did not verify");
            throw new VerificationException("Merkle hashes do not match: " + calculatedRoot + " vs " + merkleRoot);
        }
    }
    /** Recomputes the merkle root; the root is the last entry of the flattened tree. */
    private Sha256Hash calculateMerkleRoot() {
        List<byte[]> tree = buildMerkleTree();
        return new Sha256Hash(tree.get(tree.size() - 1));
    }
    private List<byte[]> buildMerkleTree() {
        // The Merkle root is based on a tree of hashes calculated from the transactions:
        //
        //     root
        //      / \
        //   A      B
        //  / \    / \
        // t1 t2 t3 t4
        //
        // The tree is represented as a list: t1,t2,t3,t4,A,B,root where each
        // entry is a hash.
        //
        // The hashing algorithm is double SHA-256. The leaves are a hash of the serialized contents of the transaction.
        // The interior nodes are hashes of the concenation of the two child hashes.
        //
        // This structure allows the creation of proof that a transaction was included into a block without having to
        // provide the full block contents. Instead, you can provide only a Merkle branch. For example to prove tx2 was
        // in a block you can just provide tx2, the hash(tx1) and B. Now the other party has everything they need to
        // derive the root, which can be checked against the block header. These proofs aren't used right now but
        // will be helpful later when we want to download partial block contents.
        //
        // Note that if the number of transactions is not even the last tx is repeated to make it so (see
        // tx3 above). A tree with 5 transactions would look like this:
        //
        //          root
        //          /  \
        //        1     5
        //      /  \   / \
        //     2    3 4   4
        //    / \  / \ / \
        //   t1 t2 t3 t4 t5 t5
        maybeParseTransactions();
        ArrayList<byte[]> tree = new ArrayList<byte[]>();
        // Start by adding all the hashes of the transactions as leaves of the tree.
        for (Transaction t : transactions) {
            tree.add(t.getHash().getBytes());
        }
        int levelOffset = 0; // Offset in the list where the currently processed level starts.
        // Step through each level, stopping when we reach the root (levelSize == 1).
        for (int levelSize = transactions.size(); levelSize > 1; levelSize = (levelSize + 1) / 2) {
            // For each pair of nodes on that level:
            for (int left = 0; left < levelSize; left += 2) {
                // The right hand node can be the same as the left hand, in the case where we don't have enough
                // transactions.
                int right = Math.min(left + 1, levelSize - 1);
                // Hashing is done over the little-endian (wire-order) bytes, then
                // the result is flipped back to big-endian for storage.
                byte[] leftBytes = Utils.reverseBytes(tree.get(levelOffset + left));
                byte[] rightBytes = Utils.reverseBytes(tree.get(levelOffset + right));
                tree.add(Utils.reverseBytes(doubleDigestTwoBuffers(leftBytes, 0, 32, rightBytes, 0, 32)));
            }
            // Move to the next level.
            levelOffset += levelSize;
        }
        return tree;
    }
private void checkTransactions() throws VerificationException {
// The first transaction in a block must always be a coinbase transaction.
if (!transactions.get(0).isCoinBase())
throw new VerificationException("First tx is not coinbase");
// The rest must not be.
for (int i = 1; i < transactions.size(); i++) {
if (transactions.get(i).isCoinBase())
throw new VerificationException("TX " + i + " is coinbase when it should not be.");
}
}
    /**
     * Checks the block data to ensure it follows the rules laid out in the network parameters. Specifically,
     * throws an exception if the proof of work is invalid, or if the timestamp is too far from what it should be.
     * This is <b>not</b> everything that is required for a block to be valid, only what is checkable independent
     * of the chain and without a transaction index.
     *
     * @throws VerificationException if the proof of work or timestamp check fails
     */
    public void verifyHeader() throws VerificationException {
        // Prove that this block is OK. It might seem that we can just ignore most of these checks given that the
        // network is also verifying the blocks, but we cannot as it'd open us to a variety of obscure attacks.
        //
        // Firstly we need to ensure this block does in fact represent real work done. If the difficulty is high
        // enough, it's probably been done by the network.
        maybeParseHeader();
        checkProofOfWork(true);
        checkTimestamp();
    }
    /**
     * Checks the block contents
     *
     * @throws VerificationException if the block is empty, oversized, has a bad merkle
     *         root, too many sigops, misplaced coinbases, or an invalid transaction
     */
    public void verifyTransactions() throws VerificationException {
        // Now we need to check that the body of the block actually matches the headers. The network won't generate
        // an invalid block, but if we didn't validate this then an untrusted man-in-the-middle could obtain the next
        // valid block from the network and simply replace the transactions in it with their own fictional
        // transactions that reference spent or non-existant inputs.
        if (transactions.isEmpty())
            throw new VerificationException("Block had no transactions");
        maybeParseTransactions();
        if (this.getOptimalEncodingMessageSize() > MAX_BLOCK_SIZE)
            throw new VerificationException("Block larger than MAX_BLOCK_SIZE");
        checkTransactions();
        checkMerkleRoot();
        checkSigOps();
        for (Transaction transaction : transactions)
            transaction.verify();
    }
    /**
     * Verifies both the header and that the transactions hash to the merkle root.
     *
     * @throws VerificationException if any header or transaction check fails
     */
    public void verify() throws VerificationException {
        verifyHeader();
        verifyTransactions();
    }
@Override
public boolean equals(Object o) {
if (!(o instanceof Block))
return false;
Block other = (Block) o;
return getHash().equals(other.getHash());
}
@Override
public int hashCode() {
return getHash().hashCode();
}
    /**
     * Returns the merkle root in big endian form, calculating it from transactions if necessary.
     */
    public Sha256Hash getMerkleRoot() {
        maybeParseHeader();
        if (merkleRoot == null) {
            //TODO check if this is really necessary.
            unCacheHeader();
            merkleRoot = calculateMerkleRoot();
        }
        return merkleRoot;
    }
    /** Exists only for unit testing. Overwrites the merkle root and invalidates the cached hash. */
    void setMerkleRoot(Sha256Hash value) {
        unCacheHeader();
        merkleRoot = value;
        hash = null;
    }
    /** Adds a transaction to this block. The nonce and merkle root are invalid after this. */
    public void addTransaction(Transaction t) {
        addTransaction(t, true);
    }
    /**
     * Adds a transaction to this block, with or without checking the sanity of doing so.
     * Sanity checks enforce the coinbase placement rule (first tx is the only coinbase).
     */
    void addTransaction(Transaction t, boolean runSanityChecks) {
        unCacheTransactions();
        if (transactions == null) {
            transactions = new ArrayList<Transaction>();
        }
        t.setParent(this);
        if (runSanityChecks && transactions.size() == 0 && !t.isCoinBase())
            throw new RuntimeException("Attempted to add a non-coinbase transaction as the first transaction: " + t);
        else if (runSanityChecks && transactions.size() > 0 && t.isCoinBase())
            throw new RuntimeException("Attempted to add a coinbase transaction when there already is one: " + t);
        transactions.add(t);
        adjustLength(transactions.size(), t.length);
        // Force a recalculation next time the values are needed.
        merkleRoot = null;
        hash = null;
    }
/** Returns the version of the block data structure as defined by the Bitcoin protocol. */
public long getVersion() {
maybeParseHeader();
return version;
}
/**
* Returns the hash of the previous block in the chain, as defined by the block header.
*/
public Sha256Hash getPrevBlockHash() {
maybeParseHeader();
return prevBlockHash;
}
void setPrevBlockHash(Sha256Hash prevBlockHash) {
unCacheHeader();
this.prevBlockHash = prevBlockHash;
this.hash = null;
this.scryptHash = null;
}
/**
* Returns the time at which the block was solved and broadcast, according to the clock of the solving node. This
* is measured in seconds since the UNIX epoch (midnight Jan 1st 1970).
*/
public long getTimeSeconds() {
maybeParseHeader();
return time;
}
/**
* Returns the time at which the block was solved and broadcast, according to the clock of the solving node.
*/
public Date getTime() {
return new Date(getTimeSeconds()*1000);
}
public void setTime(long time) {
unCacheHeader();
this.time = time;
this.hash = null;
this.scryptHash = null;
}
/**
* Returns the difficulty of the proof of work that this block should meet encoded <b>in compact form</b>. The {@link
* BlockChain} verifies that this is not too easy by looking at the length of the chain when the block is added.
* To find the actual value the hash should be compared against, use
* {@link com.google.worldcoin.core.Block#getDifficultyTargetAsInteger()}. Note that this is <b>not</b> the same as
* the difficulty value reported by the Bitcoin "getdifficulty" RPC that you may see on various block explorers.
* That number is the result of applying a formula to the underlying difficulty to normalize the minimum to 1.
* Calculating the difficulty that way is currently unsupported.
*/
public long getDifficultyTarget() {
maybeParseHeader();
return difficultyTarget;
}
/** Sets the difficulty target in compact form. */
public void setDifficultyTarget(long compactForm) {
unCacheHeader();
this.difficultyTarget = compactForm;
this.hash = null;
this.scryptHash = null;
}
/**
* Returns the nonce, an arbitrary value that exists only to make the hash of the block header fall below the
* difficulty target.
*/
public long getNonce() {
maybeParseHeader();
return nonce;
}
/** Sets the nonce and clears any cached data. */
public void setNonce(long nonce) {
unCacheHeader();
this.nonce = nonce;
this.hash = null;
this.scryptHash = null;
}
/** Returns an immutable list of transactions held in this block. */
public List<Transaction> getTransactions() {
maybeParseTransactions();
return ImmutableList.copyOf(transactions);
}
    // ///////////////////////////////////////////////////////////////////////////////////////////////
    // Unit testing related methods.
    // Used to make transactions unique.
    // NOTE(review): static and incremented without synchronization — fine for
    // single-threaded tests, not safe for concurrent use.
    static private int txCounter;
    /** Adds a coinbase transaction to the block. This exists for unit tests. */
    void addCoinbaseTransaction(byte[] pubKeyTo, BigInteger value) {
        unCacheTransactions();
        transactions = new ArrayList<Transaction>();
        Transaction coinbase = new Transaction(params);
        // A real coinbase transaction has some stuff in the scriptSig like the extraNonce and difficulty. The
        // transactions are distinguished by every TX output going to a different key.
        //
        // Here we will do things a bit differently so a new address isn't needed every time. We'll put a simple
        // counter in the scriptSig so every transaction has a different hash.
        coinbase.addInput(new TransactionInput(params, coinbase, new byte[]{(byte) txCounter, (byte) (txCounter++ >> 8)}));
        coinbase.addOutput(new TransactionOutput(params, coinbase, value,
                ScriptBuilder.createOutputScript(new ECKey(null, pubKeyTo)).getProgram()));
        transactions.add(coinbase);
        coinbase.setParent(this);
        // Cache the serialized length so adjustLength sees the real size.
        coinbase.length = coinbase.worldcoinSerialize().length;
        adjustLength(transactions.size(), coinbase.length);
    }
    // All-zero placeholder used for fake scriptSig/outpoint data in test blocks.
    static final byte[] EMPTY_BYTES = new byte[32];
    /**
     * Returns a solved block that builds on top of this one. This exists for unit tests.
     */
    @VisibleForTesting
    public Block createNextBlock(Address to, long time) {
        return createNextBlock(to, null, time, EMPTY_BYTES, Utils.toNanoCoins(50, 0));
    }
    /**
     * Returns a solved block that builds on top of this one. This exists for unit tests.
     * In this variant you can specify a public key (pubkey) for use in generating coinbase blocks.
     *
     * @param to if non-null, an extra transaction paying 50 coins to this address is added
     * @param prevOut outpoint spent by the extra transaction; a unique fake outpoint is generated when null
     * @param time requested timestamp; bumped forward so it is always after this block's time
     * @param pubKey public key to pay in the coinbase transaction
     * @param coinbaseValue value of the coinbase output
     */
    Block createNextBlock(@Nullable Address to, @Nullable TransactionOutPoint prevOut, long time,
                          byte[] pubKey, BigInteger coinbaseValue) {
        Block b = new Block(params);
        b.setDifficultyTarget(difficultyTarget);
        b.addCoinbaseTransaction(pubKey, coinbaseValue);
        if (to != null) {
            // Add a transaction paying 50 coins to the "to" address.
            Transaction t = new Transaction(params);
            t.addOutput(new TransactionOutput(params, t, Utils.toNanoCoins(50, 0), to));
            // The input does not really need to be a valid signature, as long as it has the right general form.
            TransactionInput input;
            if (prevOut == null) {
                input = new TransactionInput(params, t, Script.createInputScript(EMPTY_BYTES, EMPTY_BYTES));
                // Importantly the outpoint hash cannot be zero as that's how we detect a coinbase transaction in isolation
                // but it must be unique to avoid 'different' transactions looking the same.
                byte[] counter = new byte[32];
                counter[0] = (byte) txCounter;
                counter[1] = (byte) (txCounter++ >> 8);
                input.getOutpoint().setHash(new Sha256Hash(counter));
            } else {
                input = new TransactionInput(params, t, Script.createInputScript(EMPTY_BYTES, EMPTY_BYTES), prevOut);
            }
            t.addInput(input);
            b.addTransaction(t);
        }
        b.setPrevBlockHash(getHash());
        // Don't let timestamp go backwards
        if (getTimeSeconds() >= time)
            b.setTime(getTimeSeconds() + 1);
        else
            b.setTime(time);
        b.solve();
        try {
            b.verifyHeader();
        } catch (VerificationException e) {
            throw new RuntimeException(e); // Cannot happen.
        }
        return b;
    }
    /** Test helper: next block paying {@code to}, spending {@code prevOut}, at the current time. */
    @VisibleForTesting
    public Block createNextBlock(@Nullable Address to, TransactionOutPoint prevOut) {
        return createNextBlock(to, prevOut, Utils.currentTimeMillis() / 1000, EMPTY_BYTES, Utils.toNanoCoins(50, 0));
    }
    /** Test helper: next block with the given coinbase value, paying {@code to}. */
    @VisibleForTesting
    public Block createNextBlock(@Nullable Address to, BigInteger value) {
        return createNextBlock(to, null, Utils.currentTimeMillis() / 1000, EMPTY_BYTES, value);
    }
    /** Test helper: next block paying {@code to} with a standard 50-coin coinbase. */
    @VisibleForTesting
    public Block createNextBlock(@Nullable Address to) {
        return createNextBlock(to, Utils.toNanoCoins(50, 0));
    }
    /** Test helper: next block whose coinbase pays {@code coinbaseValue} to {@code pubKey}. */
    @VisibleForTesting
    public Block createNextBlockWithCoinbase(byte[] pubKey, BigInteger coinbaseValue) {
        return createNextBlock(null, null, Utils.currentTimeMillis() / 1000, pubKey, coinbaseValue);
    }
    /**
     * Create a block sending 50BTC as a coinbase transaction to the public key specified.
     * This method is intended for test use only.
     */
    @VisibleForTesting
    Block createNextBlockWithCoinbase(byte[] pubKey) {
        return createNextBlock(null, null, Utils.currentTimeMillis() / 1000, pubKey, Utils.toNanoCoins(50, 0));
    }
@VisibleForTesting
boolean isParsedHeader() {
return headerParsed;
}
@VisibleForTesting
boolean isParsedTransactions() {
return transactionsParsed;
}
@VisibleForTesting
boolean isHeaderBytesValid() {
return headerBytesValid;
}
@VisibleForTesting
boolean isTransactionBytesValid() {
return transactionBytesValid;
}
}
| apache-2.0 |
drewwills/cernunnos | cernunnos-core/src/main/java/org/danann/cernunnos/net/ContentsPhrase.java | 2227 | /*
* Copyright 2007 Andrew Wills
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.danann.cernunnos.net;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import org.danann.cernunnos.EntityConfig;
import org.danann.cernunnos.Formula;
import org.danann.cernunnos.Phrase;
import org.danann.cernunnos.Reagent;
import org.danann.cernunnos.ResourceHelper;
import org.danann.cernunnos.SimpleFormula;
import org.danann.cernunnos.TaskRequest;
import org.danann.cernunnos.TaskResponse;
public final class ContentsPhrase implements Phrase {
// Instance Members.
private final ResourceHelper resource = new ResourceHelper();
/*
* Public API.
*/
public Formula getFormula() {
Reagent[] reagents = new Reagent[] {ResourceHelper.CONTEXT_TARGET, ResourceHelper.LOCATION_PHRASE};
return new SimpleFormula(ContentsPhrase.class, reagents);
}
public void init(EntityConfig config) {
// Instance Members.
this.resource.init(config);
}
public Object evaluate(TaskRequest req, TaskResponse res) {
Object rslt = null;
InputStream inpt = null;
try {
URL u = resource.evaluate(req, res);
inpt = u.openStream();
StringBuffer buff = new StringBuffer();
byte[] bytes = new byte[1024];
for (int len = inpt.read(bytes); len > 0; len = inpt.read(bytes)) {
buff.append(new String(bytes, 0, len));
}
rslt = buff.toString();
} catch (Throwable t) {
String msg = "ContentsPhrase terminated unexpectedly.";
throw new RuntimeException(msg, t);
} finally {
if (inpt != null) {
try {
inpt.close();
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
}
}
return rslt;
}
} | apache-2.0 |
OSUCartography/JMapProjLib | src/ch/ethz/karto/gui/ProjectionSelectionPanel.java | 12876 | /*
* ProjectionSelectionPanel.java
*
* Created on September 16, 2006, 2:54 PM
*/
package ch.ethz.karto.gui;
import com.jhlabs.map.Ellipsoid;
import com.jhlabs.map.proj.Projection;
import com.jhlabs.map.proj.ProjectionFactory;
import java.util.ArrayList;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.swing.DefaultComboBoxModel;
import javax.swing.JOptionPane;
import javax.swing.JSlider;
/**
* ProjectionSelectionPanel lets the user select a projection, applies the
* selected projection to a group of lines, and displays basic information about
* the projection.
*
* @author Bernhard Jenny, Institute of Cartography, ETH Zurich.
*/
public class ProjectionSelectionPanel extends javax.swing.JPanel {
    /**
     * The lines that are displayed. Must be in geographic coordinates
     * (degrees).
     */
    private ArrayList<MapLine> lines = null;
    /**
     * Creates new form ProjectionSelectionPanel and fills the projection
     * combo box with all projection names known to the factory.
     */
    public ProjectionSelectionPanel() {
        initComponents();
        // Replace the design-time placeholder model with the real projection list.
        Object[] projNames = ProjectionFactory.getOrderedProjectionNames();
        projectionComboBox.setModel(new DefaultComboBoxModel(projNames));
    }
private void project() {
boolean inverse = inverseCheckBox.isSelected();
try {
// find the selected name, create the corresponding projection.
String projName = (String) projectionComboBox.getSelectedItem();
Projection projection = ProjectionFactory.getNamedProjection(projName);
// use the selected projection to project the lines,
// and pass the projected lines to the map to display.
if (projection != null) {
projection.setProjectionLongitudeDegrees(lon0Slider.getValue());
projection.setEllipsoid(Ellipsoid.SPHERE);
projection.initialize();
LineProjector projector = new LineProjector();
ArrayList<MapLine> projectedLines = new ArrayList<>();
projector.constructGraticule(projectedLines, projection);
projector.projectLines(lines, projectedLines, projection);
if (inverse && projection.hasInverse()) {
projectedLines = projector.inverse(projectedLines, projection);
}
map.setLines(projectedLines);
} else {
map.setLines(null);
}
// write some descriptive information about the selected projection.
updateProjectionInfo(projection);
} catch (Exception exc) {
String msg = exc.getMessage();
String title = "Error";
JOptionPane.showMessageDialog(selectionPanel, msg, title, JOptionPane.ERROR_MESSAGE);
Logger.getLogger(ProjectionSelectionPanel.class.getName()).log(Level.SEVERE, null, exc);
}
}
    /**
     * Set the lines that are projected and displayed.
     *
     * @param lines The lines to project. Must be in geographic coordinates
     * (degrees).
     */
    public void setLines(ArrayList<MapLine> lines) {
        // store the passed lines
        this.lines = lines;
        // pass the new lines to the map that displays the lines.
        map.setLines(lines);
        // reset the graphical user interface to the first projection.
        projectionComboBox.setSelectedIndex(0);
        project();
    }
/**
* Write basic information about the projection to the graphical user
* interface.
*
* @projection The Projection that provides the information.
*/
private void updateProjectionInfo(Projection projection) {
if (projection == null) {
descriptionLabel.setText("-");
} else {
descriptionLabel.setText(projection.getDescription());
if (projection.hasInverse()) {
System.out.println("Found inverse equation for " + projection.getName() + ".");
} else {
System.out.println("No inverse equation for " + projection.getName() + ".");
}
}
}
/**
* This method is called from within the constructor to initialize the form.
* WARNING: Do NOT modify this code. The content of this method is always
* regenerated by the Form Editor.
*/
// <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
private void initComponents() {
    java.awt.GridBagConstraints gridBagConstraints;

    // Instantiate all widgets up front (Form Editor convention).
    selectionPanel = new javax.swing.JPanel();
    projectionComboBox = new javax.swing.JComboBox();
    previousProjectionButton = new javax.swing.JButton();
    nextProjectionButton = new javax.swing.JButton();
    inverseCheckBox = new javax.swing.JCheckBox();
    map = new ch.ethz.karto.gui.MapComponent();
    infoPanel = new javax.swing.JPanel();
    javax.swing.JLabel descriptionLeadLabel = new javax.swing.JLabel();
    descriptionLabel = new javax.swing.JLabel();
    javax.swing.JLabel longitudeLeadLabel = new javax.swing.JLabel();
    lon0Slider = new javax.swing.JSlider();
    lon0Label = new javax.swing.JLabel();

    setLayout(new java.awt.BorderLayout(10, 10));

    // Top row: projection chooser, previous/next buttons and the inverse toggle.
    selectionPanel.setPreferredSize(new java.awt.Dimension(100, 40));
    selectionPanel.setLayout(new java.awt.FlowLayout(java.awt.FlowLayout.LEFT, 5, 10));

    projectionComboBox.setMaximumRowCount(40);
    projectionComboBox.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "Plate Carre (Geographic)", "Cylindrical Equal-Area", "Cylindrical Conformal (Mercator)", "Conical Equidistant", "Conical Equal-Area (Albers)", "Conical Conformal (Lambert)", "Azimuthal Equidistant", "Azimuthal Equal-Area (Lambert)", "Azimuthal Conformal (Stereographic)", "Azimuthal Orthographic", "Sinusoidal", "Pseudoconical Equal-Area (Bonne)" }));
    projectionComboBox.addItemListener(new java.awt.event.ItemListener() {
        public void itemStateChanged(java.awt.event.ItemEvent evt) {
            projectionComboBoxItemStateChanged(evt);
        }
    });
    selectionPanel.add(projectionComboBox);

    previousProjectionButton.setText("<");
    previousProjectionButton.setPreferredSize(new java.awt.Dimension(50, 29));
    previousProjectionButton.addActionListener(new java.awt.event.ActionListener() {
        public void actionPerformed(java.awt.event.ActionEvent evt) {
            previousProjectionButtonActionPerformed(evt);
        }
    });
    selectionPanel.add(previousProjectionButton);

    nextProjectionButton.setText(">");
    nextProjectionButton.setPreferredSize(new java.awt.Dimension(50, 29));
    nextProjectionButton.addActionListener(new java.awt.event.ActionListener() {
        public void actionPerformed(java.awt.event.ActionEvent evt) {
            nextProjectionButtonActionPerformed(evt);
        }
    });
    selectionPanel.add(nextProjectionButton);

    inverseCheckBox.setText("Test Inverse");
    inverseCheckBox.setToolTipText("Applies forward and inverse projection, which should result in a Plate Carre projection.");
    inverseCheckBox.addActionListener(new java.awt.event.ActionListener() {
        public void actionPerformed(java.awt.event.ActionEvent evt) {
            inverseCheckBoxActionPerformed(evt);
        }
    });
    selectionPanel.add(inverseCheckBox);

    add(selectionPanel, java.awt.BorderLayout.NORTH);

    // Center: the map canvas that renders the projected graticule.
    map.setPreferredSize(new java.awt.Dimension(400, 300));
    add(map, java.awt.BorderLayout.CENTER);

    // Bottom: description text and the longitude-of-origin slider.
    infoPanel.setBorder(javax.swing.BorderFactory.createCompoundBorder(javax.swing.BorderFactory.createTitledBorder(""), javax.swing.BorderFactory.createEmptyBorder(10, 10, 10, 10)));
    infoPanel.setMinimumSize(new java.awt.Dimension(400, 96));
    infoPanel.setPreferredSize(new java.awt.Dimension(500, 200));
    infoPanel.setLayout(new java.awt.GridBagLayout());

    descriptionLeadLabel.setText("Description");
    gridBagConstraints = new java.awt.GridBagConstraints();
    gridBagConstraints.anchor = java.awt.GridBagConstraints.EAST;
    infoPanel.add(descriptionLeadLabel, gridBagConstraints);

    descriptionLabel.setText("-");
    descriptionLabel.setMaximumSize(new java.awt.Dimension(300, 16));
    descriptionLabel.setMinimumSize(new java.awt.Dimension(300, 16));
    descriptionLabel.setPreferredSize(new java.awt.Dimension(300, 16));
    gridBagConstraints = new java.awt.GridBagConstraints();
    gridBagConstraints.gridwidth = 2;
    gridBagConstraints.fill = java.awt.GridBagConstraints.HORIZONTAL;
    gridBagConstraints.anchor = java.awt.GridBagConstraints.WEST;
    gridBagConstraints.insets = new java.awt.Insets(2, 10, 2, 0);
    infoPanel.add(descriptionLabel, gridBagConstraints);

    longitudeLeadLabel.setText("Longitude of Origin");
    gridBagConstraints = new java.awt.GridBagConstraints();
    gridBagConstraints.gridx = 0;
    gridBagConstraints.gridy = 6;
    infoPanel.add(longitudeLeadLabel, gridBagConstraints);

    // Slider covers the full longitude range in degrees, centered on 0.
    lon0Slider.setMaximum(180);
    lon0Slider.setMinimum(-180);
    lon0Slider.setValue(0);
    lon0Slider.setMinimumSize(new java.awt.Dimension(200, 29));
    lon0Slider.setPreferredSize(new java.awt.Dimension(200, 29));
    lon0Slider.addChangeListener(new javax.swing.event.ChangeListener() {
        public void stateChanged(javax.swing.event.ChangeEvent evt) {
            lon0SliderStateChanged(evt);
        }
    });
    gridBagConstraints = new java.awt.GridBagConstraints();
    gridBagConstraints.gridx = 1;
    gridBagConstraints.gridy = 6;
    gridBagConstraints.fill = java.awt.GridBagConstraints.HORIZONTAL;
    gridBagConstraints.anchor = java.awt.GridBagConstraints.WEST;
    gridBagConstraints.insets = new java.awt.Insets(2, 10, 2, 0);
    infoPanel.add(lon0Slider, gridBagConstraints);

    lon0Label.setText("0");
    lon0Label.setMaximumSize(new java.awt.Dimension(50, 16));
    lon0Label.setMinimumSize(new java.awt.Dimension(50, 16));
    lon0Label.setPreferredSize(new java.awt.Dimension(50, 16));
    gridBagConstraints = new java.awt.GridBagConstraints();
    gridBagConstraints.gridx = 2;
    gridBagConstraints.gridy = 6;
    infoPanel.add(lon0Label, gridBagConstraints);

    add(infoPanel, java.awt.BorderLayout.SOUTH);
}// </editor-fold>//GEN-END:initComponents
private void lon0SliderStateChanged(javax.swing.event.ChangeEvent evt) {//GEN-FIRST:event_lon0SliderStateChanged
    // Mirror the slider position in the label, then re-project the map.
    final JSlider source = (JSlider) evt.getSource();
    lon0Label.setText(String.valueOf(source.getValue()));
    //if (!source.getValueIsAdjusting()) {
    project();
    //}
}//GEN-LAST:event_lon0SliderStateChanged
private void projectionComboBoxItemStateChanged(java.awt.event.ItemEvent evt) {//GEN-FIRST:event_projectionComboBoxItemStateChanged
    // A combo-box change fires both DESELECTED and SELECTED; only the
    // selection of the new entry should trigger a re-projection.
    if (java.awt.event.ItemEvent.SELECTED != evt.getStateChange()) {
        return;
    }
    project();
}//GEN-LAST:event_projectionComboBoxItemStateChanged
private void inverseCheckBoxActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_inverseCheckBoxActionPerformed
    // Re-project immediately so toggling "Test Inverse" takes effect at once.
    project();
}//GEN-LAST:event_inverseCheckBoxActionPerformed
private void previousProjectionButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_previousProjectionButtonActionPerformed
    // Step one entry back in the projection list unless already at the first one.
    final int previousIndex = projectionComboBox.getSelectedIndex() - 1;
    if (previousIndex < 0) {
        return;
    }
    projectionComboBox.setSelectedIndex(previousIndex);
    project();
}//GEN-LAST:event_previousProjectionButtonActionPerformed
private void nextProjectionButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_nextProjectionButtonActionPerformed
    // Step one entry forward in the projection list unless already at the last one.
    final int nextIndex = projectionComboBox.getSelectedIndex() + 1;
    if (nextIndex >= projectionComboBox.getItemCount()) {
        return;
    }
    projectionComboBox.setSelectedIndex(nextIndex);
    project();
}//GEN-LAST:event_nextProjectionButtonActionPerformed
// Variables declaration - do not modify//GEN-BEGIN:variables
private javax.swing.JLabel descriptionLabel;
private javax.swing.JPanel infoPanel;
private javax.swing.JCheckBox inverseCheckBox;
private javax.swing.JLabel lon0Label;
private javax.swing.JSlider lon0Slider;
private ch.ethz.karto.gui.MapComponent map;
private javax.swing.JButton nextProjectionButton;
private javax.swing.JButton previousProjectionButton;
private javax.swing.JComboBox projectionComboBox;
private javax.swing.JPanel selectionPanel;
// End of variables declaration//GEN-END:variables
}
| apache-2.0 |
rishikanths/umls | src/main/java/edu/isu/umls/Concepts/SemanticType.java | 198 | /**
*
*
*/
package edu.isu.umls.Concepts;
/**
* @author Rishi Saripalle
* @date Sep 28, 2015
* @time 4:56:51 PM
*
* SemanticType
*
*/
public class SemanticType extends AbstractType {
    // Marker subclass: a UMLS semantic type carries no state beyond what
    // AbstractType provides; the distinct class exists for type discrimination.
}
| apache-2.0 |
OpenSourceConsulting/athena-chameleon | src/main/java/com/athena/chameleon/engine/entity/xml/application/v1_5/EmptyType.java | 2014 | //
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.0-b52-fcs
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2012.10.03 at 02:09:25 오전 KST
//
package com.athena.chameleon.engine.entity.xml.application.v1_5;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlID;
import javax.xml.bind.annotation.XmlType;
import javax.xml.bind.annotation.adapters.CollapsedStringAdapter;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
/**
*
*
* This type is used to designate an empty
* element when used.
*
*
*
* <p>Java class for emptyType complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="emptyType">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <attribute name="id" type="{http://www.w3.org/2001/XMLSchema}ID" />
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "emptyType")
public class EmptyType {

    // XML "id" attribute; @XmlID requires the value to be unique within the
    // document, and CollapsedStringAdapter normalizes surrounding whitespace.
    @XmlAttribute
    @XmlJavaTypeAdapter(CollapsedStringAdapter.class)
    @XmlID
    protected java.lang.String id;

    /**
     * Gets the value of the id property.
     *
     * @return
     *     possible object is
     *     {@link java.lang.String }
     *
     */
    public java.lang.String getId() {
        return id;
    }

    /**
     * Sets the value of the id property.
     *
     * @param value
     *     allowed object is
     *     {@link java.lang.String }
     *
     */
    public void setId(java.lang.String value) {
        this.id = value;
    }
}
| apache-2.0 |
strapdata/elassandra5-rc | core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java | 2582 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.validate.query;
import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.index.query.QueryBuilder;
/**
 * Builder for {@link ValidateQueryRequest}: validates a query against one or
 * more indices without executing it.
 */
public class ValidateQueryRequestBuilder extends BroadcastOperationRequestBuilder<ValidateQueryRequest, ValidateQueryResponse, ValidateQueryRequestBuilder> {

    public ValidateQueryRequestBuilder(ElasticsearchClient client, ValidateQueryAction action) {
        super(client, action, new ValidateQueryRequest());
    }

    /**
     * The types of documents the query will run against. Defaults to all types.
     */
    public ValidateQueryRequestBuilder setTypes(String... types) {
        request.types(types);
        return this;
    }

    /**
     * The query to validate.
     *
     * @see org.elasticsearch.index.query.QueryBuilders
     */
    public ValidateQueryRequestBuilder setQuery(QueryBuilder queryBuilder) {
        request.query(queryBuilder);
        return this;
    }

    /**
     * Indicates if detailed information about the query should be returned.
     *
     * @see org.elasticsearch.index.query.QueryBuilders
     */
    public ValidateQueryRequestBuilder setExplain(boolean explain) {
        request.explain(explain);
        return this;
    }

    /**
     * Indicates whether the query should be rewritten into primitive queries
     */
    public ValidateQueryRequestBuilder setRewrite(boolean rewrite) {
        request.rewrite(rewrite);
        return this;
    }

    /**
     * Indicates whether the query should be validated on all shards
     */
    public ValidateQueryRequestBuilder setAllShards(boolean allShards) {
        // Parameter was previously named "rewrite" (copy-paste from setRewrite),
        // which misrepresented what it controls.
        request.allShards(allShards);
        return this;
    }
}
| apache-2.0 |
kryptnostic/kodex | src/main/java/com/kryptnostic/v2/storage/api/ObjectListingApi.java | 2732 | package com.kryptnostic.v2.storage.api;
import java.util.Set;
import java.util.UUID;
import com.kryptnostic.v2.constants.Names;
import com.kryptnostic.v2.storage.models.VersionedObjectKey;
import retrofit.http.GET;
import retrofit.http.Path;
/**
* This API is used for retrieving paged lists of objects for a user. Ordering is not guaranteed across calls.
*
* @author Matthew Tamayo-Rios <matthew@kryptnostic.com>
*
*/
public interface ObjectListingApi {
    // Base path for all listing endpoints.
    String CONTROLLER = "/listing";
    String OBJECTS = "/objects";
    String VERSIONED = "/versioned";

    // Path-variable names used in the URL templates below.
    String TYPE = "type";
    String PAGE = Names.PAGE_FIELD;
    String PAGE_SIZE = Names.SIZE_FIELD;
    String ID = Names.ID_FIELD;

    String USER_ID_PATH = "/{" + ID + "}";
    String TYPE_ID_PATH = "/type/{" + TYPE + "}";
    String TYPE_NAME_PATH = "/typename/{" + TYPE + "}";
    String PAGE_SIZE_PATH = "/{" + PAGE_SIZE + "}";
    String PAGE_PATH = "/{" + PAGE + "}";

    /**
     * Retrieves all objects owned by a given a user. This is a slow call / uncached call.
     *
     * @param userId The userId for which to return the list of paged objects.
     * @return The UUID of all objects owned by the user.
     */
    @GET( CONTROLLER + OBJECTS + USER_ID_PATH )
    Set<UUID> getAllObjectIds( @Path( ID ) UUID userId);

    /**
     * Retrieves one page of ids of objects owned by a user.
     *
     * @param userId the user whose objects are listed.
     * @param offset the page index.
     * @param pageSize the maximum number of ids returned per page.
     * @return at most {@code pageSize} object ids; ordering is not guaranteed across calls.
     */
    @GET( CONTROLLER + OBJECTS + USER_ID_PATH + PAGE_SIZE_PATH + PAGE_PATH )
    Set<UUID> getAllObjectIdsPaged(
            @Path( ID ) UUID userId,
            @Path( PAGE ) int offset,
            @Path( PAGE_SIZE ) int pageSize);

    /**
     * Retrieves the versioned object keys of a user's objects of the given type.
     *
     * @param userId the user whose objects are listed.
     * @param type the type id to filter by.
     * @return versioned keys of all matching objects.
     */
    @GET( CONTROLLER + VERSIONED + OBJECTS + USER_ID_PATH + TYPE_ID_PATH )
    Set<VersionedObjectKey> getVersionedObjectKeysByType( @Path( ID ) UUID userId, @Path( TYPE ) UUID type);

    /**
     * Retrieves the ids of a user's objects of the given type.
     *
     * @param userId the user whose objects are listed.
     * @param type the type id to filter by.
     * @return ids of all matching objects.
     */
    @GET( CONTROLLER + OBJECTS + USER_ID_PATH + TYPE_ID_PATH )
    Iterable<UUID> getObjectIdsByType( @Path( ID ) UUID userId, @Path( TYPE ) UUID type);

    /**
     * Paged variant of {@link #getObjectIdsByType(UUID, UUID)}.
     *
     * @param userId the user whose objects are listed.
     * @param typeId the type id to filter by.
     * @param offset the page index.
     * @param pageSize the maximum number of ids returned per page.
     * @return at most {@code pageSize} matching object ids.
     */
    @GET( CONTROLLER + OBJECTS + USER_ID_PATH + TYPE_ID_PATH + PAGE_SIZE_PATH + PAGE_PATH )
    Set<UUID> getObjectIdsByTypePaged(
            @Path( ID ) UUID userId,
            @Path( TYPE ) UUID typeId,
            @Path( PAGE ) int offset,
            @Path( PAGE_SIZE ) int pageSize);

    /**
     * Paged variant of {@link #getVersionedObjectKeysByType(UUID, UUID)}.
     *
     * @param userId the user whose objects are listed.
     * @param typeId the type id to filter by.
     * @param offset the page index.
     * @param pageSize the maximum number of keys returned per page.
     * @return at most {@code pageSize} matching versioned object keys.
     */
    @GET( CONTROLLER + VERSIONED + OBJECTS + USER_ID_PATH + TYPE_ID_PATH + PAGE_SIZE_PATH + PAGE_PATH )
    Set<VersionedObjectKey> getVersionedObjectKeysByTypePaged(
            @Path( ID ) UUID userId,
            @Path( TYPE ) UUID typeId,
            @Path( PAGE ) int offset,
            @Path( PAGE_SIZE ) int pageSize);

    /**
     * Resolves a type name to its type id.
     *
     * @param typeName the name of the type to resolve.
     * @return the UUID of the type with the given name.
     */
    @GET( CONTROLLER + OBJECTS + TYPE_NAME_PATH )
    UUID getTypeForName( @Path( TYPE ) String typeName);
}
| apache-2.0 |
kaiwinter/exifrename | core/src/test/java/com/github/kaiwinter/exifrename/RenameUcTest.java | 5193 | package com.github.kaiwinter.exifrename;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.DirectoryStream;
import java.nio.file.FileSystem;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Date;
import java.util.HashSet;
import java.util.Set;
import org.junit.Assert;
import org.junit.Test;
import com.github.kaiwinter.exifrename.type.RenameOperation;
import com.github.kaiwinter.exifrename.uc.RenameUc;
import com.google.common.jimfs.Configuration;
import com.google.common.jimfs.Jimfs;
/**
* Tests for {@link RenameUc}.
*/
public class RenameUcTest {

    /**
     * The directory contains two images which have different date/time originals:
     * each file should be renamed to its own timestamp without a suffix.
     */
    @Test
    public void twoDifferentImages() throws IOException, URISyntaxException {
        RenameUc renameUc = new RenameUc();
        URL resource = RenameUcTest.class.getResource("/two_different_images");
        Path path = Paths.get(resource.toURI());
        Set<RenameOperation> renameOperations = renameUc.createRenameOperationsForDirectory(path);
        assertEquals(2, renameOperations.size());

        assertTrue(renameOperations.stream().anyMatch(item -> {
            Path oldName = Paths.get(path.toString(), "image_1.jpg");
            Path expectedNewName = Paths.get(path.toString(), "20170701_130000.jpg");
            return (oldName).equals(item.getOldFilename()) && (expectedNewName).equals(item.getNewFilenamePath());
        }));

        assertTrue(renameOperations.stream().anyMatch(item -> {
            Path oldName = Paths.get(path.toString(), "image_2.jpg");
            Path expectedNewName = Paths.get(path.toString(), "20170701_140000.jpg");
            return (oldName).equals(item.getOldFilename()) && (expectedNewName).equals(item.getNewFilenamePath());
        }));
    }

    /**
     * The directory to process contains two images which both have the same
     * date/time original: a numeric suffix must disambiguate the new names.
     */
    @Test
    public void sameDateTimeOriginal() throws IOException, URISyntaxException {
        RenameUc renameUc = new RenameUc();
        URL resource = RenameUcTest.class.getResource("/same_datetime_original");
        Path path = Paths.get(resource.toURI());
        Set<RenameOperation> renameOperations = renameUc.createRenameOperationsForDirectory(path);

        assertTrue(renameOperations.stream().anyMatch(item -> {
            Path oldName = Paths.get(path.toString(), "image_1.jpg");
            Path expectedNewName = Paths.get(path.toString(), "20170701_130000_1.jpg");
            return (oldName).equals(item.getOldFilename()) && (expectedNewName).equals(item.getNewFilenamePath());
        }));

        assertTrue(renameOperations.stream().anyMatch(item -> {
            Path oldName = Paths.get(path.toString(), "image_2.jpg");
            Path expectedNewName = Paths.get(path.toString(), "20170701_130000_2.jpg");
            return (oldName).equals(item.getOldFilename()) && (expectedNewName).equals(item.getNewFilenamePath());
        }));
    }

    /**
     * Uses an in-memory file system to test the rename. The directory contains one file which should be renamed
     * according to the {@link RenameOperation}.
     */
    @Test
    public void executeRenameOperations() throws IOException {
        FileSystem fs = Jimfs.newFileSystem(Configuration.unix());
        Path foo = fs.getPath("/dir");
        Files.createDirectory(foo);
        Path hello = foo.resolve("IMG_12345.jpg");
        Files.write(hello, "".getBytes());

        Set<RenameOperation> renameOperations = new HashSet<>();
        renameOperations.add(
                new RenameOperation(fs.getPath("/dir", "IMG_12345.jpg"), fs.getPath("/dir", "20160701_2336.jpg"), new Date()));

        RenameUc renameUc = new RenameUc();
        renameUc.executeRenameOperations(renameOperations);

        DirectoryStream<Path> newDirectoryStream = Files.newDirectoryStream(foo);
        assertEquals(fs.getPath("/dir", "20160701_2336.jpg"), newDirectoryStream.iterator().next());
    }

    /**
     * If no directory is passed the method should return an empty Set.
     */
    @Test
    public void noDirectory() throws URISyntaxException, IOException {
        URL resource = RenameUcTest.class.getResource("/same_datetime_original");
        Path path = Paths.get(resource.toURI());
        // Point at an actual file in the resource directory (was "image1.jpg",
        // which does not exist there) so the test really exercises the
        // "path is a file, not a directory" case.
        path = path.resolve("image_1.jpg");

        RenameUc renameUc = new RenameUc();
        Set<RenameOperation> renameOperations = renameUc.createRenameOperationsForDirectory(path);
        // Use the statically imported assertTrue for consistency with the other tests.
        assertTrue(renameOperations.isEmpty());
    }

    /**
     * Files in the directory which aren't images should be ignored.
     */
    @Test
    public void wrongFileformat() throws URISyntaxException, IOException {
        URL resource = RenameUcTest.class.getResource("/wrong_fileformat");
        Path path = Paths.get(resource.toURI());

        RenameUc renameUc = new RenameUc();
        Set<RenameOperation> renameOperations = renameUc.createRenameOperationsForDirectory(path);
        assertEquals(1, renameOperations.size());
    }
}
| apache-2.0 |
DemigodsRPG/Stoa | src/main/java/com/demigodsrpg/stoa/util/MessageUtil.java | 7110 | package com.demigodsrpg.stoa.util;
import com.demigodsrpg.stoa.StoaPlugin;
import com.demigodsrpg.stoa.data.TempData;
import com.demigodsrpg.stoa.event.StoaChatEvent;
import org.apache.commons.lang.WordUtils;
import org.bukkit.Bukkit;
import org.bukkit.ChatColor;
import org.bukkit.OfflinePlayer;
import org.bukkit.Server;
import org.bukkit.command.CommandSender;
import org.bukkit.command.ConsoleCommandSender;
import org.bukkit.entity.Player;
import java.util.ArrayList;
import java.util.logging.Logger;
/**
* Module to handle all common messages sent to players or the console.
*/
public class MessageUtil {

    private static final Logger LOGGER;
    private static final String PLUGIN_NAME;
    // Maximum characters per output line for console and in-game chat.
    private static final int LINE_SIZE, IN_GAME_LINE_SIZE;

    /*
     * Static initializer: caches the plugin logger/name and derives the line
     * widths. The console budget shrinks with the plugin name because the
     * logger prefixes every line with it.
     */
    static {
        LOGGER = StoaPlugin.getInst().getLogger();
        PLUGIN_NAME = StoaPlugin.getInst().getName();
        LINE_SIZE = 59 - PLUGIN_NAME.length();
        IN_GAME_LINE_SIZE = 54;
    }

    /**
     * Sends the message <code>msg</code> as a tagged message to the <code>sender</code>,
     * wrapping it over multiple lines when it exceeds the in-game line width.
     *
     * @param sender The CommandSender to send the message to (allows console messages).
     * @param msg    The message to be sent.
     */
    public static void tagged(CommandSender sender, String msg) {
        if (msg.length() + PLUGIN_NAME.length() + 3 > IN_GAME_LINE_SIZE) {
            for (String line : wrapInGame(ChatColor.RED + "[" + PLUGIN_NAME + "] " + ChatColor.RESET + msg))
                sender.sendMessage(line);
            return;
        }
        sender.sendMessage(ChatColor.RED + "[" + PLUGIN_NAME + "] " + ChatColor.RESET + msg);
    }

    /**
     * Sends the console message <code>msg</code> with "info" tag, wrapping long lines.
     *
     * @param msg The message to be sent.
     */
    public static void info(String msg) {
        if (msg.length() > LINE_SIZE) {
            for (String line : wrapConsole(msg))
                LOGGER.info(line);
            return;
        }
        LOGGER.info(msg);
    }

    /**
     * Sends the console message <code>msg</code> with "warning" tag, wrapping long lines.
     *
     * @param msg The message to be sent.
     */
    public static void warning(String msg) {
        if (msg.length() > LINE_SIZE) {
            for (String line : wrapConsole(msg))
                LOGGER.warning(line);
            return;
        }
        LOGGER.warning(msg);
    }

    /**
     * Sends the console message <code>msg</code> with "severe" tag, wrapping long lines.
     *
     * @param msg The message to be sent.
     */
    public static void severe(String msg) {
        // Was ">= LINE_SIZE" while info()/warning() use ">"; normalized for
        // consistency (a message of exactly LINE_SIZE wraps to one line anyway).
        if (msg.length() > LINE_SIZE) {
            for (String line : wrapConsole(msg))
                LOGGER.severe(line);
            return;
        }
        LOGGER.severe(msg);
    }

    /**
     * Wraps <code>msg</code> to the console line width.
     *
     * @param msg the message to wrap.
     * @return the wrapped lines.
     */
    public static String[] wrapConsole(String msg) {
        // "/n" is used as a split sentinel, not a newline escape.
        return WordUtils.wrap(msg, LINE_SIZE, "/n", false).split("/n");
    }

    /**
     * Broadcast to the entire server (all players and the console) the message <code>msg</code>,
     * unless a {@link StoaChatEvent} listener cancels it.
     *
     * @param msg The message to be sent.
     */
    public static void broadcast(String msg) {
        if (ChatColor.stripColor(msg).length() > IN_GAME_LINE_SIZE) {
            Server server = StoaPlugin.getInst().getServer();
            for (String line : wrapInGame(msg)) {
                // Each wrapped line is broadcast (and cancellable) individually.
                StoaChatEvent chatEvent = new StoaChatEvent(line);
                Bukkit.getPluginManager().callEvent(chatEvent);
                if (!chatEvent.isCancelled()) server.broadcastMessage(line);
            }
            return;
        }
        StoaChatEvent chatEvent = new StoaChatEvent(msg);
        Bukkit.getPluginManager().callEvent(chatEvent);
        if (!chatEvent.isCancelled()) StoaPlugin.getInst().getServer().broadcastMessage(msg);
    }

    /**
     * Wraps <code>msg</code> to the in-game chat line width.
     *
     * @param msg the message to wrap.
     * @return the wrapped lines.
     */
    public static String[] wrapInGame(String msg) {
        return WordUtils.wrap(msg, IN_GAME_LINE_SIZE, "/n", false).split("/n");
    }

    /**
     * Let the <code>sender</code> know it does not have permission.
     *
     * @param sender The CommandSender being notified.
     * @return True.
     */
    public static boolean noPermission(CommandSender sender) {
        sender.sendMessage(ChatColor.RED + "You do not have permission to do that.");
        return true;
    }

    /**
     * Let the <code>console</code> know it cannot continue.
     *
     * @param console The console.
     * @return True.
     */
    public static boolean noConsole(ConsoleCommandSender console) {
        console.sendMessage("That can only be executed by a player.");
        return true;
    }

    /**
     * Let the <code>player</code> know it cannot continue.
     *
     * @param player The Player being notified.
     * @return True.
     */
    public static boolean noPlayer(Player player) {
        player.sendMessage("That can only be executed by the console.");
        return true;
    }

    /**
     * Clears the chat for <code>player</code> using .sendMessage().
     *
     * @param player the player whose chat to clear.
     */
    public static void clearChat(Player player) {
        for (int x = 0; x < 120; x++)
            player.sendMessage(" ");
    }

    /**
     * Clears the chat for <code>player</code> using .sendRawMessage().
     *
     * @param player the player whose chat to clear.
     */
    public static void clearRawChat(Player player) {
        for (int x = 0; x < 120; x++)
            player.sendRawMessage(" ");
    }

    /**
     * Returns an ArrayList of all online admins (players with "demigods.admin").
     *
     * @return ArrayList
     */
    public static ArrayList<Player> getOnlineAdmins() {
        ArrayList<Player> toReturn = new ArrayList<>();
        for (Player player : Bukkit.getOnlinePlayers()) {
            if (player.hasPermission("demigods.admin")) toReturn.add(player);
        }
        return toReturn;
    }

    /**
     * Sends the <code>message</code> to all applicable recipients: the console
     * (if console debugging is on) and every online, debugging admin.
     *
     * @param message the message to send.
     */
    public static void sendDebug(String message) {
        // Log to console
        if (consoleDebugEnabled()) MessageUtil.info("[Debug] " + ChatColor.stripColor(message));
        // Log to online, debugging admins
        for (Player player : getOnlineAdmins()) {
            if (playerDebugEnabled(player)) player.sendMessage(ChatColor.RED + "[Debug] " + message);
        }
    }

    /**
     * Returns true if <code>player</code>'s demigods debugging is enabled.
     *
     * @param player the player to check.
     * @return boolean
     */
    public static boolean playerDebugEnabled(OfflinePlayer player) {
        // getPlayer() is null for players who are not online; guard against NPE.
        return player.getPlayer() != null
                && player.getPlayer().hasPermission("demigods.admin")
                && TempData.TABLE.contains(player.getName(), "temp_admin_debug")
                && Boolean.parseBoolean(TempData.TABLE.get(player.getName(), "temp_admin_debug").toString());
    }

    /**
     * Returns true if console debugging is enabled in the config.
     *
     * @return boolean
     */
    public static boolean consoleDebugEnabled() {
        return StoaPlugin.getInst().getConfig().getBoolean("misc.console_debug");
    }
}
| apache-2.0 |
jamesdbloom/mockserver | mockserver-core/src/main/java/org/mockserver/server/initialize/ExpectationInitializerLoader.java | 2523 | package org.mockserver.server.initialize;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.StringUtils;
import org.mockserver.configuration.ConfigurationProperties;
import org.mockserver.file.FileReader;
import org.mockserver.logging.MockServerLogger;
import org.mockserver.mock.Expectation;
import org.mockserver.serialization.ExpectationSerializer;
import java.lang.reflect.Constructor;
import static org.apache.commons.lang3.StringUtils.isNotBlank;
/**
* @author jamesdbloom
*/
/**
 * Loads the initial set of expectations configured for MockServer, either by
 * instantiating a user-supplied {@link ExpectationInitializer} class or by
 * deserializing a JSON file.
 */
public class ExpectationInitializerLoader {

    private final ExpectationSerializer expectationSerializer;

    public ExpectationInitializerLoader(MockServerLogger mockServerLogger) {
        expectationSerializer = new ExpectationSerializer(mockServerLogger);
    }

    /**
     * Instantiates the class named by the initialization-class property (when
     * configured) via its no-arg constructor and returns its expectations.
     * Any reflection failure is rethrown as a {@link RuntimeException}.
     */
    private Expectation[] retrieveExpectationsFromInitializerClass() {
        try {
            String initializationClass = ConfigurationProperties.initializationClass();
            if (isNotBlank(initializationClass)) {
                ClassLoader contextClassLoader = ExpectationInitializerLoader.class.getClassLoader();
                // isNotBlank(initializationClass) already implies non-empty, so the
                // previous extra StringUtils.isNotEmpty check was redundant.
                if (contextClassLoader != null) {
                    Constructor<?> initializerClassConstructor = contextClassLoader.loadClass(initializationClass).getDeclaredConstructor();
                    Object expectationInitializer = initializerClassConstructor.newInstance();
                    if (expectationInitializer instanceof ExpectationInitializer) {
                        return ((ExpectationInitializer) expectationInitializer).initializeExpectations();
                    }
                }
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        return new Expectation[0];
    }

    /**
     * Deserializes the expectations JSON file named by the initialization-json
     * property (when configured); otherwise returns an empty array.
     */
    private Expectation[] retrieveExpectationsFromJson() {
        String initializationJsonPath = ConfigurationProperties.initializationJsonPath();
        if (isNotBlank(initializationJsonPath)) {
            return expectationSerializer.deserializeArray(FileReader.readFileFromClassPathOrPath(initializationJsonPath));
        }
        return new Expectation[0];
    }

    /**
     * Combines the expectations from the initializer class and the JSON file,
     * in that order.
     *
     * @return all configured initial expectations; empty when none configured.
     */
    public Expectation[] loadExpectations() {
        final Expectation[] expectationsFromInitializerClass = retrieveExpectationsFromInitializerClass();
        final Expectation[] expectationsFromJson = retrieveExpectationsFromJson();
        return ArrayUtils.addAll(expectationsFromInitializerClass, expectationsFromJson);
    }
}
| apache-2.0 |
fossasia/fossasia-companion-android | app/src/main/java/org/fossasia/db/DatabaseManager.java | 29843 | package org.fossasia.db;
import android.app.SearchManager;
import android.content.ContentValues;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.database.Cursor;
import android.database.sqlite.SQLiteDatabase;
import android.database.sqlite.SQLiteStatement;
import android.net.Uri;
import android.provider.BaseColumns;
import android.support.v4.content.LocalBroadcastManager;
import android.text.TextUtils;
import org.fossasia.model.Day;
import org.fossasia.model.FossasiaEvent;
import org.fossasia.model.Person;
import org.fossasia.model.Speaker;
import org.fossasia.model.Sponsor;
import org.fossasia.model.Venue;
import org.fossasia.utils.StringUtils;
import java.util.ArrayList;
/**
* Here comes the badass SQL.
*
* @author Christophe Beyls
*/
public class DatabaseManager {
public static final String ACTION_SCHEDULE_REFRESHED = "be.digitalia.fosdem.action.SCHEDULE_REFRESHED";
public static final String ACTION_ADD_BOOKMARK = "be.digitalia.fosdem.action.ADD_BOOKMARK";
public static final String EXTRA_EVENT_ID = "event_id";
public static final String EXTRA_EVENT_START_TIME = "event_start";
public static final String ACTION_REMOVE_BOOKMARKS = "be.digitalia.fosdem.action.REMOVE_BOOKMARKS";
public static final String EXTRA_EVENT_IDS = "event_ids";
public static final int PERSON_NAME_COLUMN_INDEX = 1;
private static final Uri URI_TRACKS = Uri.parse("sqlite://be.digitalia.fosdem/tracks");
private static final Uri URI_EVENTS = Uri.parse("sqlite://be.digitalia.fosdem/events");
private static final String DB_PREFS_FILE = "database";
private static final String LAST_UPDATE_TIME_PREF = "last_update_time";
private static final String[] COUNT_PROJECTION = new String[]{"count(*)"};
// Ignore conflicts in case of existing person
private static DatabaseManager instance;
private Context context;
private DatabaseHelper helper;
/**
 * Private constructor: instances are only created through {@link #init(Context)}.
 *
 * @param context context used to open the database and access shared preferences.
 */
private DatabaseManager(Context context) {
    this.context = context;
    helper = new DatabaseHelper(context);
}
/**
 * Creates the singleton instance on first call; later calls are no-ops.
 * NOTE(review): not synchronized — assumes the first call happens on a single
 * thread (e.g. Application.onCreate); confirm before calling concurrently.
 */
public static void init(Context context) {
    if (instance == null) {
        instance = new DatabaseManager(context);
    }
}
/**
 * Returns the singleton created by {@link #init(Context)}, or {@code null}
 * if init was never called.
 */
public static DatabaseManager getInstance() {
    return instance;
}
/**
 * Counts the rows of {@code table} that match the given selection.
 * The cursor is always closed before returning.
 */
private static long queryNumEntries(SQLiteDatabase db, String table, String selection, String[] selectionArgs) {
    final Cursor countCursor = db.query(table, COUNT_PROJECTION, selection, selectionArgs, null, null, null);
    try {
        countCursor.moveToFirst();
        return countCursor.getLong(0);
    } finally {
        countCursor.close();
    }
}
/**
 * Binds {@code value} to the statement at {@code index}, mapping a Java
 * {@code null} to SQL NULL.
 */
private static void bindString(SQLiteStatement statement, int index, String value) {
    if (value != null) {
        statement.bindString(index, value);
    } else {
        statement.bindNull(index);
    }
}
/**
 * Deletes every row from all schedule-related tables: the legacy FOSDEM
 * tables and the Fossasia tables. Bookmarks are not touched here.
 */
private static void clearDatabase(SQLiteDatabase db) {
    db.delete(DatabaseHelper.EVENTS_TABLE_NAME, null, null);
    db.delete(DatabaseHelper.EVENTS_TITLES_TABLE_NAME, null, null);
    db.delete(DatabaseHelper.PERSONS_TABLE_NAME, null, null);
    db.delete(DatabaseHelper.EVENTS_PERSONS_TABLE_NAME, null, null);
    db.delete(DatabaseHelper.LINKS_TABLE_NAME, null, null);
    db.delete(DatabaseHelper.TRACKS_TABLE_NAME, null, null);
    db.delete(DatabaseHelper.DAYS_TABLE_NAME, null, null);
    // Deleting Fossasia tables
    db.delete(DatabaseHelper.TABLE_NAME_KEY_SPEAKERS, null, null);
    db.delete(DatabaseHelper.TABLE_NAME_SCHEDULE, null, null);
    db.delete(DatabaseHelper.TABLE_NAME_SPEAKER_EVENT_RELATION, null, null);
    db.delete(DatabaseHelper.TABLE_NAME_TRACK, null, null);
    db.delete(DatabaseHelper.TABLE_NAME_SPONSOR, null, null);
}
/**
 * Loads the full event details for every bookmarked event.
 *
 * @return the bookmarked events; empty when there are no bookmarks.
 */
public ArrayList<FossasiaEvent> getBookmarkEvents() {
    ArrayList<FossasiaEvent> bookmarkedEvents = new ArrayList<>();
    Cursor cursor = helper.getReadableDatabase().rawQuery("SELECT * FROM " + DatabaseHelper.BOOKMARKS_TABLE_NAME, null);
    try {
        while (cursor.moveToNext()) {
            bookmarkedEvents.add(getEventById(cursor.getInt(0)));
        }
    } finally {
        // Close in a finally block: the original leaked the cursor when
        // getEventById threw.
        cursor.close();
    }
    return bookmarkedEvents;
}
/** Reads the event id from column 0 of the cursor's current row. */
public static long toEventId(Cursor cursor) {
    return cursor.getLong(0);
}
/** Reads the start time in ms from column 1, or -1 when the column is NULL. */
public static long toEventStartTimeMillis(Cursor cursor) {
    return cursor.isNull(1) ? -1L : cursor.getLong(1);
}
/**
 * Populates {@code person} (or a freshly allocated Person when {@code null})
 * from the cursor's current row: column 0 = id, column 1 = name.
 */
public static Person toPerson(Cursor cursor, Person person) {
    final Person target = (person != null) ? person : new Person();
    target.setId(cursor.getLong(0));
    target.setName(cursor.getString(1));
    return target;
}
/** Convenience overload that always allocates a new Person. */
public static Person toPerson(Cursor cursor) {
    return toPerson(cursor, null);
}
/** Shared-preferences file holding database bookkeeping (e.g. last update time). */
private SharedPreferences getSharedPreferences() {
    return context.getSharedPreferences(DB_PREFS_FILE, Context.MODE_PRIVATE);
}
/**
 * Executes the given SQL statements atomically inside a single transaction.
 * If any statement fails the whole transaction is rolled back.
 *
 * @param queries raw SQL statements to execute, in order.
 */
public void performInsertQueries(ArrayList<String> queries) {
    SQLiteDatabase db = helper.getWritableDatabase();
    db.beginTransaction();
    try {
        for (String query : queries) {
            db.execSQL(query);
        }
        db.setTransactionSuccessful();
    } finally {
        // Without the finally block a failing statement left the transaction
        // open, keeping the database locked.
        db.endTransaction();
    }
}
/**
 * Clears all schedule data inside a transaction, resets the last-update
 * timestamp, and notifies content observers and local broadcast listeners
 * that the schedule changed (the finally block runs notifications even when
 * clearing fails).
 */
public void clearDatabase() {
    SQLiteDatabase db = helper.getWritableDatabase();
    db.beginTransaction();
    try {
        clearDatabase(db);
        db.setTransactionSuccessful();
        // NOTE(review): commit() writes synchronously on this thread; apply()
        // is usually preferred unless immediate durability is required — confirm.
        getSharedPreferences().edit().remove(LAST_UPDATE_TIME_PREF).commit();
    } finally {
        db.endTransaction();
        context.getContentResolver().notifyChange(URI_TRACKS, null);
        context.getContentResolver().notifyChange(URI_EVENTS, null);
        LocalBroadcastManager.getInstance(context).sendBroadcast(new Intent(ACTION_SCHEDULE_REFRESHED));
    }
}
/**
 * Returns the distinct dates on which the given track has scheduled events,
 * indexed in query order.
 *
 * @param track track name to filter the schedule by.
 * @return one {@link Day} per distinct date; empty when the track has no events.
 */
public ArrayList<Day> getDates(String track) {
    ArrayList<Day> days = new ArrayList<>();
    // Bind the track as a query parameter instead of String.format-ing it into
    // the SQL: avoids SQL injection and breakage on names containing quotes.
    Cursor cursor = helper.getReadableDatabase().rawQuery(
            "SELECT date FROM schedule WHERE track=? GROUP BY date", new String[]{track});
    try {
        int index = 0;
        while (cursor.moveToNext()) {
            days.add(new Day(index, cursor.getString(0)));
            index++;
        }
    } finally {
        cursor.close();
    }
    return days;
}
/**
 * Loads a single event by its schedule-row id, including its speaker names.
 *
 * @param id schedule row id
 * @return the matching event (start time rendered as "date startTime"),
 *         or {@code null} when no row matches
 */
public FossasiaEvent getEventById(int id) {
    FossasiaEvent temp = null;
    // Parameterized lookup; selection args avoid SQL injection.
    Cursor cursor = helper.getReadableDatabase().rawQuery(
            "SELECT * FROM schedule WHERE id=?", new String[]{String.valueOf(id)});
    try {
        while (cursor.moveToNext()) {
            id = cursor.getInt(0);
            String title = cursor.getString(1);
            String subTitle = cursor.getString(2);
            String date = cursor.getString(3);
            String day = cursor.getString(4);
            String startTime = cursor.getString(5);
            String abstractText = cursor.getString(6);
            String description = cursor.getString(7);
            String venue = cursor.getString(8);
            String track = cursor.getString(9);
            String moderator = cursor.getString(10);
            ArrayList<String> speakers = new ArrayList<String>();
            Cursor cursorSpeaker = helper.getReadableDatabase().rawQuery(
                    "SELECT speaker FROM " + DatabaseHelper.TABLE_NAME_SPEAKER_EVENT_RELATION
                            + " WHERE event_id=?", new String[]{String.valueOf(id)});
            try {
                while (cursorSpeaker.moveToNext()) {
                    speakers.add(cursorSpeaker.getString(0));
                }
            } finally {
                // Bug fix: this cursor was previously never closed (resource leak).
                cursorSpeaker.close();
            }
            temp = new FossasiaEvent(id, title, subTitle, speakers, date, day,
                    date + " " + startTime, abstractText, description, venue, track, moderator);
        }
    } finally {
        cursor.close();
    }
    return temp;
}
/**
 * Loads every event scheduled on the given date, including speaker names.
 *
 * @param selectDate date string exactly as stored in the schedule table
 * @return all matching events (possibly empty)
 */
public ArrayList<FossasiaEvent> getEventsByDate(String selectDate) {
    ArrayList<FossasiaEvent> fossasiaEventList = new ArrayList<FossasiaEvent>();
    // Parameterized query: avoids SQL injection/quoting problems with the date string.
    Cursor cursor = helper.getReadableDatabase().rawQuery(
            "SELECT * FROM schedule WHERE date=?", new String[]{selectDate});
    try {
        while (cursor.moveToNext()) {
            int id = cursor.getInt(0);
            String title = cursor.getString(1);
            String subTitle = cursor.getString(2);
            String date = cursor.getString(3);
            String day = cursor.getString(4);
            String startTime = cursor.getString(5);
            String abstractText = cursor.getString(6);
            String description = cursor.getString(7);
            String venue = cursor.getString(8);
            String track = cursor.getString(9);
            String moderator = cursor.getString(10);
            ArrayList<String> speakers = new ArrayList<String>();
            Cursor cursorSpeaker = helper.getReadableDatabase().rawQuery(
                    "SELECT speaker FROM " + DatabaseHelper.TABLE_NAME_SPEAKER_EVENT_RELATION
                            + " WHERE event_id=?", new String[]{String.valueOf(id)});
            try {
                while (cursorSpeaker.moveToNext()) {
                    speakers.add(cursorSpeaker.getString(0));
                }
            } finally {
                // Bug fix: speaker cursor was previously never closed (resource leak).
                cursorSpeaker.close();
            }
            fossasiaEventList.add(new FossasiaEvent(id, title, subTitle, speakers, date, day,
                    startTime, abstractText, description, venue, track, moderator));
        }
    } finally {
        cursor.close();
    }
    return fossasiaEventList;
}
/**
 * Loads every event a given speaker participates in: first collects the event
 * titles linked to the speaker, then loads each matching schedule row.
 *
 * @param name speaker name as stored in the speaker/event relation table
 * @return all matching events (possibly empty)
 */
public ArrayList<FossasiaEvent> getEventBySpeaker(String name) {
    ArrayList<String> events = new ArrayList<String>();
    // Parameterized query: avoids SQL injection/quoting problems with the name.
    Cursor cursorEvents = helper.getReadableDatabase().rawQuery(
            "SELECT event FROM " + DatabaseHelper.TABLE_NAME_SPEAKER_EVENT_RELATION
                    + " WHERE speaker=?", new String[]{name});
    try {
        while (cursorEvents.moveToNext()) {
            events.add(cursorEvents.getString(0));
        }
    } finally {
        cursorEvents.close();
    }
    ArrayList<FossasiaEvent> fossasiaEventList = new ArrayList<FossasiaEvent>();
    for (String event : events) {
        // Titles in the schedule table are matched with diacritics stripped,
        // exactly as the relation table stores them.
        Cursor cursor = helper.getReadableDatabase().rawQuery(
                "SELECT * FROM schedule WHERE title=?",
                new String[]{StringUtils.removeDiacritics(event)});
        try {
            while (cursor.moveToNext()) {
                int id = cursor.getInt(0);
                String title = cursor.getString(1);
                String subTitle = cursor.getString(2);
                String date = cursor.getString(3);
                String day = cursor.getString(4);
                String startTime = cursor.getString(5);
                String abstractText = cursor.getString(6);
                String description = cursor.getString(7);
                String venue = cursor.getString(8);
                String track = cursor.getString(9);
                String moderator = cursor.getString(10);
                ArrayList<String> speakers = new ArrayList<String>();
                Cursor cursorSpeaker = helper.getReadableDatabase().rawQuery(
                        "SELECT speaker FROM " + DatabaseHelper.TABLE_NAME_SPEAKER_EVENT_RELATION
                                + " WHERE event_id=?", new String[]{String.valueOf(id)});
                try {
                    while (cursorSpeaker.moveToNext()) {
                        speakers.add(cursorSpeaker.getString(0));
                    }
                } finally {
                    // Bug fix: speaker cursor was previously never closed (resource leak).
                    cursorSpeaker.close();
                }
                fossasiaEventList.add(new FossasiaEvent(id, title, subTitle, speakers, date, day,
                        date + " " + startTime, abstractText, description, venue, track, moderator));
            }
        } finally {
            cursor.close();
        }
    }
    return fossasiaEventList;
}
/**
 * Loads every event on the given date within the given track, including
 * speaker names.
 *
 * @param selectDate date string exactly as stored in the schedule table
 * @param track track name to filter on
 * @return all matching events (possibly empty)
 */
public ArrayList<FossasiaEvent> getEventsByDateandTrack(String selectDate, String track) {
    ArrayList<FossasiaEvent> fossasiaEventList = new ArrayList<FossasiaEvent>();
    // Parameterized query: avoids SQL injection/quoting problems.
    Cursor cursor = helper.getReadableDatabase().rawQuery(
            "SELECT * FROM schedule WHERE date=? AND track=?",
            new String[]{selectDate, track});
    try {
        while (cursor.moveToNext()) {
            int id = cursor.getInt(0);
            String title = cursor.getString(1);
            String subTitle = cursor.getString(2);
            String date = cursor.getString(3);
            String day = cursor.getString(4);
            String startTime = cursor.getString(5);
            String abstractText = cursor.getString(6);
            String description = cursor.getString(7);
            String venue = cursor.getString(8);
            // Matches the original behavior: the track value passed on to the
            // event is re-read from the row itself.
            track = cursor.getString(9);
            String moderator = cursor.getString(10);
            ArrayList<String> speakers = new ArrayList<String>();
            Cursor cursorSpeaker = helper.getReadableDatabase().rawQuery(
                    "SELECT speaker FROM " + DatabaseHelper.TABLE_NAME_SPEAKER_EVENT_RELATION
                            + " WHERE event_id=?", new String[]{String.valueOf(id)});
            try {
                while (cursorSpeaker.moveToNext()) {
                    speakers.add(cursorSpeaker.getString(0));
                }
            } finally {
                // Bug fix: speaker cursor was previously never closed (resource leak).
                cursorSpeaker.close();
            }
            fossasiaEventList.add(new FossasiaEvent(id, title, subTitle, speakers, date, day,
                    startTime, abstractText, description, venue, track, moderator));
        }
    } finally {
        cursor.close();
    }
    return fossasiaEventList;
}
/**
 * Returns speakers filtered by key-speaker status.
 *
 * @param fetchKeySpeaker true to return rows flagged as key speakers (flag == 1),
 *        false to return rows flagged as regular speakers (flag == 0)
 * @return the matching speakers (possibly empty)
 */
public ArrayList<Speaker> getSpeakers(boolean fetchKeySpeaker) {
    Cursor cursor = helper.getReadableDatabase().query(
            DatabaseHelper.TABLE_NAME_KEY_SPEAKERS, null, null, null, null, null, null);
    ArrayList<Speaker> speakers = new ArrayList<Speaker>();
    try {
        while (cursor.moveToNext()) {
            int id = cursor.getInt(0);
            String name = cursor.getString(1);
            String designation = cursor.getString(2);
            String information = cursor.getString(3);
            String twitterHandle = cursor.getString(4);
            String linkedInUrl = cursor.getString(5);
            String profilePicUrl = cursor.getString(6);
            int isKeySpeaker = cursor.getInt(7);
            // The two original branches built the identical Speaker; the only
            // real condition is that the row's flag matches the requested filter.
            if (fetchKeySpeaker ? isKeySpeaker == 1 : isKeySpeaker == 0) {
                speakers.add(new Speaker(id, name, information, linkedInUrl, twitterHandle,
                        designation, profilePicUrl, isKeySpeaker));
            }
        }
    } finally {
        cursor.close();
    }
    return speakers;
}
/** Returns a cursor over every row of the tracks table; the caller must close it. */
public Cursor getTracks() {
    final String sql = "SELECT * FROM " + DatabaseHelper.TABLE_NAME_TRACK;
    return helper.getReadableDatabase().rawQuery(sql, null);
}
/** Loads all sponsors (id, name, image, url) from the sponsor table. */
public ArrayList<Sponsor> getSponsors() {
    final ArrayList<Sponsor> sponsors = new ArrayList<Sponsor>();
    final Cursor cursor = helper.getReadableDatabase()
            .query(DatabaseHelper.TABLE_NAME_SPONSOR, null, null, null, null, null, null);
    // Columns: 0 = id, 1 = name, 2 = image, 3 = url.
    while (cursor.moveToNext()) {
        sponsors.add(new Sponsor(cursor.getInt(0), cursor.getString(1),
                cursor.getString(2), cursor.getString(3)));
    }
    cursor.close();
    return sponsors;
}
/**
* Returns the bookmarks.
*
* @param minStartTime When positive, only return the events starting after this time.
* @return A cursor to Events
*/
public Cursor getBookmarks(long minStartTime) {
String whereCondition;
String[] selectionArgs;
// Only filter on start time when a positive lower bound was requested.
if (minStartTime > 0L) {
whereCondition = " WHERE e.start_time > ?";
selectionArgs = new String[]{String.valueOf(minStartTime)};
} else {
whereCondition = "";
selectionArgs = null;
}
// Bookmarks are inner-joined to events, titles, days and tracks; persons are
// LEFT-joined so bookmarked events without speakers still appear, with all
// speaker names collapsed into a single column via GROUP_CONCAT.
Cursor cursor = helper
.getReadableDatabase()
.rawQuery(
"SELECT e.id AS _id, e.start_time, e.end_time, e.room_name, e.slug, et.title, et.subtitle, e.abstract, e.description, GROUP_CONCAT(p.name, ', '), e.day_index, d.date, t.name, t.type, 1"
+ " FROM "
+ DatabaseHelper.BOOKMARKS_TABLE_NAME
+ " b"
+ " JOIN "
+ DatabaseHelper.EVENTS_TABLE_NAME
+ " e ON b.event_id = e.id"
+ " JOIN "
+ DatabaseHelper.EVENTS_TITLES_TABLE_NAME
+ " et ON e.id = et.rowid"
+ " JOIN "
+ DatabaseHelper.DAYS_TABLE_NAME
+ " d ON e.day_index = d._index"
+ " JOIN "
+ DatabaseHelper.TRACKS_TABLE_NAME
+ " t ON e.track_id = t.id"
+ " LEFT JOIN "
+ DatabaseHelper.EVENTS_PERSONS_TABLE_NAME
+ " ep ON e.id = ep.event_id"
+ " LEFT JOIN "
+ DatabaseHelper.PERSONS_TABLE_NAME
+ " p ON ep.person_id = p.rowid" + whereCondition + " GROUP BY e.id" + " ORDER BY e.start_time ASC", selectionArgs);
// Consumers of this cursor are re-queried whenever the events URI changes.
cursor.setNotificationUri(context.getContentResolver(), URI_EVENTS);
return cursor;
}
/**
* Search through matching titles, subtitles, track names, person names. We need to use an union of 3 sub-queries because a "match" condition can not be
* accompanied by other conditions in a "where" statement.
*
* @param query
* @return A cursor to Events
*/
public Cursor getSearchResults(String query) {
// Prefix match for the full-text (FTS) sub-queries.
final String matchQuery = query + "*";
// Args in order: FTS title match, LIKE on track name, FTS person match.
String[] selectionArgs = new String[]{matchQuery, "%" + query + "%", matchQuery};
// Outer query loads the full event rows; the WHERE ... IN clause is a UNION
// of three sub-queries (title match, track name LIKE, person name match)
// because an FTS MATCH cannot be combined with other WHERE conditions.
Cursor cursor = helper
.getReadableDatabase()
.rawQuery(
"SELECT e.id AS _id, e.start_time, e.end_time, e.room_name, e.slug, et.title, et.subtitle, e.abstract, e.description, GROUP_CONCAT(p.name, ', '), e.day_index, d.date, t.name, t.type, b.event_id"
+ " FROM "
+ DatabaseHelper.EVENTS_TABLE_NAME
+ " e"
+ " JOIN "
+ DatabaseHelper.EVENTS_TITLES_TABLE_NAME
+ " et ON e.id = et.rowid"
+ " JOIN "
+ DatabaseHelper.DAYS_TABLE_NAME
+ " d ON e.day_index = d._index"
+ " JOIN "
+ DatabaseHelper.TRACKS_TABLE_NAME
+ " t ON e.track_id = t.id"
+ " LEFT JOIN "
+ DatabaseHelper.EVENTS_PERSONS_TABLE_NAME
+ " ep ON e.id = ep.event_id"
+ " LEFT JOIN "
+ DatabaseHelper.PERSONS_TABLE_NAME
+ " p ON ep.person_id = p.rowid"
+ " LEFT JOIN "
+ DatabaseHelper.BOOKMARKS_TABLE_NAME
+ " b ON e.id = b.event_id"
+ " WHERE e.id IN ( "
+ "SELECT rowid"
+ " FROM "
+ DatabaseHelper.EVENTS_TITLES_TABLE_NAME
+ " WHERE "
+ DatabaseHelper.EVENTS_TITLES_TABLE_NAME
+ " MATCH ?"
+ " UNION "
+ "SELECT e.id"
+ " FROM "
+ DatabaseHelper.EVENTS_TABLE_NAME
+ " e"
+ " JOIN "
+ DatabaseHelper.TRACKS_TABLE_NAME
+ " t ON e.track_id = t.id"
+ " WHERE t.name LIKE ?"
+ " UNION "
+ "SELECT ep.event_id"
+ " FROM "
+ DatabaseHelper.EVENTS_PERSONS_TABLE_NAME
+ " ep"
+ " JOIN "
+ DatabaseHelper.PERSONS_TABLE_NAME
+ " p ON ep.person_id = p.rowid" + " WHERE p.name MATCH ?" + " )" + " GROUP BY e.id" + " ORDER BY e.start_time ASC",
selectionArgs);
// Consumers of this cursor are re-queried whenever the events URI changes.
cursor.setNotificationUri(context.getContentResolver(), URI_EVENTS);
return cursor;
}
/**
* Method called by SearchSuggestionProvider to return search results in the format expected by the search framework.
*/
public Cursor getSearchSuggestionResults(String query, int limit) {
// Prefix match for the full-text (FTS) sub-queries.
final String matchQuery = query + "*";
// Args in order: FTS title match, LIKE on track name, FTS person match, row limit.
String[] selectionArgs = new String[]{matchQuery, "%" + query + "%", matchQuery, String.valueOf(limit)};
// Query is similar to getSearchResults but returns different columns, does not join the Day table or the Bookmark table and limits the result set.
Cursor cursor = helper.getReadableDatabase().rawQuery(
"SELECT e.id AS " + BaseColumns._ID + ", et.title AS " + SearchManager.SUGGEST_COLUMN_TEXT_1
+ ", IFNULL(GROUP_CONCAT(p.name, ', '), '') || ' - ' || t.name AS " + SearchManager.SUGGEST_COLUMN_TEXT_2 + ", e.id AS "
+ SearchManager.SUGGEST_COLUMN_INTENT_DATA + " FROM " + DatabaseHelper.EVENTS_TABLE_NAME + " e" + " JOIN "
+ DatabaseHelper.EVENTS_TITLES_TABLE_NAME + " et ON e.id = et.rowid" + " JOIN " + DatabaseHelper.TRACKS_TABLE_NAME
+ " t ON e.track_id = t.id" + " LEFT JOIN " + DatabaseHelper.EVENTS_PERSONS_TABLE_NAME + " ep ON e.id = ep.event_id" + " LEFT JOIN "
+ DatabaseHelper.PERSONS_TABLE_NAME + " p ON ep.person_id = p.rowid" + " WHERE e.id IN ( " + "SELECT rowid" + " FROM "
+ DatabaseHelper.EVENTS_TITLES_TABLE_NAME + " WHERE " + DatabaseHelper.EVENTS_TITLES_TABLE_NAME + " MATCH ?" + " UNION "
+ "SELECT e.id" + " FROM " + DatabaseHelper.EVENTS_TABLE_NAME + " e" + " JOIN " + DatabaseHelper.TRACKS_TABLE_NAME
+ " t ON e.track_id = t.id" + " WHERE t.name LIKE ?" + " UNION " + "SELECT ep.event_id" + " FROM "
+ DatabaseHelper.EVENTS_PERSONS_TABLE_NAME + " ep" + " JOIN " + DatabaseHelper.PERSONS_TABLE_NAME + " p ON ep.person_id = p.rowid"
+ " WHERE p.name MATCH ?" + " )" + " GROUP BY e.id" + " ORDER BY e.start_time ASC LIMIT ?", selectionArgs);
return cursor;
}
/**
* Returns all persons in alphabetical order.
*/
/** Returns all persons, sorted case-insensitively by name; caller must close the cursor. */
public Cursor getPersons() {
    final String sql = "SELECT rowid AS _id, name" + " FROM " + DatabaseHelper.PERSONS_TABLE_NAME
            + " ORDER BY name COLLATE NOCASE";
    final Cursor cursor = helper.getReadableDatabase().rawQuery(sql, null);
    // Re-query consumers whenever the events URI changes.
    cursor.setNotificationUri(context.getContentResolver(), URI_EVENTS);
    return cursor;
}
/** Returns whether a bookmark row exists for the given event. */
public boolean isBookmarked(FossasiaEvent event) {
    final String[] args = {String.valueOf(event.getId())};
    final long matches = queryNumEntries(helper.getReadableDatabase(),
            DatabaseHelper.BOOKMARKS_TABLE_NAME, "event_id = ?", args);
    return matches > 0L;
}
/**
 * Returns the map URL stored for a track's venue.
 *
 * @param track track name
 * @return the stored map URL, or a Google Maps fallback when the track is unknown
 */
public String getTrackMapUrl(String track) {
    // Parameterized query instead of String.format: avoids SQL injection.
    Cursor cursor = helper.getReadableDatabase().rawQuery(
            "SELECT map FROM " + DatabaseHelper.TABLE_NAME_TRACK_VENUE + " WHERE track=?",
            new String[]{track});
    // Bug fix: the fallback URL was misspelled as "htttp://", producing an
    // unusable link whenever the track had no stored map.
    String map = "http://maps.google.com/";
    try {
        if (cursor.moveToFirst()) {
            map = cursor.getString(0);
        }
    } finally {
        cursor.close();
    }
    return map;
}
/**
 * Inserts a bookmark row for the given event.
 *
 * @return true when a new bookmark was inserted, false when it already existed
 *         (the insert returns -1 and the transaction is rolled back)
 */
public boolean addBookmark(FossasiaEvent event) {
boolean complete = false;
SQLiteDatabase db = helper.getWritableDatabase();
db.beginTransaction();
try {
ContentValues values = new ContentValues();
values.put("event_id", event.getId());
long result = db.insert(DatabaseHelper.BOOKMARKS_TABLE_NAME, null, values);
// If the bookmark is already present
if (result == -1L) {
return false;
}
db.setTransactionSuccessful();
complete = true;
return true;
} finally {
db.endTransaction();
// Observers and the broadcast fire only when a row was actually inserted.
if (complete) {
context.getContentResolver().notifyChange(URI_EVENTS, null);
Intent intent = new Intent(ACTION_ADD_BOOKMARK).putExtra(EXTRA_EVENT_ID, event.getId());
// TODO: For now commented this, must implement String to date converter.
// Date startTime = event.getStartTime();
// if (startTime != null) {
// intent.putExtra(EXTRA_EVENT_START_TIME, startTime.getTime());
// }
LocalBroadcastManager.getInstance(context).sendBroadcast(intent);
}
}
}
/** Removes the bookmark for a single event object. */
public boolean removeBookmark(FossasiaEvent event) {
    final long[] ids = {event.getId()};
    return removeBookmarks(ids);
}
/** Removes the bookmark for a single event id. */
public boolean removeBookmark(long eventId) {
    final long[] ids = {eventId};
    return removeBookmarks(ids);
}
/**
 * Deletes the bookmark rows for all given event ids in one statement.
 *
 * @param eventIds ids to remove; must contain at least one entry
 * @return true when at least one bookmark row was deleted
 * @throws IllegalArgumentException when eventIds is empty
 */
public boolean removeBookmarks(long[] eventIds) {
int length = eventIds.length;
if (length == 0) {
throw new IllegalArgumentException("At least one bookmark id to remove must be passed");
}
String[] stringEventIds = new String[length];
for (int i = 0; i < length; ++i) {
stringEventIds[i] = String.valueOf(eventIds[i]);
}
boolean isComplete = false;
SQLiteDatabase db = helper.getWritableDatabase();
db.beginTransaction();
try {
// Single DELETE with an IN (...) clause built from the numeric ids.
String whereClause = "event_id IN (" + TextUtils.join(",", stringEventIds) + ")";
int count = db.delete(DatabaseHelper.BOOKMARKS_TABLE_NAME, whereClause, null);
if (count == 0) {
return false;
}
db.setTransactionSuccessful();
isComplete = true;
return true;
} finally {
db.endTransaction();
// Observers and the broadcast fire only when something was actually deleted.
if (isComplete) {
context.getContentResolver().notifyChange(URI_EVENTS, null);
Intent intent = new Intent(ACTION_REMOVE_BOOKMARKS).putExtra(EXTRA_EVENT_IDS, eventIds);
LocalBroadcastManager.getInstance(context).sendBroadcast(intent);
}
}
}
/**
 * Loads the venue details associated with a track.
 *
 * @param track track name
 * @return the venue, or {@code null} when the track has no venue row
 */
public Venue getVenueFromTrack(String track) {
    Venue ven = null;
    // Parameterized query instead of String.format: avoids SQL injection.
    Cursor cursor = helper.getReadableDatabase().rawQuery(
            "SELECT * FROM " + DatabaseHelper.TABLE_NAME_VENUE + " WHERE track=?",
            new String[]{track});
    try {
        if (cursor.moveToFirst()) {
            //tract TEXT, venue TEXT, map TEXT, room TEXT, link TEXT, address TEXT, how_to_reach TEXT }
            String venue = cursor.getString(1);
            String map = cursor.getString(2);
            String room = cursor.getString(3);
            String link = cursor.getString(4);
            String address = cursor.getString(5);
            String howToReach = cursor.getString(6);
            ven = new Venue(track, venue, map, room, link, address, howToReach);
        }
    } finally {
        // Bug fix: this cursor was previously never closed (resource leak).
        cursor.close();
    }
    return ven;
}
}
| apache-2.0 |
smanvi-pivotal/geode | geode-core/src/test/java/org/apache/geode/security/ClientDataAuthorizationUsingLegacySecurityWithFailoverDUnitTest.java | 19381 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.security;
import static org.apache.geode.distributed.ConfigurationProperties.SECURITY_CLIENT_ACCESSOR;
import static org.apache.geode.distributed.ConfigurationProperties.SECURITY_CLIENT_AUTHENTICATOR;
import static org.apache.geode.distributed.ConfigurationProperties.SECURITY_CLIENT_AUTH_INIT;
import static org.apache.geode.distributed.ConfigurationProperties.SERIALIZABLE_OBJECT_FILTER;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import java.io.Serializable;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.function.Consumer;
import org.apache.logging.log4j.Logger;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionFactory;
import org.apache.geode.cache.RegionShortcut;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;
import org.apache.geode.cache.client.internal.PoolImpl;
import org.apache.geode.cache.util.CacheListenerAdapter;
import org.apache.geode.internal.logging.LogService;
import org.apache.geode.security.templates.SimpleAccessController;
import org.apache.geode.security.templates.SimpleAuthenticator;
import org.apache.geode.security.templates.UserPasswordAuthInit;
import org.apache.geode.security.templates.UsernamePrincipal;
import org.apache.geode.test.dunit.rules.ClientVM;
import org.apache.geode.test.dunit.rules.ClusterStartupRule;
import org.apache.geode.test.dunit.rules.MemberVM;
import org.apache.geode.test.dunit.standalone.VersionManager;
import org.apache.geode.test.junit.categories.DistributedTest;
import org.apache.geode.test.junit.categories.SecurityTest;
import org.apache.geode.test.junit.rules.VMProvider;
import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactory;
/**
* This test class reproduces the tests present in
* {@link ClientDataAuthorizationUsingLegacySecurityDUnitTest} and confirms that permissions are
* maintained over failover.
*/
@Category({DistributedTest.class, SecurityTest.class})
@RunWith(Parameterized.class)
@Parameterized.UseParametersRunnerFactory(CategoryWithParameterizedRunnerFactory.class)
public class ClientDataAuthorizationUsingLegacySecurityWithFailoverDUnitTest {
// Starts/stops the DUnit cluster members (locator, servers, clients) per test.
@Rule
public ClusterStartupRule csRule = new ClusterStartupRule();
private MemberVM locator;
private MemberVM server1;
private MemberVM server2;
private static String regionName = "testRegion";
// Some data values against which we will test.
private static final String server_k1 = "server-key-1";
private static final String server_v1 = "server-value-1";
private static final String server_k2 = "server-key-2";
private static final String server_v2 = "server-value-2";
// Data pre-loaded onto the servers in setup().
private static Map<String, String> serverData = new HashMap<>();
static {
serverData.put(server_k1, server_v1);
serverData.put(server_k2, server_v2);
}
// Some data values against which we will test.
private static final String client_k1 = "client-key-1";
private static final String client_v1 = "client-value-1";
private static final String client_k2 = "client-key-2";
private static final String client_v2 = "client-value-2";
private static final String client_k3 = "client-key-3";
private static final String client_v3 = "client-value-3";
private static final String client_k4 = "client-key-4";
private static final String client_v4 = "client-value-4";
private static final String client_k5 = "client-key-5";
private static final String client_v5 = "client-value-5";
private static final String client_k6 = "client-key-6";
private static final String client_v6 = "client-value-6";
private static final String client_k7 = "client-key-7";
private static final String client_v7 = "client-value-7";
// putAll payload covering keys 4-5.
private static Map<String, String> clientData45 = new HashMap<>();
static {
clientData45.put(client_k4, client_v4);
clientData45.put(client_k5, client_v5);
}
// putAll payload covering keys 6-7.
private static Map<String, String> clientData67 = new HashMap<>();
static {
clientData67.put(client_k6, client_v6);
clientData67.put(client_k7, client_v7);
}
// Test against every client version
@Parameterized.Parameter
public String clientVersion;
// Parameterizes the class over every client version known to the VersionManager.
@Parameterized.Parameters(name = "clientVersion={0}")
public static Collection<String> data() {
return VersionManager.getInstance().getVersions();
}
/**
 * Starts a locator and two servers using the legacy security properties with
 * "cluster,data" permission, creates the replicated test region on both
 * servers (with a listener on server1) and seeds it with the server data.
 */
@Before
public void setup() throws Exception {
Properties clusterMemberProperties = getVMPropertiesWithPermission("cluster,data");
locator = csRule.startLocatorVM(0, clusterMemberProperties);
server1 = csRule.startServerVM(1, clusterMemberProperties, locator.getPort());
server2 = csRule.startServerVM(2, clusterMemberProperties, locator.getPort());
// put some data on the cluster.
server1.invoke(() -> {
Cache cache = ClusterStartupRule.getCache();
RegionFactory<String, String> rf = cache.createRegionFactory(RegionShortcut.REPLICATE);
rf.addCacheListener(new ClientAuthorizationFailoverTestListener());
Region<String, String> region = rf.create(regionName);
region.putAll(serverData);
});
// Verify the seeded data replicated to the second server.
server2.invoke(() -> {
Cache cache = ClusterStartupRule.getCache();
RegionFactory<String, String> rf = cache.createRegionFactory(RegionShortcut.REPLICATE);
Region<String, String> region = rf.create(regionName);
assertThat(region.getAll(serverData.keySet())).containsAllEntriesOf(serverData);
});
}
/**
 * A client authenticated as "dataRead" must be able to get but not put, both
 * before and after its primary server is killed and the pool fails over.
 */
@Test
public void dataReaderCanStillOnlyReadAfterFailover() throws Exception {
// Connect to the server that will fail
ClientVM client = createAndInitializeClientAndCache("dataRead");
// Client should be able to read and not write.
client.invoke(() -> {
Region<String, String> region = ClusterStartupRule.getClientCache().getRegion(regionName);
// Assert that the client can get
assertThat(region.get(server_k1)).isEqualTo(server_v1);
assertThat(region.get(server_k2, null)).isEqualTo(server_v2);
assertThat(region.getAll(serverData.keySet())).containsAllEntriesOf(serverData);
assertThat(region.getAll(serverData.keySet(), null)).containsAllEntriesOf(serverData);
// Assert that the client cannot put
assertThatThrownBy(() -> region.put(client_k1, client_v1))
.hasCauseInstanceOf(NotAuthorizedException.class);
assertThatThrownBy(() -> region.put(client_k2, client_v2, null))
.hasCauseInstanceOf(NotAuthorizedException.class);
assertThatThrownBy(() -> region.putIfAbsent(client_k3, client_v3))
.hasCauseInstanceOf(NotAuthorizedException.class);
assertThatThrownBy(() -> region.putAll(clientData45))
.hasCauseInstanceOf(NotAuthorizedException.class);
assertThatThrownBy(() -> region.putAll(clientData67, null))
.hasCauseInstanceOf(NotAuthorizedException.class);
});
// Initialize client cache and region. Get the port of the primary connected server.
VMProvider server_to_fail = determinePrimaryServer(client);
// Bring down primary server
server_to_fail.invoke(() -> ClusterStartupRule.getCache().close());
// Confirm failover
VMProvider secondaryServer = (server1.equals(server_to_fail)) ? server2 : server1;
assertThat(secondaryServer).isEqualTo(determinePrimaryServer(client));
// Confirm permissions: client should still only be able to read and not write.
client.invoke(() -> {
Region<String, String> region = ClusterStartupRule.getClientCache().getRegion(regionName);
// Assert that the client can get
assertThat(region.get(server_k1)).isEqualTo(server_v1);
assertThat(region.get(server_k2, null)).isEqualTo(server_v2);
assertThat(region.getAll(serverData.keySet())).containsAllEntriesOf(serverData);
assertThat(region.getAll(serverData.keySet(), null)).containsAllEntriesOf(serverData);
// Assert that the client cannot put
assertThatThrownBy(() -> region.put(client_k1, client_v1))
.hasCauseInstanceOf(NotAuthorizedException.class);
assertThatThrownBy(() -> region.put(client_k2, client_v2, null))
.hasCauseInstanceOf(NotAuthorizedException.class);
assertThatThrownBy(() -> region.putIfAbsent(client_k3, client_v3))
.hasCauseInstanceOf(NotAuthorizedException.class);
assertThatThrownBy(() -> region.putAll(clientData45))
.hasCauseInstanceOf(NotAuthorizedException.class);
assertThatThrownBy(() -> region.putAll(clientData67, null))
.hasCauseInstanceOf(NotAuthorizedException.class);
});
// Confirm that no puts went through
secondaryServer.invoke(() -> {
assertThat(ClusterStartupRule.getCache().getRegion(regionName))
.containsOnlyKeys(server_k1, server_k2).containsAllEntriesOf(serverData);
});
}
/**
 * A client authenticated as "dataWrite" must be able to put but not get, both
 * before and after its primary server is killed and the pool fails over.
 */
@Test
public void dataWriterCanStillOnlyWriteAfterFailover() throws Exception {
// Connect to the server that will fail
ClientVM client = createAndInitializeClientAndCache("dataWrite");
// Client should be able to write but not read.
client.invoke(() -> {
Region<String, String> region = ClusterStartupRule.getClientCache().getRegion(regionName);
// Puts do not throw
// Assert that the client can put
region.put(client_k1, client_v1);
region.put(client_k2, client_v2, null);
region.putIfAbsent(client_k3, client_v3);
region.putAll(clientData45);
region.putAll(clientData67, null);
// Assert that the client cannot get
assertThatThrownBy(() -> region.get(server_k1))
.hasCauseInstanceOf(NotAuthorizedException.class);
assertThatThrownBy(() -> region.get(server_k2, null))
.hasCauseInstanceOf(NotAuthorizedException.class);
// An unauthorized getAll does not throw; it just does not return the requested values.
// See GEODE-3632.
assertThat(region.getAll(serverData.keySet())).isEmpty();
assertThat(region.getAll(serverData.keySet(), null)).isEmpty();
});
// Initialize client cache and region. Get the port of the primary connected server.
VMProvider server_to_fail = determinePrimaryServer(client);
// Bring down primary server
server_to_fail.invoke(() -> ClusterStartupRule.getCache().close());
// Confirm failover
VMProvider secondaryServer = (server1.equals(server_to_fail)) ? server2 : server1;
assertThat(secondaryServer).isEqualTo(determinePrimaryServer(client));
// Confirm permissions: client should still only be able to write and not read.
client.invoke(() -> {
Region<String, String> region = ClusterStartupRule.getClientCache().getRegion(regionName);
// Puts do not throw
// Assert that the client can put
region.put(client_k1, client_v1);
region.put(client_k2, client_v2, null);
region.putIfAbsent(client_k3, client_v3);
region.putAll(clientData45);
region.putAll(clientData67, null);
// Assert that the client cannot get
assertThatThrownBy(() -> region.get(server_k1))
.hasCauseInstanceOf(NotAuthorizedException.class);
assertThatThrownBy(() -> region.get(server_k2, null))
.hasCauseInstanceOf(NotAuthorizedException.class);
// An unauthorized getAll does not throw; it just does not return the requested values.
// See GEODE-3632.
assertThat(region.getAll(serverData.keySet())).isEmpty();
assertThat(region.getAll(serverData.keySet(), null)).isEmpty();
});
}
/**
 * A client authenticated as "dataRead" must be able to register and unregister
 * interest, both before and after failover to the secondary server.
 */
@Test
public void dataReaderCanRegisterAndUnregisterAcrossFailover() throws Exception {
// Connect to the server that will fail
ClientVM client = createAndInitializeClientAndCache("dataRead");
// Client should be able to register and unregister interests.
client.invoke(() -> {
Region<String, String> region = ClusterStartupRule.getClientCache().getRegion(regionName);
region.unregisterInterest(client_k1);
region.registerInterest(client_k1);
region.registerInterestRegex("client-.*");
region.unregisterInterestRegex("client-.*");
});
// Initialize client cache and region. Get the port of the primary connected server.
VMProvider server_to_fail = determinePrimaryServer(client);
// Bring down primary server
server_to_fail.invoke(() -> ClusterStartupRule.getCache().close());
// Confirm failover
VMProvider secondaryServer = (server1.equals(server_to_fail)) ? server2 : server1;
assertThat(secondaryServer).isEqualTo(determinePrimaryServer(client));
// Confirm permissions.
client.invoke(() -> {
Region<String, String> region = ClusterStartupRule.getClientCache().getRegion(regionName);
region.unregisterInterest(client_k1);
region.registerInterest(client_k1);
region.registerInterestRegex("client-.*");
region.unregisterInterestRegex("client-.*");
});
}
/**
 * A client authenticated as "dataWrite" must NOT be able to register interest,
 * neither before nor after failover to the secondary server.
 */
@Test
public void dataWriterCannotRegisterInterestAcrossFailover() throws Exception {
    // Consistency fix: reuse the shared client-setup helper instead of the
    // previous inline duplicate of the same pool/region setup (which also left
    // an unused local region variable behind). The helper applies the identical
    // properties, pool configuration and PROXY region creation.
    ClientVM client = createAndInitializeClientAndCache("dataWrite");
    // Registering interest requires read permission, which this client lacks.
    client.invoke(() -> {
        Region<String, String> region = ClusterStartupRule.getClientCache().getRegion(regionName);
        assertThatThrownBy(() -> region.registerInterest(client_k1))
            .hasCauseInstanceOf(NotAuthorizedException.class);
        assertThatThrownBy(() -> region.registerInterestRegex("client-.*"))
            .hasCauseInstanceOf(NotAuthorizedException.class);
        // Attempts to unregister will fail client-side. The client maintains its own lists of
        // interests and, since the above failed, any unregistering of interest will prematurely
        // terminate before contacting any server. No authorization is attempted.
    });
    // Determine the primary connected server and bring it down.
    VMProvider server_to_fail = determinePrimaryServer(client);
    server_to_fail.invoke(() -> ClusterStartupRule.getCache().close());
    // Confirm failover to the surviving server.
    VMProvider secondaryServer = (server1.equals(server_to_fail)) ? server2 : server1;
    assertThat(secondaryServer).isEqualTo(determinePrimaryServer(client));
    // Permissions must be unchanged after failover.
    client.invoke(() -> {
        Region<String, String> region = ClusterStartupRule.getClientCache().getRegion(regionName);
        assertThatThrownBy(() -> region.registerInterest(client_k1))
            .hasCauseInstanceOf(NotAuthorizedException.class);
        assertThatThrownBy(() -> region.registerInterestRegex("client-.*"))
            .hasCauseInstanceOf(NotAuthorizedException.class);
    });
}
/**
 * Starts a versioned client VM whose security properties carry the given permission
 * string, then creates a PROXY client region named {@code regionName} in its cache.
 *
 * @param withPermission username/password string encoding the granted permission
 * @return the started, initialized client VM
 */
private ClientVM createAndInitializeClientAndCache(String withPermission) throws Exception {
  Properties clientProps = getVMPropertiesWithPermission(withPermission);
  int port1 = server1.getPort();
  int port2 = server2.getPort();
  Consumer<ClientCacheFactory> poolConfig =
      (Serializable & Consumer<ClientCacheFactory>) factory -> factory
          .addPoolServer("localhost", port1)
          .addPoolServer("localhost", port2)
          .setPoolSubscriptionEnabled(true)
          .setPoolSubscriptionRedundancy(2);
  ClientVM startedClient = csRule.startClientVM(3, clientProps, poolConfig, clientVersion);
  // Create the client-side PROXY region; the returned region handle is not needed here.
  startedClient.invoke(() -> {
    ClientCache clientCache = ClusterStartupRule.getClientCache();
    clientCache.createClientRegionFactory(ClientRegionShortcut.PROXY).create(regionName);
  });
  return startedClient;
}
/**
 * Asks the client which server port its default pool currently treats as primary and
 * maps that port back to the corresponding server VM.
 */
private VMProvider determinePrimaryServer(ClientVM client) {
  int primaryPort = client.invoke(() -> {
    PoolImpl defaultPool = (PoolImpl) ClusterStartupRule.getClientCache().getDefaultPool();
    return defaultPool.getPrimaryPort();
  });
  if (primaryPort == server1.getPort()) {
    return server1;
  }
  return server2;
}
/**
 * Builds client VM properties that authenticate through the legacy security framework,
 * using the given string as both username and password (the name encodes the permission
 * granted by {@code SimpleAccessController}).
 *
 * @param permission username/password string encoding the permission under test
 * @return properties suitable for starting a secured client VM
 */
private Properties getVMPropertiesWithPermission(String permission) {
  Properties props = new Properties();
  // Using the legacy security framework
  props.setProperty(SECURITY_CLIENT_AUTHENTICATOR,
      SimpleAuthenticator.class.getCanonicalName() + ".create");
  props.setProperty(SECURITY_CLIENT_ACCESSOR,
      SimpleAccessController.class.getCanonicalName() + ".create");
  // Using the given username/permission string
  props.setProperty(UserPasswordAuthInit.USER_NAME, permission);
  props.setProperty(UserPasswordAuthInit.PASSWORD, permission);
  props.setProperty(SECURITY_CLIENT_AUTH_INIT,
      UserPasswordAuthInit.class.getCanonicalName() + ".create");
  // We can't send the object filter property to versions before 1.4.0 because
  // it's not a valid property, but we must set it in 140 and above to allow
  // serialization of UsernamePrincipal.
  // NOTE(review): lexicographic String.compareTo works for three-digit tags like
  // "140"; confirm it still orders correctly for any longer version identifiers.
  if (clientVersion.compareTo("140") >= 0) {
    props.setProperty(SERIALIZABLE_OBJECT_FILTER, UsernamePrincipal.class.getCanonicalName());
  }
  return props;
}
/**
 * A trivial cache listener that only logs entry-creation events; used to exercise
 * listener wiring in this test without introducing any side effects.
 */
private static class ClientAuthorizationFailoverTestListener
    extends CacheListenerAdapter<String, String> {
  private static final Logger logger = LogService.getLogger();

  // Logs that an entry was created; intentionally does nothing else.
  @Override
  public void afterCreate(EntryEvent<String, String> event) {
    logger.info("In afterCreate");
  }
}
}
| apache-2.0 |
liuyongfeng90/NSCGApp | app/src/main/java/com/nscg/app/util/BaiduLocation/LocationBean.java | 4555 | package com.nscg.app.util.BaiduLocation;
import java.io.Serializable;
/**
 * @ClassName: LocationBean
 * @Description: Value object holding one Baidu location fix plus the associated
 *               user information. Serializable and cloneable (shallow copy).
 */
public class LocationBean implements Cloneable, Serializable {

    private static final long serialVersionUID = 1L;

    private String uid;
    private String locName;      // place name
    private String province;     // province name
    private String city;         // city name
    private String district;     // district name
    private String street;       // street name
    private String streetNum;    // street number
    private Double latitude;     // latitude
    private Double longitude;    // longitude
    private String time;
    private int locType;
    private float radius;
    // Only populated for GPS fixes
    private float speed;
    private int satellite;
    private float direction;
    // Only populated for Wi-Fi fixes
    private String addStr;       // full textual address
    private int operationers;
    // User information
    private String userId;
    private String userName;
    private String userAvator;
    // Extra detailed place name entered by the user
    private String detailAddInput;

    public String getUid() {
        return uid;
    }

    public void setUid(String uid) {
        this.uid = uid;
    }

    public String getLocName() {
        return locName;
    }

    public void setLocName(String locName) {
        this.locName = locName;
    }

    public String getProvince() {
        return province;
    }

    public void setProvince(String province) {
        this.province = province;
    }

    public String getCity() {
        return city;
    }

    public void setCity(String city) {
        this.city = city;
    }

    public String getDistrict() {
        return district;
    }

    public void setDistrict(String district) {
        this.district = district;
    }

    public String getStreet() {
        return street;
    }

    public void setStreet(String street) {
        this.street = street;
    }

    public String getStreetNum() {
        return streetNum;
    }

    public void setStreetNum(String streetNum) {
        this.streetNum = streetNum;
    }

    public Double getLatitude() {
        return latitude;
    }

    public void setLatitude(Double latitude) {
        this.latitude = latitude;
    }

    public Double getLongitude() {
        return longitude;
    }

    public void setLongitude(Double longitude) {
        this.longitude = longitude;
    }

    public String getTime() {
        return time;
    }

    public void setTime(String time) {
        this.time = time;
    }

    public int getLocType() {
        return locType;
    }

    public void setLocType(int locType) {
        this.locType = locType;
    }

    public float getRadius() {
        return radius;
    }

    public void setRadius(float radius) {
        this.radius = radius;
    }

    public float getSpeed() {
        return speed;
    }

    public void setSpeed(float speed) {
        this.speed = speed;
    }

    public int getSatellite() {
        return satellite;
    }

    public void setSatellite(int satellite) {
        this.satellite = satellite;
    }

    public float getDirection() {
        return direction;
    }

    public void setDirection(float direction) {
        this.direction = direction;
    }

    public String getAddStr() {
        return addStr;
    }

    public void setAddStr(String addStr) {
        this.addStr = addStr;
    }

    public int getOperationers() {
        return operationers;
    }

    public void setOperationers(int operationers) {
        this.operationers = operationers;
    }

    public String getUserId() {
        return userId;
    }

    public void setUserId(String userId) {
        this.userId = userId;
    }

    public String getUserName() {
        return userName;
    }

    public void setUserName(String userName) {
        this.userName = userName;
    }

    public String getUserAvator() {
        return userAvator;
    }

    public void setUserAvator(String userAvator) {
        this.userAvator = userAvator;
    }

    public String getDetailAddInput() {
        return detailAddInput;
    }

    public void setDetailAddInput(String detailAddInput) {
        this.detailAddInput = detailAddInput;
    }

    /**
     * Returns a shallow field-for-field copy of this bean.
     */
    @Override
    public Object clone() {
        try {
            return (LocationBean) super.clone();
        } catch (CloneNotSupportedException e) {
            // Bug fix: the exception was previously swallowed and null returned.
            // It cannot actually occur because this class implements Cloneable
            // and extends Object, so surface it as a programming error instead.
            throw new AssertionError(e);
        }
    }
}
| apache-2.0 |
menacher/netty | buffer/src/main/java/io/netty/buffer/FilteredMessageBuf.java | 4797 | /*
* Copyright 2013 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer;
import java.util.Collection;
import java.util.Iterator;
/**
 * A {@link MessageBuf} decorator that runs every inserted message through
 * {@link #filter(Object)} before delegating to the wrapped buffer. All other
 * operations are forwarded to the wrapped buffer unchanged.
 */
public abstract class FilteredMessageBuf implements MessageBuf<Object> {

    /** The wrapped buffer to which all operations delegate. */
    protected final MessageBuf<Object> buf;

    @SuppressWarnings("unchecked")
    protected FilteredMessageBuf(MessageBuf<?> buf) {
        if (buf == null) {
            throw new NullPointerException("buf");
        }
        this.buf = (MessageBuf<Object>) buf;
    }

    /**
     * Transforms a message before insertion into the wrapped buffer.
     * Must not return {@code null}; a {@code null} result causes the inserting
     * operation to throw {@link IllegalStateException}.
     */
    protected abstract Object filter(Object msg);

    @Override
    public int drainTo(Collection<? super Object> c) {
        return buf.drainTo(c);
    }

    @Override
    public int drainTo(Collection<? super Object> c, int maxElements) {
        return buf.drainTo(c, maxElements);
    }

    @Override
    public BufType type() {
        return buf.type();
    }

    @Override
    public int maxCapacity() {
        return buf.maxCapacity();
    }

    @Override
    public boolean isReadable() {
        return buf.isReadable();
    }

    @Override
    public boolean isReadable(int size) {
        return buf.isReadable(size);
    }

    @Override
    public boolean isWritable() {
        return buf.isWritable();
    }

    @Override
    public boolean isWritable(int size) {
        return buf.isWritable(size);
    }

    @Override
    public boolean add(Object e) {
        if (e == null) {
            throw new NullPointerException("e");
        }
        e = filter(e);
        ensureNonNull(e);
        return buf.add(e);
    }

    @Override
    public boolean offer(Object e) {
        if (e == null) {
            throw new NullPointerException("e");
        }
        e = filter(e);
        ensureNonNull(e);
        return buf.offer(e);
    }

    private void ensureNonNull(Object e) {
        if (e == null) {
            throw new IllegalStateException(getClass().getSimpleName() + ".filter() returned null");
        }
    }

    @Override
    public Object remove() {
        return buf.remove();
    }

    @Override
    public Object poll() {
        return buf.poll();
    }

    @Override
    public Object element() {
        return buf.element();
    }

    @Override
    public Object peek() {
        return buf.peek();
    }

    @Override
    public int size() {
        return buf.size();
    }

    @Override
    public boolean isEmpty() {
        return buf.isEmpty();
    }

    @Override
    public boolean contains(Object o) {
        return buf.contains(o);
    }

    @Override
    public Iterator<Object> iterator() {
        return buf.iterator();
    }

    @Override
    public Object[] toArray() {
        return buf.toArray();
    }

    @Override
    public <T> T[] toArray(T[] a) {
        return buf.toArray(a);
    }

    @Override
    public boolean remove(Object o) {
        return buf.remove(o);
    }

    @Override
    public boolean containsAll(Collection<?> c) {
        return buf.containsAll(c);
    }

    @Override
    public boolean addAll(Collection<?> c) {
        int i = 0;
        boolean added = false;
        for (Object e: c) {
            if (e == null) {
                throw new NullPointerException("c[" + i + ']');
            }
            e = filter(e);
            ensureNonNull(e);
            added |= buf.add(e);
            // Bug fix: the index was never incremented, so the NullPointerException
            // message above always blamed element c[0] regardless of position.
            i ++;
        }
        return added;
    }

    @Override
    public boolean removeAll(Collection<?> c) {
        return buf.removeAll(c);
    }

    @Override
    public boolean retainAll(Collection<?> c) {
        return buf.retainAll(c);
    }

    @Override
    public void clear() {
        buf.clear();
    }

    @Override
    public int refCnt() {
        return buf.refCnt();
    }

    @Override
    public MessageBuf<Object> retain() {
        buf.retain();
        return this;
    }

    @Override
    public MessageBuf<Object> retain(int increment) {
        buf.retain(increment);
        return this;
    }

    @Override
    public boolean release() {
        return buf.release();
    }

    @Override
    public boolean release(int decrement) {
        return buf.release(decrement);
    }

    @Override
    public String toString() {
        return getClass().getSimpleName() + '(' + buf + ')';
    }
}
| apache-2.0 |
Yangyazheng/azkaban_annotation | azkaban-common/src/main/java/azkaban/trigger/ConditionChecker.java | 1257 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger;
import java.util.Map;
/**
 * A condition checker evaluated by the trigger framework; when {@link #eval()}
 * reports that the condition is met, the associated trigger actions are fired.
 */
public interface ConditionChecker {
  /**
   * Evaluates the condition; a true result causes the corresponding
   * actions to be triggered.
   * @return the evaluation result
   */
  Object eval();

  Object getNum();

  /**
   * Resets the checker's condition state.
   */
  void reset();

  /**
   * Returns the id of this checker.
   * @return the checker id
   */
  String getId();

  /**
   * Returns the type of this checker.
   * @return the checker type
   */
  String getType();

  ConditionChecker fromJson(Object obj) throws Exception;

  Object toJson();

  void stopChecker();

  void setContext(Map<String, Object> context);

  long getNextCheckTime();
}
| apache-2.0 |
tonymcveigh/worker-fw | worker-api/src/main/java/com/hpe/caf/api/worker/Worker.java | 3652 | /*
* (c) Copyright 2015-2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hpe.caf.api.worker;
/**
 * A Worker can be constructed in whatever way suits the developer, but should only perform the bare
 * minimum of tasks in the constructor to set itself up to perform the computational work. At some point
 * after construction, the worker-core framework will call through to doWork(), at which point this Worker
 * will be on its own separately managed thread and can start performing useful operations. If the Worker
 * throws an exception from the constructor, this task will be rejected back onto the queue (and eventually
 * it may be dropped, depending upon the WorkerQueue implementation).
 *
 * There are no limits upon time taken for the Worker to perform its task, but it must at some point
 * terminate, either by throwing an exception or by returning a WorkerResponse object from doWork().
 * The Worker base class has various utility methods for returning a WorkerResponse,
 * such as createSuccessResult, createFailureResult, and createTaskSubmission. Preferably a Worker will
 * always return one of these as opposed to throwing a WorkerException out of the object.
 *
 * Finally, a Worker has methods to classify the type of work it is performing (an "identifier") and another
 * method that returns the integer API version of the task data. These are typically defined in your shared
 * package that contains the task and result classes, but are used here for constructing a WorkerResponse.
 * @since 9.0
 */
public interface Worker
{
    /**
     * Start the work on a task.
     * @return the result of the worker operation, and appropriate result data
     * @throws InterruptedException indicates that the task is being aborted as requested by the framework
     * @throws TaskRejectedException indicates this Worker wishes to abandon this task and defer its execution
     * @throws TaskFailedException if the Worker fails in an unrecoverable fashion
     * @throws InvalidTaskException if the Worker fails to understand the task to process
     */
    WorkerResponse doWork()
        throws InterruptedException, TaskRejectedException, InvalidTaskException;

    /**
     * @return a string to uniquely identify the sort of tasks this worker will do
     */
    String getWorkerIdentifier();

    /**
     * This should return a number that identifies the API version that this worker uses, and should
     * be incremented when the format of the task data (or result data) changes. Internal code-logic
     * changes should not affect the API version.
     * @return a numeral that identifies the API version of the worker
     */
    int getWorkerApiVersion();

    /**
     * In case of a Worker's doWork() method failing with an unhandled exception, it is expected a
     * Worker should be able to return a general result.
     * @param t the throwable that caused the unhandled Worker failure
     * @return a response in case of a general unhandled exception failure scenario
     */
    WorkerResponse getGeneralFailureResult(Throwable t);
}
| apache-2.0 |
laonong16/leetcode | facebook/Max Consecutive Ones II/Accepted-3ms-238172204.java | 777 | //
// Generated by fetch-leetcode-submission project on GitHub.
// https://github.com/gitzhou/fetch-leetcode-submission
// Contact Me: aaron67[AT]aaron67.cc
//
// Max Consecutive Ones II
// https://leetcode.com/problems/max-consecutive-ones-ii/
//
class Solution {
    /**
     * Returns the length of the longest run of 1s obtainable by flipping at most
     * one 0 in {@code nums}.
     *
     * Sliding-window approach: extend the right edge one element at a time and
     * shrink from the left whenever the window contains more than one zero, so the
     * window always holds at most one flippable zero.
     */
    public int findMaxConsecutiveOnes(int[] nums) {
        int best = 0;
        int windowStart = 0;
        int zeroCount = 0;
        for (int windowEnd = 0; windowEnd < nums.length; windowEnd++) {
            if (nums[windowEnd] == 0) {
                zeroCount++;
            }
            // Restore the at-most-one-zero invariant.
            while (zeroCount > 1) {
                if (nums[windowStart] == 0) {
                    zeroCount--;
                }
                windowStart++;
            }
            best = Math.max(best, windowEnd - windowStart + 1);
        }
        return best;
    }
}
| apache-2.0 |
j123b567/j2mod | src/main/java/com/ghgande/j2mod/modbus/msg/WriteCoilRequest.java | 5366 | /*
* Copyright 2002-2016 jamod & j2mod development teams
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ghgande.j2mod.modbus.msg;
import com.ghgande.j2mod.modbus.Modbus;
import com.ghgande.j2mod.modbus.net.AbstractModbusListener;
import com.ghgande.j2mod.modbus.procimg.DigitalOut;
import com.ghgande.j2mod.modbus.procimg.IllegalAddressException;
import com.ghgande.j2mod.modbus.procimg.ProcessImage;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
* Class implementing a <tt>WriteCoilRequest</tt>. The implementation directly
* correlates with the class 0 function <i>write coil (FC 5)</i>. It
* encapsulates the corresponding request message.
*
* @author Dieter Wimberger
* @author Steve O'Hara (4energy)
* @version 2.0 (March 2016)
*/
public final class WriteCoilRequest extends ModbusRequest {
// instance attributes
private int reference;
private boolean coil;
/**
* Constructs a new <tt>WriteCoilRequest</tt> instance.
*/
public WriteCoilRequest() {
super();
setFunctionCode(Modbus.WRITE_COIL);
setDataLength(4);
}
/**
* Constructs a new <tt>WriteCoilRequest</tt> instance with a given
* reference and state to be written.
*
* @param ref the reference number of the register to read from.
* @param b true if the coil should be set of false if it should be unset.
*/
public WriteCoilRequest(int ref, boolean b) {
super();
setFunctionCode(Modbus.WRITE_COIL);
setDataLength(4);
setReference(ref);
setCoil(b);
}
public ModbusResponse getResponse() {
WriteCoilResponse response = new WriteCoilResponse();
response.setHeadless(isHeadless());
if (!isHeadless()) {
response.setProtocolID(getProtocolID());
response.setTransactionID(getTransactionID());
}
response.setFunctionCode(getFunctionCode());
response.setUnitID(getUnitID());
return response;
}
@Override
public ModbusResponse createResponse(AbstractModbusListener listener) {
WriteCoilResponse response;
DigitalOut dout;
// 1. get process image
ProcessImage procimg = listener.getProcessImage(getUnitID());
// 2. get coil
try {
dout = procimg.getDigitalOut(getReference());
// 3. set coil
dout.set(getCoil());
}
catch (IllegalAddressException iaex) {
return createExceptionResponse(Modbus.ILLEGAL_ADDRESS_EXCEPTION);
}
response = (WriteCoilResponse)getResponse();
response.setReference(getReference());
response.setCoil(getCoil());
return response;
}
/**
* Returns the reference of the register of the coil that should be written
* to with this <tt>ReadCoilsRequest</tt>.
*
* @return the reference of the coil's register.
*/
public int getReference() {
return reference;
}
/**
* Sets the reference of the register of the coil that should be written to
* with this <tt>ReadCoilsRequest</tt>.
* <p>
*
* @param ref the reference of the coil's register.
*/
public void setReference(int ref) {
reference = ref;
}
/**
* Returns the state that should be written with this
* <tt>WriteCoilRequest</tt>.
*
* @return true if the coil should be set of false if it should be unset.
*/
public boolean getCoil() {
return coil;
}
/**
* Sets the state that should be written with this <tt>WriteCoilRequest</tt>.
*
* @param b true if the coil should be set of false if it should be unset.
*/
public void setCoil(boolean b) {
coil = b;
}
public void writeData(DataOutput dout) throws IOException {
dout.writeShort(reference);
if (coil) {
dout.write(Modbus.COIL_ON_BYTES, 0, 2);
}
else {
dout.write(Modbus.COIL_OFF_BYTES, 0, 2);
}
}
public void readData(DataInput din) throws IOException {
reference = din.readUnsignedShort();
if (din.readByte() == Modbus.COIL_ON) {
coil = true;
}
else {
coil = false;
}
// discard the next byte.
din.readByte();
}
public byte[] getMessage() {
byte result[] = new byte[4];
result[0] = (byte)((reference >> 8) & 0xff);
result[1] = (byte)(reference & 0xff);
if (coil) {
result[2] = Modbus.COIL_ON_BYTES[0];
result[3] = Modbus.COIL_ON_BYTES[1];
}
else {
result[2] = Modbus.COIL_OFF_BYTES[0];
result[3] = Modbus.COIL_OFF_BYTES[1];
}
return result;
}
} | apache-2.0 |
SamuelBenkeCalabresi/Exploring-Deep-Learning-Architectures | src/dataset/AbstractDataset.java | 328 | package dataset;
/**
 * Base class for one row of a dataset matrix; the aim is to create a better
 * package and handler methods for the data to be used by the machine learning
 * models.
 */
public abstract class AbstractDataset {
    /* The data on any row of the matrix */
    protected final int[] data;

    /**
     * @param data the raw integer features of one row; stored by reference
     *             (no defensive copy), so callers must not mutate it afterwards
     */
    public AbstractDataset(int[] data) {
        this.data = data;
    }
}
| apache-2.0 |
zzcclp/carbondata | integration/spark/src/main/scala/org/apache/carbondata/index/secondary/SecondaryIndex.java | 7102 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.index.secondary;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import com.google.common.collect.Sets;
import org.apache.carbondata.common.logging.LogServiceFactory;
import org.apache.carbondata.core.constants.CarbonCommonConstants;
import org.apache.carbondata.core.datastore.block.SegmentProperties;
import org.apache.carbondata.core.index.IndexUtil;
import org.apache.carbondata.core.index.dev.IndexModel;
import org.apache.carbondata.core.index.dev.cgindex.CoarseGrainIndex;
import org.apache.carbondata.core.indexstore.Blocklet;
import org.apache.carbondata.core.indexstore.ExtendedBlocklet;
import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
import org.apache.carbondata.core.scan.expression.Expression;
import org.apache.carbondata.core.scan.filter.executer.FilterExecutor;
import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
import org.apache.carbondata.core.statusmanager.LoadMetadataDetails;
import org.apache.carbondata.core.statusmanager.SegmentStatus;
import org.apache.carbondata.core.statusmanager.SegmentStatusManager;
import org.apache.carbondata.core.util.path.CarbonTablePath;
import org.apache.carbondata.index.secondary.SecondaryIndexModel.PositionReferenceInfo;
import org.apache.log4j.Logger;
/**
* Secondary Index to prune at blocklet level.
*/
public class SecondaryIndex extends CoarseGrainIndex {
  private static final Logger LOGGER =
      LogServiceFactory.getLogService(SecondaryIndex.class.getName());
  // Name of the secondary-index table queried for position references.
  private String indexName;
  // Id of the main-table segment currently being pruned.
  private String currentSegmentId;
  // SI segments in a usable state (intersected with the caller's list in validateSegmentList).
  private Set<String> validSegmentIds;
  // Shared holder caching segmentId -> position references across prune calls.
  private PositionReferenceInfo positionReferenceInfo;
  // Blocklets pruned by the default (main-table) index, used as a fallback.
  private List<ExtendedBlocklet> defaultIndexPrunedBlocklet;

  public void setDefaultIndexPrunedBlocklet(List<ExtendedBlocklet> extendedBlockletList) {
    this.defaultIndexPrunedBlocklet = extendedBlockletList;
  }

  // Copies the query context (index name, current segment, valid segments, shared
  // position-reference cache) out of the SecondaryIndexModel.
  @Override
  public void init(IndexModel indexModel) {
    assert (indexModel instanceof SecondaryIndexModel);
    SecondaryIndexModel model = (SecondaryIndexModel) indexModel;
    indexName = model.getIndexName();
    currentSegmentId = model.getCurrentSegmentId();
    validSegmentIds = new HashSet<>(model.getValidSegmentIds());
    positionReferenceInfo = model.getPositionReferenceInfo();
  }

  /**
   * Narrows {@code validSegmentIds} to segments that the SI table's load metadata
   * reports as SUCCESS, MARKED_FOR_UPDATE, or LOAD_PARTIAL_SUCCESS.
   *
   * @param indexPath table path of the secondary-index table
   */
  public void validateSegmentList(String indexPath) {
    LoadMetadataDetails[] loadMetadataDetails = SegmentStatusManager
        .readLoadMetadata(CarbonTablePath.getMetadataPath(indexPath));
    Set<String> validSISegments = new HashSet<>();
    for (LoadMetadataDetails loadMetadataDetail : loadMetadataDetails) {
      if (loadMetadataDetail.getSegmentStatus() == SegmentStatus.SUCCESS
          || loadMetadataDetail.getSegmentStatus() == SegmentStatus.MARKED_FOR_UPDATE
          || loadMetadataDetail.getSegmentStatus() == SegmentStatus.LOAD_PARTIAL_SUCCESS) {
        validSISegments.add(loadMetadataDetail.getLoadName());
      }
    }
    validSegmentIds = Sets.intersection(validSISegments, validSegmentIds);
  }

  /**
   * Returns the position references (blocklet paths) matching {@code expression}
   * for the current segment, querying the SI table once for all valid segments
   * and caching the results in {@code positionReferenceInfo}.
   */
  private Set<String> getPositionReferences(String databaseName, String indexName,
      Expression expression) {
    /* If the position references are not obtained yet(i.e., prune happening for the first valid
    segment), then get them from the given index table with the given filter from all the valid
    segments at once and store them as map of segmentId to set of position references in that
    particular segment. Upon the subsequent prune for other segments, return the position
    references for the respective segment from the map directly */
    if (!positionReferenceInfo.isFetched()) {
      Object[] rows = IndexUtil.getPositionReferences(String
          .format("select distinct positionReference from %s.%s where insegment('%s') and %s",
              databaseName, indexName, String.join(",", validSegmentIds),
              expression.getStatement()));
      for (Object row : rows) {
        String positionReference = (String) row;
        // NOTE(review): the parsing below assumes a position reference of the form
        // "<prefix>/<blockletPath>" where the blocklet path contains the segment id
        // between its last two '-' separators — confirm against the writer's format.
        int blockletPathIndex = positionReference.indexOf("/");
        String blockletPath = positionReference.substring(blockletPathIndex + 1);
        int segEndIndex = blockletPath.lastIndexOf(CarbonCommonConstants.DASH);
        int segStartIndex = blockletPath.lastIndexOf(CarbonCommonConstants.DASH, segEndIndex - 1);
        // Group the blocklet path under its segment id.
        Set<String> blockletPaths = positionReferenceInfo.getSegmentToPosReferences()
            .computeIfAbsent(blockletPath.substring(segStartIndex + 1, segEndIndex),
                k -> new HashSet<>());
        blockletPaths.add(blockletPath);
      }
      positionReferenceInfo.setFetched(true);
    }
    Set<String> blockletPaths =
        positionReferenceInfo.getSegmentToPosReferences().get(currentSegmentId);
    return blockletPaths != null ? blockletPaths : new HashSet<>();
  }

  /**
   * Prunes to the blocklets referenced by the SI for the current segment; when the
   * current segment has no usable SI data, falls back to the default index's result.
   */
  @Override
  public List<Blocklet> prune(FilterResolverIntf filterExp, SegmentProperties segmentProperties,
      FilterExecutor filterExecutor, CarbonTable carbonTable) {
    Set<String> blockletPaths = getPositionReferences(carbonTable.getDatabaseName(), indexName,
        filterExp.getFilterExpression());
    List<Blocklet> blocklets = new ArrayList<>();
    if (!this.validSegmentIds.contains(currentSegmentId)) {
      // if current segment is not a valid SI segment then
      // add the list of blocklet pruned by default index.
      blocklets.addAll(defaultIndexPrunedBlocklet);
    } else {
      for (String blockletPath : blockletPaths) {
        // Strip the leading "<segId>-" prefix and normalize batch separators,
        // then split into (block path, blocklet id) at the last '/'.
        blockletPath = blockletPath.substring(blockletPath.indexOf(CarbonCommonConstants.DASH) + 1)
            .replace(CarbonCommonConstants.UNDERSCORE, CarbonTablePath.BATCH_PREFIX);
        int blockletIndex = blockletPath.lastIndexOf("/");
        blocklets.add(new Blocklet(blockletPath.substring(0, blockletIndex),
            blockletPath.substring(blockletIndex + 1)));
      }
    }
    if (LOGGER.isDebugEnabled()) {
      LOGGER.debug(String
          .format("Secondary Index pruned blocklet count for segment %s is %d ", currentSegmentId,
              blocklets.size()));
    }
    return blocklets;
  }

  // SI pruning cannot rule a segment out up front; always scan.
  @Override
  public boolean isScanRequired(FilterResolverIntf filterExp) {
    return true;
  }

  // No per-instance resources to release.
  @Override
  public void clear() {
  }

  // No finalization work required.
  @Override
  public void finish() {
  }
}
| apache-2.0 |
robjcaskey/Unofficial-Coffee-Mud-Upstream | com/planet_ink/coffee_mud/WebMacros/FileData.java | 2684 | package com.planet_ink.coffee_mud.WebMacros;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;
import java.util.*;
import com.planet_ink.coffee_mud.core.exceptions.HTTPServerException;
/*
Copyright 2000-2010 Bo Zimmerman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
public class FileData extends StdWebMacro
{
public String name() {return this.getClass().getName().substring(this.getClass().getName().lastIndexOf('.')+1);}
public boolean isAWebPath(){return true;}
public boolean preferBinary(){return true;}
public String getFilename(ExternalHTTPRequests httpReq, String filename)
{
String path=httpReq.getRequestParameter("PATH");
if(path==null) return filename;
String file=httpReq.getRequestParameter("FILE");
if(file==null) return filename;
return path+"/"+file;
}
public byte[] runBinaryMacro(ExternalHTTPRequests httpReq, String parm) throws HTTPServerException
{
String filename=getFilename(httpReq,"");
if(filename.length()==0) return null;
MOB M=CMLib.players().getLoadPlayer(Authenticate.getLogin(httpReq));
if(M==null) return null;
CMFile F=new CMFile(filename,M,false);
if((!F.exists())||(!F.canRead())) return null;
return F.raw();
}
public String runMacro(ExternalHTTPRequests httpReq, String parm) throws HTTPServerException
{
return "[Unimplemented string method!]";
}
} | apache-2.0 |
dbracewell/hermes | hermes-core/src/main/java/com/davidbracewell/hermes/driver/EmbeddingQuery.java | 1867 | package com.davidbracewell.hermes.driver;
import com.davidbracewell.apollo.ml.Model;
import com.davidbracewell.apollo.ml.embedding.Embedding;
import com.davidbracewell.cli.Option;
import com.davidbracewell.hermes.HermesCommandLineApp;
import com.davidbracewell.io.resource.Resource;
import java.io.Console;
/**
* @author David B. Bracewell
*/
public class EmbeddingQuery extends HermesCommandLineApp {
   @Option(description = "The embedding model to query.", required = true)
   private Resource model;

   /**
    * Interactive REPL over an embedding model: plain input looks up the ten
    * nearest neighbours of a term; "?search <prefix>" (or "?s <prefix>") lists
    * vocabulary terms with the given prefix; "?quit"/"?q" exits immediately;
    * "q!" also terminates the loop.
    */
   @Override
   protected void programLogic() throws Exception {
      Embedding embedding = Model.read(model);
      Console console = System.console();
      // Bug fix: System.console() returns null when the JVM has no attached
      // interactive terminal (e.g. piped input); previously this caused a
      // NullPointerException on the first readLine call.
      if (console == null) {
         System.err.println("No interactive console available; exiting.");
         return;
      }
      String line;
      do {
         line = console.readLine("query:> ");
         // Bug fix: readLine() returns null on EOF (Ctrl-D); previously the
         // null value flowed into equals()/startsWith() and threw an NPE.
         if (line == null) {
            return;
         }
         if (line.equals("?quit") || line.equals("?q")) {
            System.exit(0);
         } else if (line.startsWith("?search") || line.startsWith("?s")) {
            int spaceIdx = line.indexOf(' ');
            // Bug fix: "?search" with no argument used to throw
            // StringIndexOutOfBoundsException from substring(-1).
            if (spaceIdx < 0) {
               System.out.println("usage: ?search <prefix>");
            } else {
               String search = line.substring(spaceIdx).trim();
               embedding.keys().parallelStream()
                        .filter(term -> term.startsWith(search))
                        .forEach(term -> System.out.println(" " + term));
            }
         } else if (embedding.contains(line)) {
            embedding.nearest(line.toLowerCase(), 10).forEach(
               slv -> System.out.println(" " + slv.getLabel() + " : " + slv.getWeight()));
            System.out.println();
         } else {
            System.out.println("!! " + line + " is not in the dictionary");
         }
      } while (!line.equals("q!"));
   }

   /**
    * Entry point; appends a dummy "--input /dev/null" argument pair required by
    * the superclass before delegating to the framework runner.
    */
   public static void main(String[] args) throws Exception {
      String[] nargs = new String[args.length + 2];
      System.arraycopy(args, 0, nargs, 0, args.length);
      nargs[nargs.length - 2] = "--input";
      nargs[nargs.length - 1] = "/dev/null";
      new EmbeddingQuery().run(nargs);
   }
}//END OF EmbeddingQuery
| apache-2.0 |
bluecking/KOPT | uebung02/src/Main.java | 453 | import knapsack.Instance;
import javax.xml.bind.SchemaOutputResolver;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
public class Main {
    /**
     * Computes the value-per-weight ratio of every item in the given knapsack
     * instance and sorts those ratios in descending order.
     *
     * NOTE(review): the sorted array is a local variable and is discarded when
     * the method returns — presumably this is the first step of a greedy
     * heuristic whose remainder is not yet implemented; confirm the intent.
     *
     * @param instance the knapsack instance supplying item values and weights
     */
    public void test(Instance instance) {
        // Boxed Double[] is required because sorting with a Comparator
        // (Collections.reverseOrder()) only works on object arrays.
        Double[] newArray = new Double[instance.getSize()];
        for(int i = 0; i < instance.getSize(); i++) {
            // The cast ensures floating-point (not integer) division.
            newArray[i] = (double) instance.getValue(i) / instance.getWeight(i);
        }
        Arrays.sort( newArray, Collections.reverseOrder());
    }
}
| apache-2.0 |
longjl/Douban-Android-SDK-OAuth2 | src/com/bluesunshine/douban4droid/model/app/AccessToken.java | 1750 | package com.bluesunshine.douban4droid.model.app;
import java.io.Serializable;
/**
*
* @author Zhibo Wei <uglytroll@dongxuexidu.com>
*/
public class AccessToken implements Serializable {

    // Pins the serialized form: without an explicit serialVersionUID any
    // edit to this class would break deserialization of stored tokens.
    private static final long serialVersionUID = 1L;

    /** OAuth2 access token string; null until granted. */
    private String accessToken = null;
    /** Token lifetime in seconds, as reported by the server; null if unknown. */
    private Integer expiresIn = null;
    /** Token used to obtain a fresh access token; null if not issued. */
    private String refreshToken = null;
    /** Douban user id the token belongs to; null if not provided. */
    private String doubanUserId = null;

    /**
     * @return the accessToken
     */
    public String getAccessToken() {
        return accessToken;
    }

    /**
     * @param accessToken the accessToken to set
     */
    public void setAccessToken(String accessToken) {
        this.accessToken = accessToken;
    }

    /**
     * @return the expiresIn
     */
    public Integer getExpiresIn() {
        return expiresIn;
    }

    /**
     * @param expiresIn the expiresIn to set
     */
    public void setExpiresIn(Integer expiresIn) {
        this.expiresIn = expiresIn;
    }

    /**
     * @return the refreshToken
     */
    public String getRefreshToken() {
        return refreshToken;
    }

    /**
     * @param refreshToken the refreshToken to set
     */
    public void setRefreshToken(String refreshToken) {
        this.refreshToken = refreshToken;
    }

    /**
     * @return the doubanUserId
     */
    public String getDoubanUserId() {
        return doubanUserId;
    }

    /**
     * @param doubanUserId the doubanUserId to set
     */
    public void setDoubanUserId(String doubanUserId) {
        this.doubanUserId = doubanUserId;
    }

    /** Creates an empty token; populate via the setters. */
    public AccessToken() {
    }

    /** Creates a token holding only the access-token string. */
    public AccessToken (String accessToken) {
        this.accessToken = accessToken;
    }

    /** Creates a fully populated token. */
    public AccessToken (String accessToken, int expiresIn, String refreshToken, String doubanUserId) {
        this.accessToken = accessToken;
        this.doubanUserId = doubanUserId;
        this.expiresIn = expiresIn;
        this.refreshToken = refreshToken;
    }
}
| apache-2.0 |
chenyongzhen123/PartTimeJob | app/src/main/java/com/bzu/fshiner/parttimejob/activity/ListFragment.java | 2556 | package com.bzu.fshiner.parttimejob.activity;
import android.app.Fragment;
import android.content.Context;
import android.content.Intent;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AbsListView;
import android.widget.AdapterView;
import android.widget.ListView;
import android.widget.Toast;
import com.bzu.fshiner.parttimejob.R;
import com.bzu.fshiner.parttimejob.adapter.JobAdapter;
import com.bzu.fshiner.parttimejob.model.Job;
import java.util.List;
import cn.bmob.v3.BmobQuery;
import cn.bmob.v3.exception.BmobException;
import cn.bmob.v3.listener.FindListener;
/**
* Created by Administrator on 2017/5/9.
*/
/**
 * Fragment that displays the list of available part-time jobs.
 *
 * The content is fetched from the Bmob backend in {@link #refresh()} and
 * rendered via {@link JobAdapter}. Tapping any row opens {@link MoreActivity};
 * the tapped item itself is not yet passed along (see the commented-out code
 * in {@link #onItemClick}).
 *
 * Created by Administrator on 2017/5/9.
 */
public class ListFragment extends Fragment implements AdapterView.OnItemClickListener,AbsListView.OnScrollListener {

    // List that shows the job entries; resolved in onActivityCreated().
    private ListView listview;
    // Hosting activity, cached in onCreate() for adapter construction.
    private Context mContext;

    @Override
    public void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        this.mContext = getActivity();
    }

    /** Inflates the fragment's layout. */
    public View onCreateView(LayoutInflater inflater, ViewGroup container,Bundle savedInstanceState){
        return inflater.inflate(R.layout.list_fragment,container,false);
    }

    @Override
    public void onActivityCreated(@Nullable Bundle savedInstanceState) {
        super.onActivityCreated(savedInstanceState);
        // The view hierarchy is complete here, so the list can be wired up
        // before triggering the initial data load.
        listview = (ListView) getActivity().findViewById(R.id.list);
        listview.setOnItemClickListener(this);
        listview.setOnScrollListener(this);
        refresh();
    }

    /**
     * Asynchronously loads up to 50 {@link Job} records from Bmob and replaces
     * the list adapter with the result. Query errors are silently ignored:
     * the adapter is only set when the callback reports success.
     */
    public void refresh() {
        BmobQuery<Job> query = new BmobQuery<Job>();
        query.setLimit(50);
        query.findObjects(new FindListener<Job>() {
            @Override
            public void done(List<Job> list, BmobException e) {
                if(e==null){
                    listview.setAdapter(new JobAdapter(mContext,R.layout.job_item,list));
                }
            }
        });
    }

    // Scroll callbacks are required by the interface but intentionally unused.
    @Override
    public void onScrollStateChanged(AbsListView absListView, int i) {
    }

    @Override
    public void onScroll(AbsListView absListView, int i, int i1, int i2) {
    }

    /**
     * Opens the detail screen. NOTE(review): the clicked position is ignored,
     * so MoreActivity has no way to know which job was selected — presumably
     * the commented-out code was meant to pass it; confirm intended behavior.
     */
    @Override
    public void onItemClick(AdapterView<?> adapterView, View view, int i, long l) {
        // String ii = (String) listview.getItemAtPosition(i);
        // Toast.makeText(mContext,ii,Toast.LENGTH_LONG).show();
        Intent intent = new Intent(getActivity(),MoreActivity.class);
        startActivity(intent);
    }
}
| apache-2.0 |
lorban/terracotta-auditor | src/main/java/org/terracotta/auditor/verifier/Values.java | 1171 | /*
* Copyright Terracotta, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.terracotta.auditor.verifier;
import java.util.Set;
/**
* @author Ludovic Orban
*/
public class Values {

    /** Values that were committed for the key. */
    private final Set<RecordValue> committed;
    /** Values that were observed in flight but never committed. */
    private final Set<RecordValue> intermediate;

    /**
     * Pairs the committed values with the intermediate values observed for a
     * single key. Package-private: instances are built by the verifier.
     */
    Values(Set<RecordValue> committedValues, Set<RecordValue> intermediateValues) {
        committed = committedValues;
        intermediate = intermediateValues;
    }

    /** @return the committed values */
    public Set<RecordValue> getCommittedValues() {
        return committed;
    }

    /** @return the intermediate (uncommitted) values */
    public Set<RecordValue> getIntermediateValues() {
        return intermediate;
    }
}
| apache-2.0 |
kamir/WikiExplorer.NG | src/main/java/research/wikinetworks/NodePairList.java | 5196 | package research.wikinetworks;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.Set;
import java.util.Vector;
import com.cloudera.wikiexplorer.ng.util.NodeGroup;
/**
*
* @author kamir
*/
public class NodePairList {

    // Edge list: every NodePair is one (pageIDA, pageIDB) link.
    Vector<NodePair> pairs = null;

    public Vector<NodePair> getPairs() {
        return pairs;
    }

    // Node group built from the pairs at the end of read(); null until then.
    public NodeGroup ng = null;

    // Adjacency map: page id -> ids of all directly linked pages (links are
    // inserted in both directions).
    // NOTE(review): static, so every read() overwrites the map shared by ALL
    // NodePairList instances — confirm only one list is ever read at a time.
    static HashMap<Integer,Vector<Integer>> map = null;

    public NodePairList() { }

    /**
     * Name of the node-group file: source file name + current state extension
     * + ".ng". Requires {@link #f} to have been set by read() or store().
     */
    public String getNodeGroupsFn() {
        String state_ext = NodeGroup.getStateExtension();
        return this.f.getName() + state_ext +".ng";
    };

    /** Name of the underlying pair file; requires {@link #f} to be set. */
    public String getFn() {
        return this.f.getName();
    };

    /**
     * Reads tab-separated id pairs from the given file (lines starting with
     * '#' are skipped), fills {@link #pairs} and the adjacency {@link #map}
     * in both directions, counts self-links, and finally builds {@link #ng}.
     *
     * @param _f input file with one "idA&lt;TAB&gt;idB" pair per line
     * @throws IOException if the file cannot be read
     */
    public void read( File _f ) throws IOException {
        f = _f;
        int i = 0; // counts self-links (idA == idB)
        pairs = new Vector<NodePair>();
        map = new HashMap<Integer,Vector<Integer>>();
        // NOTE(review): the reader is never closed — leaks a file handle.
        BufferedReader br = new BufferedReader( new FileReader( f.getAbsolutePath() ) );
        int lc = 0; // counts accepted (non-comment) lines
        while( br.ready() ) {
            String line = br.readLine();
            if ( line.startsWith("#") ){
                // comment line: skip
            }
            else {
                lc++;
                int[] pids = getIdsForNodePair(line);
                NodePair np = new NodePair();
                np.pageIDA = pids[0];
                np.pageIDB = pids[1];
                if ( pids[0] == pids[1] ) i++;
                pairs.add(np);
                // forward direction: idA -> idB
                Vector<Integer> v = map.get(pids[0]);
                if ( v == null ) {
                    v = new Vector<Integer>();
                    map.put(pids[0], v);
                }
                v.add(pids[1]);
                // reverse direction: idB -> idA
                Vector<Integer> v2 = map.get(pids[1]);
                if ( v2 == null ) {
                    v2 = new Vector<Integer>();
                    map.put(pids[1], v2);
                }
                v2.add(pids[0]);
            }
        }
        System.out.println(">>> NodePairList:line count=" + lc );
        System.out.println(">>> NodePairList:pairs size=" + pairs.size() );
        System.out.println(">>> NodePairList:self-links=" + i );
        System.out.println(">>> NodePairList:real-links=" + (pairs.size() - i) );
        ng = new NodeGroup( pairs );
    };

    /** Wraps an already-built pair list (no file, no adjacency map). */
    public NodePairList( Vector<NodePair> _pairs ) {
        pairs = _pairs;
    }

    // Source (or target) file of the pair list; set by read() and store().
    File f = null;

    /**
     * Writes all pairs to the given file, one "idA&lt;TAB&gt;idB&lt;TAB&gt;1"
     * line per pair.
     *
     * @param _f target file
     * @throws IOException if writing fails
     */
    public void store( File _f ) throws IOException {
        f = _f;
        System.out.println(">>> REAL LINKED NODES: " + f.getAbsolutePath() + "[" + pairs.size() + "]" );
        BufferedWriter bw = new BufferedWriter( new FileWriter( f.getAbsolutePath() ) );
        for( NodePair p : pairs ) {
            bw.write( p.pageIDA + "\t" + p.pageIDB + "\t" + "1" + "\n");
        }
        bw.flush();
        bw.close();
    }

    /** Parses one "idA&lt;TAB&gt;idB" line into a two-element id array. */
    public static int[] getIdsForNodePair( String s) {
        int[] ids = new int[2];
        String[] a = s.split("\t");
        ids[0] = Integer.parseInt( a[0] );
        ids[1] = Integer.parseInt( a[1] );
        return ids;
    }

    // NOTE(review): stub — always returns a two-element array of nulls.
    public static String[] getNamesForNodePair( String s ) {
        String[] names = new String[2];
        return names;
    }

    /**
     * Writes a human-readable ".clear" listing next to the source file: one
     * block per node with its resolved page name and all linked neighbours.
     */
    public void writeClearList() throws IOException {
        FileWriter fw = new FileWriter( f.getAbsolutePath() + ".clear" );
        for ( int i : ng.ids ) {
            String name = PageNameLoader.getPagenameForId(i);
            String line = i + "\t" + name + getListOfLinked( i );
            fw.write( line );
            fw.flush();
        }
        fw.close();
    }

    /**
     * Renders the neighbour list of node i, one line per neighbour. When
     * {@link #toCompare} is set, each line is prefixed with a marker telling
     * whether the same link also exists in the comparison network.
     */
    private String getListOfLinked(int i) {
        StringBuffer sb = new StringBuffer();
        Vector<Integer> v = map.get(i);
        Enumeration<Integer> en = v.elements();
        sb.append( " ["+v.size()+"]" + "\n");
        while( en.hasMoreElements() ) {
            int idL = en.nextElement();
            String link = " - ";
            if ( toCompare != null ) {
                link = getHasLink(i,idL);
            }
            sb.append( "\t" + link + " " + idL + "\t" + PageNameLoader.getPagenameForId( idL ) + "\n");
        }
        return sb.toString();
    }

    /** @return map from node id to its degree (number of adjacent links). */
    public Hashtable<Integer,Integer> getDegreeList() {
        Hashtable<Integer,Integer> data = new Hashtable<Integer,Integer>();
        Set keys = map.keySet();
        Iterator it = keys.iterator();
        while( it.hasNext() ) {
            Integer id = (Integer) it.next();
            Vector links = map.get(id);
            data.put(id, links.size() );
        }
        return data;
    }

    // Optional second network used by writeClearList to mark shared links.
    NodePairList toCompare = null;

    /**
     * Like {@link #writeClearList()}, but marks links that also exist in the
     * given static network.
     */
    public void writeClearList(NodePairList statischesNetz) throws IOException {
        toCompare = statischesNetz;
        writeClearList();
    }

    /**
     * @return "[+]" when the link (i, idL) also exists in {@link #toCompare},
     *         otherwise " - ".
     *         NOTE(review): relies on NodePair implementing equals(); if it
     *         does not, contains() compares identity and never matches.
     */
    private String getHasLink(int i, int idL) {
        String back = " - ";
        NodePair np = new NodePair();
        np.pageIDA = i;
        np.pageIDB = idL;
        if ( toCompare.pairs.contains(np ) ) back = "[+]";
        return back;
    }
}
| apache-2.0 |
talenguyen/LceBinding | lcebinding/src/main/java/vn/tale/lcebinding/LceBinding.java | 1787 | package vn.tale.lcebinding;
import rx.Observable;
import rx.Subscription;
import rx.functions.Action1;
import rx.subscriptions.CompositeSubscription;
/**
* Author giangnguyen. Created on 4/1/16.
*/
public class LceBinding {
private CompositeSubscription subscriptions = null;
public void bind(LoadingContentError lce, ShowHideView loadingView,
ShowHideView contentView,
final ErrorView errorView) {
if (subscriptions == null) {
subscriptions = new CompositeSubscription();
}
bindShowHide(lce.isLoading(), loadingView);
bindShowHide(lce.isShowContent(), contentView);
bindShowHide(lce.isError(), errorView);
bindErrorMessage(lce.errorMessage(), errorView);
}
public void bindShowHide(Observable<Boolean> showHideStream, final ShowHideView showHideView) {
if (subscriptions == null) {
subscriptions = new CompositeSubscription();
}
final Subscription subscription = showHideStream.subscribe(new Action1<Boolean>() {
@Override public void call(Boolean show) {
if (show) {
showHideView.show();
} else {
showHideView.hide();
}
}
});
subscriptions.add(subscription);
}
public void bindErrorMessage(Observable<String> msgStream, final ErrorView errorView) {
if (subscriptions == null) {
subscriptions = new CompositeSubscription();
}
final Subscription subscription = msgStream.subscribe(new Action1<String>() {
@Override
public void call(String msg) {
errorView.setError(msg);
}
});
subscriptions.add(subscription);
}
public void unbind() {
if (subscriptions == null) {
return;
}
subscriptions.unsubscribe();
subscriptions.clear();
subscriptions = null;
}
} | apache-2.0 |
kingargyle/turmeric-bot | camel-core/src/main/java/org/apache/camel/processor/UnitOfWorkProcessor.java | 6025 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.processor;
import org.apache.camel.AsyncCallback;
import org.apache.camel.AsyncProcessor;
import org.apache.camel.Exchange;
import org.apache.camel.Processor;
import org.apache.camel.impl.DefaultUnitOfWork;
import org.apache.camel.spi.RouteContext;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import static org.apache.camel.util.ObjectHelper.wrapRuntimeCamelException;
/**
* Ensures the {@link Exchange} is routed under the boundaries of an {@link org.apache.camel.spi.UnitOfWork}.
* <p/>
* Handles calling the {@link org.apache.camel.spi.UnitOfWork#done(org.apache.camel.Exchange)} method
* when processing of an {@link Exchange} is complete.
*/
public final class UnitOfWorkProcessor extends DelegateAsyncProcessor {

    // NOTE(review): 'transient' on a static field has no effect (statics are
    // never serialized) — harmless but misleading.
    private static final transient Log LOG = LogFactory.getLog(UnitOfWorkProcessor.class);
    // Route this processor belongs to; may be null when created outside a route.
    private final RouteContext routeContext;
    // Identifier of the owning route; null when no route context was supplied.
    private final String routeId;

    public UnitOfWorkProcessor(Processor processor) {
        this(null, processor);
    }

    public UnitOfWorkProcessor(AsyncProcessor processor) {
        this(null, processor);
    }

    /**
     * Creates the processor for the given route.
     *
     * @param routeContext the owning route context, or null when not tied to a route
     * @param processor    the delegate that performs the actual processing
     */
    public UnitOfWorkProcessor(RouteContext routeContext, Processor processor) {
        super(processor);
        this.routeContext = routeContext;
        if (routeContext != null) {
            this.routeId = routeContext.getRoute().idOrCreate(routeContext.getCamelContext().getNodeIdFactory());
        } else {
            this.routeId = null;
        }
    }

    public UnitOfWorkProcessor(RouteContext routeContext, AsyncProcessor processor) {
        super(processor);
        this.routeContext = routeContext;
        if (routeContext != null) {
            this.routeId = routeContext.getRoute().idOrCreate(routeContext.getCamelContext().getNodeIdFactory());
        } else {
            this.routeId = null;
        }
    }

    @Override
    public String toString() {
        return "UnitOfWork(" + processor + ")";
    }

    public RouteContext getRouteContext() {
        return routeContext;
    }

    /**
     * Processes the exchange, creating a {@link DefaultUnitOfWork} for it if
     * none exists yet; that UoW is started before and torn down after the
     * delegate runs. When the exchange already carries a UoW the call is a
     * plain pass-through (the creator owns the UoW lifecycle).
     */
    @Override
    public boolean process(final Exchange exchange, final AsyncCallback callback) {
        // if the exchange doesn't have from route id set, then set it if it originated
        // from this unit of work
        if (routeId != null && exchange.getFromRouteId() == null) {
            exchange.setFromRouteId(routeId);
        }

        if (exchange.getUnitOfWork() == null) {
            // If there is no existing UoW, then we should start one and
            // terminate it once processing is completed for the exchange.
            final DefaultUnitOfWork uow = new DefaultUnitOfWork(exchange);
            exchange.setUnitOfWork(uow);
            try {
                uow.start();
            } catch (Exception e) {
                // Starting the UoW failed: report via callback and exception,
                // do not invoke the delegate at all.
                callback.done(true);
                exchange.setException(e);
                return true;
            }

            // process the exchange
            try {
                return processor.process(exchange, new AsyncCallback() {
                    public void done(boolean doneSync) {
                        // Order here matters. We need to complete the callbacks
                        // since they will likely update the exchange with some final results.
                        try {
                            callback.done(doneSync);
                        } finally {
                            doneUow(uow, exchange);
                        }
                    }
                });
            } catch (Throwable e) {
                LOG.warn("Caught unhandled exception while processing ExchangeId: " + exchange.getExchangeId(), e);
                // fallback and catch any exceptions the process may not have caught
                // we must ensure to done the UoW in all cases and issue done on the callback
                exchange.setException(e);
                // Order here matters. We need to complete the callbacks
                // since they will likely update the exchange with some final results.
                try {
                    callback.done(true);
                } finally {
                    doneUow(uow, exchange);
                }
                return true;
            }
        } else {
            // There was an existing UoW, so we should just pass through..
            // so that the guy the initiated the UoW can terminate it.
            return processor.process(exchange, callback);
        }
    }

    /**
     * Completes and stops the given unit of work. Failures in done() or
     * stop() are logged and swallowed so they cannot override the outcome
     * already delivered to the callback. Finally detaches the UoW from the
     * exchange.
     */
    private void doneUow(DefaultUnitOfWork uow, Exchange exchange) {
        // unit of work is done
        try {
            if (exchange.getUnitOfWork() != null) {
                exchange.getUnitOfWork().done(exchange);
            }
        } catch (Throwable e) {
            LOG.warn("Exception occurred during done UnitOfWork for Exchange: " + exchange
                    + ". This exception will be ignored.", e);
        }

        // stop it
        try {
            uow.stop();
        } catch (Throwable e) {
            LOG.warn("Exception occurred during stopping UnitOfWork for Exchange: " + exchange
                    + ". This exception will be ignored.", e);
        }
        exchange.setUnitOfWork(null);
    }
}
| apache-2.0 |
conteit/scoresmanager | src/it/conteit/scoresmanager/control/ApplicationSystem.java | 8897 | package it.conteit.scoresmanager.control;
import java.awt.Component;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.logging.ConsoleHandler;
import java.util.logging.Handler;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.swing.JOptionPane;
import javax.swing.SwingUtilities;
import it.conteit.scoresmanager.control.commands.Command;
import it.conteit.scoresmanager.control.commands.CommandExecutionException;
import it.conteit.scoresmanager.presentation.AbstractPresentation;
/**
 * Central application facade: dialog helpers, logging, command execution,
 * persisted options (save-on-exit flag, auto-export directory) and the
 * registry of presentation modules. Accessed as a singleton via
 * {@link #getInstance()} after {@link #initialize(String[])}.
 */
public class ApplicationSystem {

    private static final String APPLICATION_NAME = "Scores Manager";

    // Singleton instance, created by initialize().
    private static ApplicationSystem instance = null;
    private static Logger logger = Logger.getLogger("ScoresManager-ApplicationSystem");

    // Whether we run on Mac OS X (enables the native menu bar integration).
    private static boolean IS_MAC;
    // Persisted option: save automatically when the application exits.
    private static boolean SAVE_ON_EXIT = true;
    // Persisted option: directory for automatic export; null = disabled.
    private static String AUTO_EXPORT = null;

    // Observers of the presentation-module list.
    private LinkedList<IPresentationListListener> listeners = new LinkedList<IPresentationListListener>();
    // Registered presentation module classes.
    private ArrayList<Class<? extends AbstractPresentation>> pres = new ArrayList<Class<? extends AbstractPresentation>>();
    // Index into 'pres' of the default presentation.
    private int defaultPres = 0;

    // NOTE(review): public constructor on a singleton — every direct 'new'
    // adds another ConsoleHandler to the shared logger (duplicate output).
    public ApplicationSystem(){
        logger.setLevel(Level.ALL);
        Handler handler = new ConsoleHandler();
        logger.addHandler(handler);
    }

    /** Shows an input dialog; returns the entered text or null on cancel. */
    public String showInput(String message, String defaultValue, Component parent){
        return JOptionPane.showInputDialog(parent, message, defaultValue);
    }

    /**
     * Shows a yes/no question dialog.
     * NOTE(review): with YES_NO_OPTION the dialog returns YES_OPTION,
     * NO_OPTION or CLOSED_OPTION (-1) — never CANCEL_OPTION, so the CANCEL
     * branch is unreachable and closing the window maps to NO.
     */
    public DialogResults showQuestion(String message, Component parent){
        int res = JOptionPane.showConfirmDialog(parent, message, APPLICATION_NAME, JOptionPane.YES_NO_OPTION);
        if(res == JOptionPane.CANCEL_OPTION){
            return DialogResults.CANCEL;
        } else if(res == JOptionPane.YES_OPTION){
            return DialogResults.YES;
        } else {
            return DialogResults.NO;
        }
    }

    /**
     * Shows an OK/Cancel confirmation dialog.
     * NOTE(review): closing the window returns CLOSED_OPTION (-1), which this
     * mapping treats as OK — confirm that is intended.
     */
    public DialogResults showConfirmation(String message, Component parent){
        int res = JOptionPane.showConfirmDialog(parent, message, APPLICATION_NAME, JOptionPane.OK_CANCEL_OPTION);
        if(res == JOptionPane.CANCEL_OPTION){
            return DialogResults.CANCEL;
        } else {
            return DialogResults.OK;
        }
    }

    /**
     * Shows a yes/no/cancel dialog.
     * NOTE(review): despite the name, this never returns CANCEL — both the
     * Cancel button and window close fall through to NO.
     */
    public DialogResults showQuestionWithCancel(String message, Component parent){
        int res = JOptionPane.showConfirmDialog(parent, message, APPLICATION_NAME, JOptionPane.YES_NO_CANCEL_OPTION);
        if(res == JOptionPane.YES_OPTION){
            return DialogResults.YES;
        } else {
            return DialogResults.NO;
        }
    }

    /**
     * Shows a dialog with custom option buttons; returns the index of the
     * chosen option, or CLOSED_OPTION (-1) when the dialog is dismissed.
     */
    public int showOptionDialog(String message, Object[] options, Component parent){
        return JOptionPane.showOptionDialog(parent, message, APPLICATION_NAME, JOptionPane.OK_CANCEL_OPTION, JOptionPane.QUESTION_MESSAGE , null, options, -1);
    }

    // The three show* message helpers below post the dialog on the EDT and
    // return immediately (fire-and-forget).
    public void showInformation(final String message, final Component parent){
        SwingUtilities.invokeLater(new Runnable(){
            public void run() {
                JOptionPane.showMessageDialog(parent, message, APPLICATION_NAME, JOptionPane.INFORMATION_MESSAGE);
            }
        });
    }

    public void showWarning(final String message, final Component parent){
        SwingUtilities.invokeLater(new Runnable(){
            public void run() {
                JOptionPane.showMessageDialog(parent, message, APPLICATION_NAME, JOptionPane.WARNING_MESSAGE);
            }
        });
    }

    public void showError(final String message, final Component parent){
        SwingUtilities.invokeLater(new Runnable(){
            public void run() {
                JOptionPane.showMessageDialog(parent, message, APPLICATION_NAME, JOptionPane.ERROR_MESSAGE);
            }
        });
    }

    public void logInfo(String info){
        logger.log(Level.INFO, info);
    }

    public void logWarning(String warning){
        logger.log(Level.WARNING, warning);
    }

    public void logError(String error){
        logger.log(Level.SEVERE, error);
    }

    /** Executes the given command (thin wrapper for future extension). */
    public void execute(Command cmd) throws CommandExecutionException{
        cmd.execute();
    }

    /**
     * Creates the singleton, detects the platform and loads the options file
     * (<working dir>/options). Expected file layout: line 1 = save-on-exit
     * flag, line 2 = auto-export directory, line 3 = default presentation
     * index, remaining lines = presentation class names.
     * NOTE(review): save() omits the auto-export line when the directory is
     * unset, so this reader then misparses the remaining lines (the index
     * line is read as the directory) — the resulting exception is swallowed
     * and logged as "Cannot register presentation module".
     */
    // Da definire bene, anche la gestione degli argomenti, forse meglio parsing esterno e passaggio di un oggetto
    @SuppressWarnings("unchecked")
    public static void initialize(String[] args){
        instance = new ApplicationSystem();
        String lcOSName = System.getProperty("os.name").toLowerCase();
        IS_MAC = lcOSName.startsWith("mac os x");
        if(IS_MAC){
            System.setProperty("apple.laf.useScreenMenuBar", "true");
            System.setProperty("com.apple.mrj.application.apple.menu.about.name", APPLICATION_NAME);
            //new it.conteit.scoresmanager.gui.MacMenuHandler();
        }
        File f = new File(System.getProperty("user.dir") + "/options");
        BufferedReader bufRd = null;
        try {
            FileReader rd = new FileReader(f);
            bufRd = new BufferedReader(rd);
            SAVE_ON_EXIT = Boolean.parseBoolean(bufRd.readLine());
            AUTO_EXPORT = bufRd.readLine();
            File auto_exp = new File(AUTO_EXPORT);
            if(!auto_exp.isDirectory() || !auto_exp.exists()){
                AUTO_EXPORT = null;
            }
            instance.setDefaultPresIndex(Integer.parseInt(bufRd.readLine()));
            String pkg_class;
            while((pkg_class = bufRd.readLine()) != null){
                instance.addPresentation((Class<? extends AbstractPresentation>) Class.forName(pkg_class));
            }
        } catch (Exception e) {
            // Any parse/IO/reflection failure aborts option loading; defaults
            // remain in effect.
            instance.logWarning("Cannot register presentation module: " + e.getMessage());
        } finally {
            if(bufRd != null){
                try {
                    bufRd.close();
                } catch (IOException e) {
                    ApplicationSystem.getInstance().logError(e.getMessage());
                }
            }
        }
    }

    public boolean isMac(){
        return IS_MAC;
    }

    public String getApplicationDirectory(){
        return System.getProperty("user.dir");
    }

    /** Sets the save-on-exit flag and persists the options file. */
    public void setSaveOnExit(boolean save){
        SAVE_ON_EXIT = save;
        save();
    }

    public boolean saveOnExit(){
        return SAVE_ON_EXIT;
    }

    /** Sets (or clears, with null) the auto-export directory and persists. */
    public void setAutoExportDir(String dir){
        AUTO_EXPORT = dir;
        save();
    }

    public void setAutoExportDir(File dir){
        if(dir != null){
            setAutoExportDir(dir.getPath());
        } else {
            setAutoExportDir((String) null);
        }
    }

    public boolean autoExportEnabled(){
        return (AUTO_EXPORT != null);
    }

    public String getAutoExportDir(){
        return AUTO_EXPORT;
    }

    /**
     * Persists the options file in the format documented at
     * {@link #initialize(String[])}. See the NOTE there about the missing
     * auto-export line when the directory is unset.
     */
    private void save(){
        File f = new File(System.getProperty("user.dir") + "/options");
        if(f.exists()){
            f.delete();
            try {
                f.createNewFile();
            } catch (IOException e) {
                ApplicationSystem.getInstance().logError(e.getMessage());
            }
        }
        BufferedWriter bufWr = null;
        try {
            FileWriter wr = new FileWriter(f);
            bufWr = new BufferedWriter(wr);
            bufWr.write("" + SAVE_ON_EXIT);
            bufWr.newLine();
            if(AUTO_EXPORT != null){
                bufWr.write(AUTO_EXPORT);
                bufWr.newLine();
            }
            bufWr.write(""+defaultPres);
            bufWr.newLine();
            for (Class<? extends AbstractPresentation> p : pres){
                bufWr.write(p.getCanonicalName());
                bufWr.newLine();
            }
            bufWr.flush();
        } catch (Exception e) {
            ApplicationSystem.getInstance().logError(e.getMessage());
        } finally {
            if(bufWr != null){
                try {
                    bufWr.close();
                } catch (IOException e) {
                    ApplicationSystem.getInstance().logError(e.getMessage());
                }
            }
        }
    }

    /** Registers a presentation class and notifies all listeners. */
    public void addPresentation(Class<? extends AbstractPresentation> presClass){
        pres.add(presClass);
        for (IPresentationListListener m : listeners){
            m.presentationAdded(presClass, isDefaultPresentation(presClass));
        }
    }

    /**
     * Unregisters a presentation class; if it was the default, the default
     * falls back to index 0. Listeners are notified.
     */
    public void removePresentation(Class<? extends AbstractPresentation> presClass){
        pres.remove(presClass);
        if (isDefaultPresentation(presClass)){
            setDefaultPresIndex(0);
        }
        for (IPresentationListListener m : listeners){
            m.presentationRemoved(presClass);
        }
    }

    public boolean isDefaultPresentation(Class<?> pres_class){
        return pres.get(defaultPres).equals(pres_class);
    }

    public int getPresentationClassesCount(){
        return pres.size();
    }

    public Class<? extends AbstractPresentation> getDefaultPresentationClass(){
        return pres.get(defaultPres);
    }

    public Class<? extends AbstractPresentation> getPresentationClass(int index){
        return pres.get(index);
    }

    public int getDefaultPresIndex(){
        return defaultPres;
    }

    /** Sets the default presentation by index and notifies listeners. */
    public void setDefaultPresIndex(int index){
        defaultPres = index;
        for (IPresentationListListener m : listeners){
            m.defaultPresentationChanged(getPresentationClass(index));
        }
    }

    /** Sets the default presentation by class (no-op if not registered). */
    public void setDefaultPresentation(
            Class<? extends AbstractPresentation> defPres) {
        for (int i = 0; i < pres.size(); i++){
            if (getPresentationClass(i).equals(defPres)){
                setDefaultPresIndex(i);
            }
        }
    }

    public void addListener(IPresentationListListener model){
        listeners.add(model);
    }

    public void removeListener(IPresentationListListener model){
        listeners.remove(model);
    }

    /**
     * @return the singleton
     * @throws NotInitializedException if initialize() was not called first
     */
    public static synchronized ApplicationSystem getInstance() throws NotInitializedException{
        if(instance == null){
            throw new NotInitializedException("System must be initialized before the use");
        }
        return instance;
    }
}
| apache-2.0 |
yanyusong/android-architectures-demos | qujingRxReMVPDagger+/spacestation/src/main/java/com/zsygfddsd/spacestation/common/widgets/DividerGridItemDecoration.java | 6323 | package com.zsygfddsd.spacestation.common.widgets;
import android.content.Context;
import android.graphics.Canvas;
import android.graphics.Paint;
import android.graphics.Rect;
import android.support.annotation.ColorInt;
import android.support.v7.widget.RecyclerView;
import android.util.TypedValue;
import android.view.View;
/**
 * Grid divider drawing rules:
 * the divider strokes extend past the item on all four sides (top, bottom,
 * left and right). The divider color must be fully opaque, otherwise the
 * overlapping strokes at the intersections become visible.
 */
public abstract class DividerGridItemDecoration extends RecyclerView.ItemDecoration {
    //    private Drawable mDrawable;
    private Paint mPaint;
    private int lineWidth;//divider line width in px
    /**
     * A single color value in the form 0xAARRGGBB.
     **/
    private int colorRGB;
    // Whether the last item's bottom divider is drawn.
    private boolean isLastItemShowDivider = true;
    // Whether the last item's top divider is drawn.
    private boolean isLastItemShowTopDivider = true;

    /**
     * @param context     used to convert dp to px
     * @param lineWidthDp divider width in dp
     * @param mColorRGB   fully-opaque divider color (0xAARRGGBB)
     */
    public DividerGridItemDecoration(Context context, int lineWidthDp, @ColorInt int mColorRGB) {
        this.colorRGB = mColorRGB;
        this.lineWidth = (int) TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, lineWidthDp, context.getResources().getDisplayMetrics());
        mPaint = new Paint(Paint.ANTI_ALIAS_FLAG);
        mPaint.setColor(colorRGB);
        mPaint.setStyle(Paint.Style.FILL);
    }

    /** Same as the int constructor but accepts a fractional dp width. */
    public DividerGridItemDecoration(Context context, float lineWidthDp, @ColorInt int mColorRGB) {
        this.colorRGB = mColorRGB;
        this.lineWidth = (int) TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, lineWidthDp, context.getResources().getDisplayMetrics());
        mPaint = new Paint(Paint.ANTI_ALIAS_FLAG);
        mPaint.setColor(colorRGB);
        mPaint.setStyle(Paint.Style.FILL);
    }

    @Override
    public void onDraw(Canvas c, RecyclerView parent, RecyclerView.State state) {
        // draw top, bottom, left and right divider strokes for every child
        drawChildTopHorizontal(c, parent);
        drawChildBottomHorizontal(c, parent);
        drawChildLeftVertical(c, parent);
        drawChildRightVertical(c, parent);
    }

    /** Draws the horizontal stroke below each child (extended sideways). */
    public void drawChildBottomHorizontal(Canvas c, RecyclerView parent) {
        int childCount = parent.getChildCount();
        for (int i = 0; i < childCount; i++) {
            View child = parent.getChildAt(i);
            RecyclerView.LayoutParams params = (RecyclerView.LayoutParams) child
                    .getLayoutParams();
            // extend past the child on both sides so corner strokes meet
            int left = child.getLeft() - params.leftMargin - lineWidth;
            int right = child.getRight() + params.rightMargin + lineWidth;
            int top = child.getBottom() + params.bottomMargin;
            int bottom = top + lineWidth;
            if ((!isLastItemShowDivider && i == childCount - 1)) {
                // last item's bottom divider suppressed by configuration
            } else {
                c.drawRect(left, top, right, bottom, mPaint);
            }
        }
    }

    // Configures whether the last item's bottom divider is drawn.
    public DividerGridItemDecoration configLastItemShowDivider(boolean isLastItemShowDivider) {
        this.isLastItemShowDivider = isLastItemShowDivider;
        return this;
    }

    // Configures whether the last item's top divider is drawn.
    public DividerGridItemDecoration configLastItemShowTopDivider(boolean isLastItemShowTopDivider) {
        this.isLastItemShowTopDivider = isLastItemShowTopDivider;
        return this;
    }

    /** Draws the horizontal stroke above each child (extended sideways). */
    public void drawChildTopHorizontal(Canvas c, RecyclerView parent) {
        int childCount = parent.getChildCount();
        for (int i = 0; i < childCount; i++) {
            View child = parent.getChildAt(i);
            RecyclerView.LayoutParams params = (RecyclerView.LayoutParams) child
                    .getLayoutParams();
            int left = child.getLeft() - params.leftMargin - lineWidth;
            int right = child.getRight() + params.rightMargin + lineWidth;
            int bottom = child.getTop() - params.topMargin;
            int top = bottom - lineWidth;
            if ((!isLastItemShowTopDivider && i == childCount - 1)) {
                // last item's top divider suppressed by configuration
            } else {
                c.drawRect(left, top, right, bottom, mPaint);
            }
        }
    }

    /** Draws the vertical stroke left of each child (extended vertically). */
    public void drawChildLeftVertical(Canvas c, RecyclerView parent) {
        int childCount = parent.getChildCount();
        for (int i = 0; i < childCount; i++) {
            View child = parent.getChildAt(i);
            RecyclerView.LayoutParams params = (RecyclerView.LayoutParams) child
                    .getLayoutParams();
            int top = child.getTop() - params.topMargin - lineWidth;
            int bottom = child.getBottom() + params.bottomMargin + lineWidth;
            int right = child.getLeft() - params.leftMargin;
            int left = right - lineWidth;
            c.drawRect(left, top, right, bottom, mPaint);
        }
    }

    /** Draws the vertical stroke right of each child (extended vertically). */
    public void drawChildRightVertical(Canvas c, RecyclerView parent) {
        int childCount = parent.getChildCount();
        for (int i = 0; i < childCount; i++) {
            View child = parent.getChildAt(i);
            RecyclerView.LayoutParams params = (RecyclerView.LayoutParams) child
                    .getLayoutParams();
            int top = child.getTop() - params.topMargin - lineWidth;
            int bottom = child.getBottom() + params.bottomMargin + lineWidth;
            int left = child.getRight() + params.rightMargin;
            int right = left + lineWidth;
            c.drawRect(left, top, right, bottom, mPaint);
        }
    }

    @Override
    public void getItemOffsets(Rect outRect, View view, RecyclerView parent, RecyclerView.State state) {
        // Per the RecyclerView source, outRect is merely a holder for the
        // left/right/top/bottom offsets to apply around the child view.
        int itemPosition = ((RecyclerView.LayoutParams) view.getLayoutParams()).getViewLayoutPosition();
        //
        boolean[] sideOffsetBooleans = getItemSidesIsHaveOffsets(itemPosition);
        int left = sideOffsetBooleans[0] ? lineWidth : 0;
        int top = sideOffsetBooleans[1] ? lineWidth : 0;
        int right = sideOffsetBooleans[2] ? lineWidth : 0;
        int bottom = sideOffsetBooleans[3] ? lineWidth : 0;
        outRect.set(left, top, right, bottom);
    }

    /**
     * Tells which sides of the item at the given position need a divider
     * offset. Order: left, top, right, bottom.
     *
     * @return boolean[4]
     */
    public abstract boolean[] getItemSidesIsHaveOffsets(int itemPosition);
}
| apache-2.0 |
alexschimpf/joe | core/src/com/tendersaucer/joe/ColorScheme.java | 3237 | package com.tendersaucer.joe;
import com.badlogic.gdx.graphics.Color;
import com.badlogic.gdx.math.MathUtils;
import com.tendersaucer.joe.util.ColorUtils;
import com.tendersaucer.joe.util.RandomUtils;
/**
* Created by Alex on 8/21/2016.
*/
/**
 * Singleton palette manager: picks a random two-color scheme on
 * {@link #reset()} and hands out the primary, secondary and background
 * colors, optionally with a small random brightness shade applied.
 */
public final class ColorScheme {

    private static final ColorScheme INSTANCE = new ColorScheme();

    // Brightness factor range used by the getShaded* accessors.
    private static final float MIN_SHADE_BRIGHTNESS = 0.98f;
    private static final float MAX_SHADE_BRIGHTNESS = 1.02f;

    private static final Color BACKGROUND_COLOR = new Color(0.15f, 0.15f, 0.15f, 1);

    private static final Color BLUE = Color.valueOf("69B4FFFF");
    private static final Color GREEN = Color.valueOf("B4FF69FF");
    private static final Color ORANGE = Color.valueOf("FFB469FF");
    private static final Color TEAL = Color.valueOf("69FFB4FF");
    private static final Color PURPLE = Color.valueOf("B469FFFF");
    private static final Color PINK = Color.valueOf("FF69B4FF");

    // Candidate (primary, secondary) pairs; shuffled in place on reset().
    private static final Color[][] COLOR_SCHEMES = new Color[][] {
        new Color[] { BLUE, ORANGE },
        new Color[] { BLUE, PINK },
        new Color[] { GREEN, PURPLE },
        new Color[] { GREEN, ORANGE },
        new Color[] { GREEN, PINK },
        new Color[] { TEAL, PURPLE },
        new Color[] { TEAL, ORANGE },
        new Color[] { TEAL, PINK },
    };

    public enum ColorType {
        PRIMARY, SECONDARY
    }

    /** Whether an accessor returns the shared instance or a defensive copy. */
    public enum ReturnType {
        SHARED, NEW
    }

    private Color primaryColor;
    private Color secondaryColor;
    private Color backgroundColor;

    private ColorScheme() {
    }

    public static ColorScheme getInstance() {
        return INSTANCE;
    }

    /**
     * Picks a random scheme and random role assignment, then refreshes the
     * primary/secondary/background colors.
     */
    public void reset() {
        RandomUtils.shuffle(COLOR_SCHEMES);
        Color[] scheme = RandomUtils.pickFrom(COLOR_SCHEMES);
        // NOTE(review): this shuffles the picked inner array in place, so the
        // role order inside COLOR_SCHEMES is permanently permuted — harmless
        // if pairs are unordered, but confirm.
        RandomUtils.shuffle(scheme);

        // Copy every entry: Color is mutable in libGDX, and the shade calls
        // below would otherwise corrupt the shared palette constants.
        primaryColor = new Color(scheme[0]);
        secondaryColor = new Color(scheme[1]);
        // Bug fix: previously aliased BACKGROUND_COLOR directly, so shading
        // mutated the static "constant" across resets.
        backgroundColor = new Color(BACKGROUND_COLOR);

        // TODO: This is just for convenience. Remove later.
        ColorUtils.shade(primaryColor, 1);
        ColorUtils.shade(secondaryColor, 1);
        ColorUtils.shade(backgroundColor, 1);
    }

    /**
     * @return the six base palette colors. NOTE(review): the array elements
     *         are the shared mutable Color constants — callers must not
     *         modify them.
     */
    public Color[] getColors() {
        return new Color[] { BLUE, ORANGE, GREEN, PURPLE, TEAL, PINK };
    }

    /** @return the primary color; SHARED returns the live instance. */
    public Color getPrimaryColor(ReturnType returnType) {
        if (returnType == ReturnType.SHARED) {
            return primaryColor;
        }

        return new Color(primaryColor);
    }

    /** @return the secondary color; SHARED returns the live instance. */
    public Color getSecondaryColor(ReturnType returnType) {
        if (returnType == ReturnType.SHARED) {
            return secondaryColor;
        }

        return new Color(secondaryColor);
    }

    /** @return a fresh copy of the primary color with a random slight shade. */
    public Color getShadedPrimaryColor() {
        Color color = getPrimaryColor(ReturnType.NEW);
        ColorUtils.shade(color, MathUtils.random(MIN_SHADE_BRIGHTNESS, MAX_SHADE_BRIGHTNESS));
        return color;
    }

    /** @return a fresh copy of the secondary color with a random slight shade. */
    public Color getShadedSecondaryColor() {
        Color color = getSecondaryColor(ReturnType.NEW);
        ColorUtils.shade(color, MathUtils.random(MIN_SHADE_BRIGHTNESS, MAX_SHADE_BRIGHTNESS));
        return color;
    }

    /** @return the live background color instance (not a copy). */
    public Color getBackgroundColor() {
        return backgroundColor;
    }
}
| apache-2.0 |
palantir/atlasdb | atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CellValuePutter.java | 7813 | /*
* (c) Copyright 2018 Palantir Technologies Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.palantir.atlasdb.keyvalue.cassandra;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.palantir.atlasdb.cassandra.CassandraKeyValueServiceConfig;
import com.palantir.atlasdb.keyvalue.api.Cell;
import com.palantir.atlasdb.keyvalue.api.TableReference;
import com.palantir.atlasdb.keyvalue.api.Value;
import com.palantir.atlasdb.keyvalue.cassandra.thrift.MutationMap;
import com.palantir.atlasdb.keyvalue.impl.Cells;
import com.palantir.atlasdb.keyvalue.impl.IterablePartitioner;
import com.palantir.atlasdb.pue.KvsConsensusForgettingStore;
import com.palantir.atlasdb.util.AnnotatedCallable;
import com.palantir.atlasdb.util.AnnotationType;
import com.palantir.common.base.FunctionCheckedException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.Callable;
import java.util.function.LongSupplier;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.Mutation;
public class CellValuePutter {
/**
* This value has been chosen so that, in case of internal KVS inconsistency, the value stored with
* {@link KvsConsensusForgettingStore#put(Cell, byte[])} is always considered as the latest value. It is the
* responsibility of the user of this class to verify that this is true for the particular KVS implementation,
* which it is and must remain so for the Cassandra KVS.
*/
public static final long SET_TIMESTAMP = Long.MAX_VALUE - 10;
private final LongSupplier timestampOverrideSupplier;
private final CassandraKeyValueServiceConfig config;
private final CassandraClientPool clientPool;
private final TaskRunner taskRunner;
private final WrappingQueryRunner queryRunner;
public CellValuePutter(
CassandraKeyValueServiceConfig config,
CassandraClientPool clientPool,
TaskRunner taskRunner,
WrappingQueryRunner queryRunner,
LongSupplier timestampOverrideSupplier) {
this.config = config;
this.clientPool = clientPool;
this.taskRunner = taskRunner;
this.queryRunner = queryRunner;
this.timestampOverrideSupplier = timestampOverrideSupplier;
}
void putWithOverriddenTimestamps(
final String kvsMethodName, final TableReference tableRef, final Iterable<Map.Entry<Cell, Value>> values) {
putInternal(kvsMethodName, tableRef, values, Optional.of(timestampOverrideSupplier.getAsLong()));
}
void put(final String kvsMethodName, final TableReference tableRef, final Iterable<Map.Entry<Cell, Value>> values) {
putInternal(kvsMethodName, tableRef, values, Optional.empty());
}
void set(final String kvsMethodName, final TableReference tableRef, final Iterable<Map.Entry<Cell, Value>> values) {
putInternal(kvsMethodName, tableRef, values, Optional.of(SET_TIMESTAMP));
}
/**
* @param values the values to put. The timestamp of each value is the AtlasDB start timestamp, which is a part of
* the column name in Cassandra.
* @param overrideTimestamp the Cassandra timestamp to write the value at. A higher Cassandra timestamp determines
* which write wins in case of a discrepancy on multiple nodes. If empty, defaults to the
* start timestamp from above.
*/
private void putInternal(
final String kvsMethodName,
final TableReference tableRef,
final Iterable<Map.Entry<Cell, Value>> values,
Optional<Long> overrideTimestamp) {
Map<InetSocketAddress, Map<Cell, Value>> cellsByHost = HostPartitioner.partitionMapByHost(clientPool, values);
List<Callable<Void>> tasks = new ArrayList<>(cellsByHost.size());
for (final Map.Entry<InetSocketAddress, Map<Cell, Value>> entry : cellsByHost.entrySet()) {
tasks.add(AnnotatedCallable.wrapWithThreadName(
AnnotationType.PREPEND,
"Atlas put " + entry.getValue().size() + " cell values to " + tableRef + " on " + entry.getKey(),
() -> {
putForSingleHost(
kvsMethodName,
entry.getKey(),
tableRef,
entry.getValue().entrySet(),
overrideTimestamp);
return null;
}));
}
taskRunner.runAllTasksCancelOnFailure(tasks);
}
private static Long getEntrySize(Map.Entry<Cell, Value> input) {
return input.getValue().getContents().length + 4L + Cells.getApproxSizeOfCell(input.getKey());
}
private void putForSingleHost(
String kvsMethodName,
final InetSocketAddress host,
final TableReference tableRef,
final Iterable<Map.Entry<Cell, Value>> values,
Optional<Long> overrideTimestamp)
throws Exception {
clientPool.runWithRetryOnHost(host, new FunctionCheckedException<CassandraClient, Void, Exception>() {
@Override
public Void apply(CassandraClient client) throws Exception {
int mutationBatchCount = config.mutationBatchCount();
int mutationBatchSizeBytes = config.mutationBatchSizeBytes();
for (List<Map.Entry<Cell, Value>> partition : IterablePartitioner.partitionByCountAndBytes(
values, mutationBatchCount, mutationBatchSizeBytes, tableRef, CellValuePutter::getEntrySize)) {
MutationMap map = new MutationMap();
for (Map.Entry<Cell, Value> e : partition) {
Cell cell = e.getKey();
Column col = overrideTimestamp
.map(ts -> CassandraKeyValueServices.createColumnForDelete(cell, e.getValue(), ts))
.orElseGet(() -> CassandraKeyValueServices.createColumn(cell, e.getValue()));
ColumnOrSuperColumn colOrSup = new ColumnOrSuperColumn();
colOrSup.setColumn(col);
Mutation mutation = new Mutation();
mutation.setColumn_or_supercolumn(colOrSup);
map.addMutationForCell(cell, tableRef, mutation);
}
queryRunner.batchMutate(
kvsMethodName,
client,
ImmutableSet.of(tableRef),
map,
CassandraKeyValueServiceImpl.WRITE_CONSISTENCY);
}
return null;
}
@Override
public String toString() {
return "batch_mutate(" + host + ", " + tableRef.getQualifiedName() + ", " + Iterables.size(values)
+ " values)";
}
});
}
}
| apache-2.0 |
oprisnik/semdroid | semdroid-plugin-spa/src/main/java/com/oprisnik/semdroid/feature/instance/method/ConfigurableMIG.java | 5081 | /*
* Copyright 2014 Alexander Oprisnik
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.oprisnik.semdroid.feature.instance.method;
import com.oprisnik.semdroid.app.DexMethod;
import com.oprisnik.semdroid.app.LocalVariable;
import com.oprisnik.semdroid.app.MethodCall;
import com.oprisnik.semdroid.app.Opcode;
import com.oprisnik.semdroid.config.BadConfigException;
import com.oprisnik.semdroid.config.Config;
import com.oprisnik.semdroid.feature.instance.MethodInstanceGenerator;
import com.oprisnik.semdroid.feature.instance.config.IGConfiguration;
import com.oprisnik.semdroid.feature.value.LocalVarFeatureValueGenerator;
import com.oprisnik.semdroid.feature.value.MethodCallFeatureValueGenerator;
import com.oprisnik.semdroid.filter.LocalVarFilter;
import com.oprisnik.semdroid.filter.MethodCallFilter;
import java.util.List;
import at.tuflowgraphy.semantic.base.domain.data.InstanceDataElement;
/**
* Configurable method instance generator.
*/
public class ConfigurableMIG extends MethodInstanceGenerator {
protected MethodCallFilter mMethodCallFilter;
protected MethodCallFeatureValueGenerator mMethodCallFVG;
protected LocalVarFilter mLocalVarFilter;
protected LocalVarFeatureValueGenerator mLocalVarFVG;
protected IGConfiguration mConfig;
@Override
public void init(Config config) throws BadConfigException {
super.init(config);
mConfig = new IGConfiguration();
mConfig.init(config);
mMethodCallFilter = getMethodCallFilter();
mMethodCallFVG = getMethodCallFeatureValueGenerator();
mLocalVarFilter = getLocalVarFilter();
mLocalVarFVG = getLocalVarFeatureValueGenerator();
}
@Override
protected void getMethodInstance(DexMethod method, InstanceDataElement results) {
// opcodes
if (mConfig.addOpcodeHistogram || mConfig.addOpcodeCount || mConfig.addOpcodes) {
List<Opcode> opcodes = method.getOpcodes(getMethodCallInclusionDepth());
if (mConfig.addOpcodeHistogram) {
results.addValue(getDistanceBasedFeatureDataElement("opcodeHisto",
getOpcodeHistogram(opcodes, mConfig.normalizeOpcodeHistogram)));
}
if (mConfig.addOpcodeCount) {
results.addValue(getDistanceBasedFeatureDataElement("opcodeCount", getOpcodeGrouper().getGroupedList(opcodes).size()));
}
if (mConfig.addOpcodes) {
addOpcodes("opcode", opcodes, results);
}
}
// method calls
if (mConfig.addMethodCalls || mConfig.addMethodCallCount) {
List<MethodCall> methodCalls = method.getMethodCalls(getMethodCallInclusionDepth());
if (mConfig.addMethodCalls) {
addMethodCalls(methodCalls, results);
}
if (mConfig.addMethodCallCount) {
results.addValue(getDistanceBasedFeatureDataElement("methodCallCount", methodCalls.size()));
}
}
// local variables
if (mConfig.addBasicLocalVarHistogram || mConfig.addLocalVars || mConfig.addLocalVarCount) {
List<LocalVariable> localVariables = method.getLocalVariables(getMethodCallInclusionDepth());
if (mConfig.addBasicLocalVarHistogram) {
results.addValue(getDistanceBasedFeatureDataElement("basicLocalVarHisto", getBasicLocalVarHistogram(localVariables, mConfig.normalizeBasicLocalVarHistogram)));
}
if (mConfig.addLocalVars) {
addLocalVars(localVariables, results);
}
if (mConfig.addLocalVarCount) {
results.addValue(getDistanceBasedFeatureDataElement("localVariableCount", localVariables.size()));
}
}
// permissions
if (mConfig.addPermissions) {
for (String permission : method.getPermissions()) {
results.addValue(getSymbolicFeatureDataElement("usesPermission",
permission));
}
}
}
protected void addMethodCalls(List<MethodCall> methodCalls, InstanceDataElement results) {
addAsSet("methodCall", methodCalls, mMethodCallFilter, mMethodCallFVG, results);
}
protected void addLocalVars(List<LocalVariable> localVars, InstanceDataElement results) {
addAsSet("localVar", localVars, mLocalVarFilter, mLocalVarFVG, results);
}
}
| apache-2.0 |
googleapis/java-bigquerystorage | google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteSettings.java | 9793 | /*
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.cloud.bigquery.storage.v1;
import com.google.api.core.ApiFunction;
import com.google.api.core.BetaApi;
import com.google.api.gax.core.GoogleCredentialsProvider;
import com.google.api.gax.core.InstantiatingExecutorProvider;
import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider;
import com.google.api.gax.rpc.ApiClientHeaderProvider;
import com.google.api.gax.rpc.ClientContext;
import com.google.api.gax.rpc.ClientSettings;
import com.google.api.gax.rpc.StreamingCallSettings;
import com.google.api.gax.rpc.TransportChannelProvider;
import com.google.api.gax.rpc.UnaryCallSettings;
import com.google.cloud.bigquery.storage.v1.stub.BigQueryWriteStubSettings;
import java.io.IOException;
import java.util.List;
import javax.annotation.Generated;
// AUTO-GENERATED DOCUMENTATION AND CLASS.
/**
* Settings class to configure an instance of {@link BigQueryWriteClient}.
*
* <p>The default instance has everything set to sensible defaults:
*
* <ul>
* <li>The default service address (bigquerystorage.googleapis.com) and default port (443) are
* used.
* <li>Credentials are acquired automatically through Application Default Credentials.
* <li>Retries are configured for idempotent methods but not for non-idempotent methods.
* </ul>
*
* <p>The builder of this class is recursive, so contained classes are themselves builders. When
* build() is called, the tree of builders is called to create the complete settings object.
*
* <p>For example, to set the total timeout of createWriteStream to 30 seconds:
*
* <pre>{@code
* BigQueryWriteSettings.Builder bigQueryWriteSettingsBuilder = BigQueryWriteSettings.newBuilder();
* bigQueryWriteSettingsBuilder
* .createWriteStreamSettings()
* .setRetrySettings(
* bigQueryWriteSettingsBuilder
* .createWriteStreamSettings()
* .getRetrySettings()
* .toBuilder()
* .setTotalTimeout(Duration.ofSeconds(30))
* .build());
* BigQueryWriteSettings bigQueryWriteSettings = bigQueryWriteSettingsBuilder.build();
* }</pre>
*/
@Generated("by gapic-generator-java")
public class BigQueryWriteSettings extends ClientSettings<BigQueryWriteSettings> {
/** Returns the object with the settings used for calls to createWriteStream. */
public UnaryCallSettings<CreateWriteStreamRequest, WriteStream> createWriteStreamSettings() {
return ((BigQueryWriteStubSettings) getStubSettings()).createWriteStreamSettings();
}
/** Returns the object with the settings used for calls to appendRows. */
public StreamingCallSettings<AppendRowsRequest, AppendRowsResponse> appendRowsSettings() {
return ((BigQueryWriteStubSettings) getStubSettings()).appendRowsSettings();
}
/** Returns the object with the settings used for calls to getWriteStream. */
public UnaryCallSettings<GetWriteStreamRequest, WriteStream> getWriteStreamSettings() {
return ((BigQueryWriteStubSettings) getStubSettings()).getWriteStreamSettings();
}
/** Returns the object with the settings used for calls to finalizeWriteStream. */
public UnaryCallSettings<FinalizeWriteStreamRequest, FinalizeWriteStreamResponse>
finalizeWriteStreamSettings() {
return ((BigQueryWriteStubSettings) getStubSettings()).finalizeWriteStreamSettings();
}
/** Returns the object with the settings used for calls to batchCommitWriteStreams. */
public UnaryCallSettings<BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse>
batchCommitWriteStreamsSettings() {
return ((BigQueryWriteStubSettings) getStubSettings()).batchCommitWriteStreamsSettings();
}
/** Returns the object with the settings used for calls to flushRows. */
public UnaryCallSettings<FlushRowsRequest, FlushRowsResponse> flushRowsSettings() {
return ((BigQueryWriteStubSettings) getStubSettings()).flushRowsSettings();
}
public static final BigQueryWriteSettings create(BigQueryWriteStubSettings stub)
throws IOException {
return new BigQueryWriteSettings.Builder(stub.toBuilder()).build();
}
/** Returns a builder for the default ExecutorProvider for this service. */
public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() {
return BigQueryWriteStubSettings.defaultExecutorProviderBuilder();
}
/** Returns the default service endpoint. */
public static String getDefaultEndpoint() {
return BigQueryWriteStubSettings.getDefaultEndpoint();
}
/** Returns the default service scopes. */
public static List<String> getDefaultServiceScopes() {
return BigQueryWriteStubSettings.getDefaultServiceScopes();
}
/** Returns a builder for the default credentials for this service. */
public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() {
return BigQueryWriteStubSettings.defaultCredentialsProviderBuilder();
}
/** Returns a builder for the default ChannelProvider for this service. */
public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() {
return BigQueryWriteStubSettings.defaultGrpcTransportProviderBuilder();
}
public static TransportChannelProvider defaultTransportChannelProvider() {
return BigQueryWriteStubSettings.defaultTransportChannelProvider();
}
@BetaApi("The surface for customizing headers is not stable yet and may change in the future.")
public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() {
return BigQueryWriteStubSettings.defaultApiClientHeaderProviderBuilder();
}
/** Returns a new builder for this class. */
public static Builder newBuilder() {
return Builder.createDefault();
}
/** Returns a new builder for this class. */
public static Builder newBuilder(ClientContext clientContext) {
return new Builder(clientContext);
}
/** Returns a builder containing all the values of this settings class. */
public Builder toBuilder() {
return new Builder(this);
}
protected BigQueryWriteSettings(Builder settingsBuilder) throws IOException {
super(settingsBuilder);
}
/** Builder for BigQueryWriteSettings. */
public static class Builder extends ClientSettings.Builder<BigQueryWriteSettings, Builder> {
protected Builder() throws IOException {
this(((ClientContext) null));
}
protected Builder(ClientContext clientContext) {
super(BigQueryWriteStubSettings.newBuilder(clientContext));
}
protected Builder(BigQueryWriteSettings settings) {
super(settings.getStubSettings().toBuilder());
}
protected Builder(BigQueryWriteStubSettings.Builder stubSettings) {
super(stubSettings);
}
private static Builder createDefault() {
return new Builder(BigQueryWriteStubSettings.newBuilder());
}
public BigQueryWriteStubSettings.Builder getStubSettingsBuilder() {
return ((BigQueryWriteStubSettings.Builder) getStubSettings());
}
/**
* Applies the given settings updater function to all of the unary API methods in this service.
*
* <p>Note: This method does not support applying settings to streaming methods.
*/
public Builder applyToAllUnaryMethods(
ApiFunction<UnaryCallSettings.Builder<?, ?>, Void> settingsUpdater) {
super.applyToAllUnaryMethods(
getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater);
return this;
}
/** Returns the builder for the settings used for calls to createWriteStream. */
public UnaryCallSettings.Builder<CreateWriteStreamRequest, WriteStream>
createWriteStreamSettings() {
return getStubSettingsBuilder().createWriteStreamSettings();
}
/** Returns the builder for the settings used for calls to appendRows. */
public StreamingCallSettings.Builder<AppendRowsRequest, AppendRowsResponse>
appendRowsSettings() {
return getStubSettingsBuilder().appendRowsSettings();
}
/** Returns the builder for the settings used for calls to getWriteStream. */
public UnaryCallSettings.Builder<GetWriteStreamRequest, WriteStream> getWriteStreamSettings() {
return getStubSettingsBuilder().getWriteStreamSettings();
}
/** Returns the builder for the settings used for calls to finalizeWriteStream. */
public UnaryCallSettings.Builder<FinalizeWriteStreamRequest, FinalizeWriteStreamResponse>
finalizeWriteStreamSettings() {
return getStubSettingsBuilder().finalizeWriteStreamSettings();
}
/** Returns the builder for the settings used for calls to batchCommitWriteStreams. */
public UnaryCallSettings.Builder<
BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse>
batchCommitWriteStreamsSettings() {
return getStubSettingsBuilder().batchCommitWriteStreamsSettings();
}
/** Returns the builder for the settings used for calls to flushRows. */
public UnaryCallSettings.Builder<FlushRowsRequest, FlushRowsResponse> flushRowsSettings() {
return getStubSettingsBuilder().flushRowsSettings();
}
@Override
public BigQueryWriteSettings build() throws IOException {
return new BigQueryWriteSettings(this);
}
}
}
| apache-2.0 |
Siberis/AtomisProtocol | task-example/task-domain/src/main/java/siberis/atomis/task/messages/DeleteTask.java | 492 | package siberis.atomis.task.messages;
import core.message.MessagePayload;
import siberis.atomis.task.utils.TaskConstants;
/**
* Created by siberis on 11/7/2016.
*/
public class DeleteTask extends MessagePayload {
private String id;
public DeleteTask() {
}
public DeleteTask(String id) {
this.id = id;
}
@Override
public String getComputeTopic() {
return TaskConstants.TASK_TOPIC;
}
public String getId() {
return id;
}
} | apache-2.0 |
janus-project/guava.janusproject.io | guava/src/com/google/common/eventbus/AsyncSyncEventBus.java | 3992 | package com.google.common.eventbus;
import java.lang.annotation.Annotation;
import java.lang.reflect.InvocationTargetException;
import java.util.Iterator;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.logging.Level;
import java.util.logging.Logger;
import com.google.common.eventbus.EventBus.LoggingHandler;
/**
* An {@link EventBus} that mix synchronous event dispatching of {@link EventBus} and asynchronous of {@link AsyncEventBus}
*
* In case of synchronous dispatching this is the calling thread that executes the dispatching otherwise it is the specified executor
*
* @author Nicolas Gaud
*
*/
public class AsyncSyncEventBus extends EventBus {
private Dispatcher syncDispatcher = Dispatcher.perThreadDispatchQueue();
public AsyncSyncEventBus(String identifier, Executor executor, Class<? extends Annotation> annotation) {
super(identifier, executor, Dispatcher.legacyAsync(), LoggingHandler.INSTANCE,annotation);
}
public AsyncSyncEventBus(Executor executor, SubscriberExceptionHandler subscriberExceptionHandler, Class<? extends Annotation> annotation) {
super("default", executor, Dispatcher.legacyAsync(), subscriberExceptionHandler,annotation);
}
public AsyncSyncEventBus(Executor executor, Class<? extends Annotation> annotation) {
super("default", executor, Dispatcher.legacyAsync(), LoggingHandler.INSTANCE,annotation);
}
public void fire(Object event) {
Iterator<Subscriber> eventSubscribers = getSubscribers().getSubscribers(event);
if (eventSubscribers.hasNext()) {
this.syncDispatcher.dispatch(event, eventSubscribers);
} else if (!(event instanceof DeadEvent)) {
// the event had no subscribers and was not itself a DeadEvent
this.fire(new DeadEvent(this, event));
}
}
/*
* public AsyncSyncEventBus(String identifier, Executor executor) { super(identifier, executor); }
*
* public AsyncSyncEventBus(Executor executor, SubscriberExceptionHandler subscriberExceptionHandler) { super(executor, subscriberExceptionHandler); }
*
* public AsyncSyncEventBus(Executor executor) { super(executor);
*
* }
*
* public void fire(Object event) { Set<Class<?>> dispatchTypes = flattenHierarchy(event.getClass());
*
* boolean dispatched = false; for (Class<?> eventType : dispatchTypes) { subscribersByTypeLock.readLock().lock(); try { Set<EventSubscriber> wrappers = subscribersByType .get(eventType);
*
* if (!wrappers.isEmpty()) { dispatched = true; for (EventSubscriber wrapper : wrappers) { eventsToDispatch.get().offer(new EventWithSubscriber(event, wrapper)); } } } finally { subscribersByTypeLock.readLock().unlock(); } }
*
* if (!dispatched && !(event instanceof DeadEvent)) { fire(new DeadEvent(this, event)); }
*
* fireQueuedEvents(); }
*
* void fireQueuedEvents() { // don't dispatch if we're already dispatching, that would allow // reentrancy // and out-of-order events. Instead, leave the events to be dispatched // after the in-progress dispatch is complete. if (isDispatching.get()) { return; }
*
* isDispatching.set(true); try { Queue<EventWithSubscriber> events = eventsToDispatch.get(); EventWithSubscriber eventWithSubscriber; while ((eventWithSubscriber = events.poll()) != null) { synchronousDispatch(eventWithSubscriber.event, eventWithSubscriber.subscriber); } } finally { isDispatching.remove(); eventsToDispatch.remove(); } }
*
* void synchronousDispatch(Object event, EventSubscriber wrapper) { try { wrapper.handleEvent(event); } catch (InvocationTargetException e) { try { subscriberExceptionHandler.handleException( e.getCause(), new SubscriberExceptionContext(this, event, wrapper .getSubscriber(), wrapper.getMethod())); } catch (Throwable t) { // If the exception handler throws, log it. There isn't much // else to do! Logger.getLogger(AsyncSyncEventBus.class.getName()) .log(Level.SEVERE, String.format( "Exception %s thrown while handling exception: %s", t, e.getCause()), t); } } }
*/
}
| apache-2.0 |
nokok/Karaffe | src/main/java/org/karaffe/compiler/backend/jvm/BytecodeSelectorForNumber.java | 1055 | package org.karaffe.compiler.backend.jvm;
import org.objectweb.asm.Opcodes;
import org.objectweb.asm.tree.AbstractInsnNode;
import org.objectweb.asm.tree.InsnNode;
import org.objectweb.asm.tree.IntInsnNode;
import org.objectweb.asm.tree.LdcInsnNode;
class BytecodeSelectorForNumber {
public static AbstractInsnNode fromInt(int value) {
switch (value) {
case -1:
return new InsnNode(Opcodes.ICONST_M1);
case 0:
return new InsnNode(Opcodes.ICONST_0);
case 1:
return new InsnNode(Opcodes.ICONST_1);
case 2:
return new InsnNode(Opcodes.ICONST_2);
case 3:
return new InsnNode(Opcodes.ICONST_3);
case 4:
return new InsnNode(Opcodes.ICONST_4);
case 5:
return new InsnNode(Opcodes.ICONST_5);
}
if (Byte.MIN_VALUE <= value && value <= Byte.MAX_VALUE) {
return new IntInsnNode(Opcodes.BIPUSH, value);
}
if (Short.MIN_VALUE <= value && value <= Short.MAX_VALUE) {
return new IntInsnNode(Opcodes.SIPUSH, value);
}
return new LdcInsnNode(value);
}
}
| apache-2.0 |
piotrpietrzak/aprcalc | src/main/java/me/pietrzak/aprcalc/root/NewtonsMethod.java | 1594 | package me.pietrzak.aprcalc.root;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.util.Optional;
import java.util.function.Function;
public class NewtonsMethod {
private static final int maximumNumberOfIterations = 5000;
private static final int internalComputationsScale = 20;
private static final BigDecimal maximumRange = BigDecimal.TEN.pow(322);
private static final BigDecimal epsilon = BigDecimal.valueOf(1E-10);
public static Optional<BigDecimal> findZero(Function<BigDecimal, BigDecimal> function, Function<BigDecimal, BigDecimal> derivative) {
BigDecimal x = BigDecimal.ZERO;
int iterationsCounter = 0;
do {
BigDecimal derivativeAtX = derivative.apply(x);
if (derivativeAtX.compareTo(BigDecimal.ZERO) == 0) {
return Optional.empty();
}
if (x.compareTo(BigDecimal.ZERO)<0) {
x = BigDecimal.ZERO;
}
if (iterationsCounter++ > maximumNumberOfIterations) {
return Optional.empty();
}
if (x.compareTo(maximumRange)>0) {
return Optional.empty();
}
x = x.subtract(function.apply(x).divide(derivativeAtX, internalComputationsScale, BigDecimal.ROUND_HALF_UP));
} while (errorIsLargerThanExpected(function, x, epsilon));
return Optional.of(x);
}
private static boolean errorIsLargerThanExpected(Function<BigDecimal, BigDecimal> function, BigDecimal x, BigDecimal epsilon) {
return function.apply(x).abs().compareTo(epsilon) >= 0;
}
}
| apache-2.0 |
NAUMEN-GP/saiku | saiku-core/saiku-service/src/main/java/org/saiku/repository/SaikuWebdavServlet.java | 5265 | package org.saiku.repository;
import org.saiku.database.dto.SaikuUser;
import org.saiku.service.datasource.RepositoryDatasourceManager;
import org.saiku.service.user.UserService;
import org.apache.jackrabbit.webdav.*;
import org.apache.jackrabbit.webdav.simple.SimpleWebdavServlet;
import org.apache.jackrabbit.webdav.util.CSRFUtil;
import org.springframework.web.context.WebApplicationContext;
import org.springframework.web.context.support.WebApplicationContextUtils;
import java.io.IOException;
import java.util.List;
import javax.jcr.Repository;
import javax.servlet.ServletConfig;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* Created by bugg on 04/09/14.
*/
public final class SaikuWebdavServlet extends SimpleWebdavServlet {
private RepositoryDatasourceManager bean;
private CSRFUtil csrfUtil;
private UserService us;
@Override
public void init(ServletConfig config) throws ServletException {
super.init(config);
String csrfParam = getInitParameter(INIT_PARAM_CSRF_PROTECTION);
csrfUtil = new CSRFUtil(csrfParam);
ServletContext context = getServletContext();
WebApplicationContext applicationContext =
WebApplicationContextUtils
.getWebApplicationContext(context);
bean = (RepositoryDatasourceManager) applicationContext.getBean("repositoryDsManager");
us = (UserService) applicationContext.getBean("userServiceBean");
}
private boolean checkUserRole(HttpServletRequest request){
for(SaikuUser u: us.getUsers()){
if(u.getUsername().equals(request.getRemoteUser())){
String[] roles = us.getRoles(u);
List<String> admin = us.getAdminRoles();
for(String r: roles){
if(admin.contains(r)){
return true;
}
}
}
}
return false;
}
/**
* Service the given request.
*
* @param request
* @param response
* @throws ServletException
* @throws IOException
*/
@Override
protected void service(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
WebdavRequest webdavRequest = new WebdavRequestImpl(request, getLocatorFactory(), isCreateAbsoluteURI());
// DeltaV requires 'Cache-Control' header for all methods except 'VERSION-CONTROL' and 'REPORT'.
int methodCode = DavMethods.getMethodCode(request.getMethod());
boolean noCache = DavMethods.isDeltaVMethod(webdavRequest) && !(DavMethods.DAV_VERSION_CONTROL == methodCode || DavMethods.DAV_REPORT == methodCode);
WebdavResponse webdavResponse = new WebdavResponseImpl(response, noCache);
try {
// make sure there is a authenticated user
if (!getDavSessionProvider().attachSession(webdavRequest)) {
return;
}
if(!checkUserRole(request)){
return;
}
// perform referrer host checks if CSRF protection is enabled
if (!csrfUtil.isValidRequest(webdavRequest)) {
webdavResponse.sendError(HttpServletResponse.SC_FORBIDDEN);
return;
}
// check matching if=header for lock-token relevant operations
DavResource resource = getResourceFactory().createResource(webdavRequest.getRequestLocator(), webdavRequest, webdavResponse);
if (!isPreconditionValid(webdavRequest, resource)) {
webdavResponse.sendError(HttpServletResponse.SC_PRECONDITION_FAILED);
return;
}
if (!execute(webdavRequest, webdavResponse, methodCode, resource)) {
super.service(request, response);
}
} catch (DavException e) {
if (e.getErrorCode() == HttpServletResponse.SC_UNAUTHORIZED) {
sendUnauthorized(webdavRequest, webdavResponse, e);
} else {
webdavResponse.sendError(e);
}
} finally {
getDavSessionProvider().releaseSession(webdavRequest);
}
}
@Override
public Repository getRepository() {
return (Repository) bean.getRepository();
}
@Override
public void doPost(WebdavRequest request,
WebdavResponse response,
DavResource resource)
throws IOException,
DavException{
// super.doPost(request, response, resource);
DavResource parentResource = resource.getCollection();
if (parentResource == null || !parentResource.exists()) {
// parent does not exist
response.sendError(DavServletResponse.SC_CONFLICT);
return;
}
int status;
// test if resource already exists
if (resource.exists()) {
status = DavServletResponse.SC_NO_CONTENT;
} else {
status = DavServletResponse.SC_CREATED;
}
parentResource.addMember(resource, getInputContext(request, request.getInputStream()));
response.setStatus(status);
}
@Override
public void doPut(WebdavRequest request,
WebdavResponse response,
DavResource resource)
throws IOException,
DavException{
super.doPut(request, response, resource);
}
}
| apache-2.0 |
fdorigo/rmfly | src/java/com/fdorigo/rmfly/jpa/session/exceptions/NonexistentEntityException.java | 309 | package com.fdorigo.rmfly.jpa.session.exceptions;
/**
 * Thrown when a persistence operation targets an entity that does not exist
 * (e.g. editing or destroying a record that was never created or was already
 * removed).
 */
public class NonexistentEntityException extends Exception {

    /**
     * Creates the exception with a detail message only.
     *
     * @param message description of the missing entity
     */
    public NonexistentEntityException(String message) {
        super(message);
    }

    /**
     * Creates the exception with a detail message and the underlying cause.
     *
     * @param message description of the missing entity
     * @param cause   the original failure that revealed the missing entity
     */
    public NonexistentEntityException(String message, Throwable cause) {
        super(message, cause);
    }
}
| apache-2.0 |
Jareld/VPlayer | app/src/main/java/com/example/lyc/vrexplayer/task/CameraDataReciverTask.java | 21940 | package com.example.lyc.vrexplayer.task;
import android.media.MediaPlayer;
import android.net.LocalServerSocket;
import android.net.LocalSocket;
import android.os.AsyncTask;
import android.os.Handler;
import android.util.Log;
import android.view.SurfaceView;
import com.example.lyc.vrexplayer.Utils.CameraUtil;
import com.example.lyc.vrexplayer.Utils.LogUtils;
import com.example.lyc.vrexplayer.glsurface.GLFrameRenderer;
import java.io.IOException;
import java.io.InputStream;
import java.net.ServerSocket;
import java.net.Socket;
import java.nio.ByteBuffer;
/*
* @项目名: TestWifiDerect
* @包名: com.example.lyc2.testwifiderect.task
* @文件名: CameraDataReciverTask
* @创建者: LYC2
* @创建时间: 2016/11/29 14:55
* @描述: TODO
*/
public class CameraDataReciverTask
extends AsyncTask<Void, Void, String>
{
private static final String TAG = "hyCameraDataReciverTask";
private static final String SOCKET_ADDRESS = "LOCAL_SOCKET_ADDRESS";
private GLFrameRenderer mGlFrameRenderer;
private Socket mClient;
private ServerSocket mServerSocket;
private String mMessage;
private String mEchoMessage;
private String mFrameString;
private Handler mHandler = new Handler();
private LocalServerSocket localServerSocket;
private LocalSocket localSocketSender;
private LocalSocket localSocketReceiver;
private MediaPlayer mMediaPlayer;
private SurfaceView mSurfaceView;
public CameraDataReciverTask(GLFrameRenderer glFrameRenderer) {
mGlFrameRenderer = glFrameRenderer;
}
public CameraDataReciverTask() {
}
public CameraDataReciverTask(GLFrameRenderer glFrameRenderer,
MediaPlayer mp,
SurfaceView sv_reviver_data)
{
mGlFrameRenderer = glFrameRenderer;
mMediaPlayer = mp;
mSurfaceView = sv_reviver_data;
}
/*
* (non-Javadoc)
*
* @see android.os.AsyncTask#onPostExecute(java.lang.Object)
*/
@Override
protected void onPostExecute(String result) {
}
/*
* (non-Javadoc)
*
* @see android.os.AsyncTask#onPreExecute()
*/
@Override
protected void onPreExecute() {
}
@Override
protected String doInBackground(Void... params) {
mServerSocket = null;
try {
String name = Thread.currentThread()
.getName();
mServerSocket = new ServerSocket(10086);
//mClient.setReceiveBufferSize(1024 * 1024 * 10);
mClient = mServerSocket.accept();
mClient.setKeepAlive(true);
// LogUtils.logInfo(TAG, "doInBackground", "name=" + name);
byte[] bys = new byte[1024 * 100];
ByteBuffer frameBB = ByteBuffer.allocate(CameraUtil.CAMERA_PIXELS);
int len;
InputStream inputStream = mClient.getInputStream();
int length = 0;
int needDifference = 0;
boolean isOver = false;
byte[] overBytes = new byte[1024 * 100];
//create a localsocket
while ((len = inputStream.read(bys)) != -1) {
length += len;
if (length > CameraUtil.CAMERA_PIXELS) {
//说明就要截断了
int difference = length - CameraUtil.CAMERA_PIXELS;
needDifference = difference;
//上一个length需要他就成为了一个115200 一帧的数据
int beforeNeed = Math.abs(len - difference);
byte[] desBytes = new byte[beforeNeed];
System.arraycopy(bys, 0, desBytes, 0, beforeNeed);
isOver = true;
frameBB.put(desBytes);
overBytes = new byte[difference];
System.arraycopy(bys, beforeNeed, overBytes, 0, difference);
// overFrameBB.clear();
// overFrameBB.put(overBytes);
length = CameraUtil.CAMERA_PIXELS;
// Log.d(TAG, "这里超出了:");
} else {
byte[] desBytes = new byte[len];
System.arraycopy(bys, 0, desBytes, 0, len);
frameBB.put(desBytes);
// Log.d(TAG, "这里正常添加了:");
}
if (length == CameraUtil.CAMERA_PIXELS) {
// Log.d(TAG, "这里进入到了length为一帧的情况:");
//使用到的byte[]
//重置一下
length = 0;
splitYUVData(frameBB.array());
mHandler.post(new Runnable() {
@Override
public void run() {
String name = Thread.currentThread()
.getName();
// LogUtils.logInfo(TAG, "run", "name=" + name);
mGlFrameRenderer.update(mYData, mUData, mVData);
}
});
frameBB.clear();
if (isOver) {
// LogUtils.logInfo(TAG,
// "超出的时候 两者的position",
// "framebb.postion" + frameBB.position());
//frameBB = overFrameBB;
//overFrameBB.clear();
frameBB.put(overBytes);
// LogUtils.logInfo(TAG,
// "超出的时候 两者的position",
// "framebb.postiofn复制后的" + frameBB.position() + "length=" + length);
length = needDifference;
isOver = false;
}
}
// length += len;
//
// LogUtils.logInfo(TAG,
// "doInBackground",
// "frameBB.length" + frameBB.array().length + "lenght" + length + "ByteBuffer.wrap(bys, 0, len).array().length" + ByteBuffer.wrap(
// bys,
// 0,
// len)
// .array().length);
//
// int available = inputStream.available();
// LogUtils.logInfo(TAG, "doInBackground", "输入流可读的字数:" + available);
// // byte[] desBytes = new byte[len];
// //
// // System.arraycopy(bys, 0, desBytes, 0, len);
// // frameBB.put(desBytes);
//
//
// LogUtils.logInfo(TAG, "doInBackground", "len" + len);
// if (length > CameraUtil.CAMERA_PIXELS) {
// //说明就要截断了
// int difference = length - CameraUtil.CAMERA_PIXELS;
// needDifference = difference;
// //上一个length需要他就成为了一个115200 一帧的数据
// int beforeNeed = Math.abs(len - difference);
// byte[] desBytes = new byte[beforeNeed];
// System.arraycopy(bys, 0, desBytes, 0, beforeNeed);
// isOver = true;
// // frameBB.put(desBytes);
//
// LogUtils.logInfo(TAG,
// "doInBackground",
// "测试一下看各个差值分别是多少" + difference + "::" + beforeNeed + "::" + length + "::" + len);
//
// LogUtils.logInfo(TAG, "doInBackground", "超出的范围进行分割" + frameBB.position());
//
// overBytes = new byte[difference];
//
//
// System.arraycopy(bys, beforeNeed, overBytes, 0, difference);
// overFrameBB.clear();
// overFrameBB.put(overBytes);
// length = CameraUtil.CAMERA_PIXELS;
// } else {
// byte[] desBytes = new byte[len];
//
// System.arraycopy(bys, 0, desBytes, 0, len);
//
// frameBB.put(desBytes);
// LogUtils.logInfo(TAG, "doInBackground", "正常的叠加" + frameBB.position());
// }
//
// // try {
// //
// // frameBB.put(desBytes);
// //
// // } catch (Exception e) {
// // //说明要丢帧了
// q // LogUtils.logInfo(TAG, "doInBackground", "说明要丢帧了 重试一下");
// //
// // }
//
// if (length == CameraUtil.CAMERA_PIXELS) {
// final byte[] finalFrameString = frameBB.array();
// //重置一下
// length = 0;
// frameBB.clear();
//
// if (isOver) {
// //如果是超出了
// LogUtils.logInfo(TAG,
// "isOver",
// "看缓冲区的位置和difference是不是一样的:" + overFrameBB.position() + "::" + needDifference);
// frameBB = overFrameBB;
// overFrameBB.clear();
// length = needDifference;
// isOver = false;
// }
// mHandler.post(new Runnable() {
// @Override
//
// public void run() {
// LogUtils.logInfo(TAG, "hyrun", finalFrameString.length + "");
//
// // ByteArrayInputStream bais = new ByteArrayInputStream(finalFrameString);
// // YuvImage image = new YuvImage(finalFrameString, ImageFormat.NV21,320, 240, null);
// YuvImage image = new YuvImage(finalFrameString,
// ImageFormat.NV21,
// CameraUtil.CAMERA_WIDTH,
// CameraUtil.CAMERA_HEIGHT,
// null);
//
// if (image != null) {
// ByteArrayOutputStream stream = new ByteArrayOutputStream();
// image.compressToJpeg(new Rect(0,
// 0,
// CameraUtil.CAMERA_WIDTH,
// CameraUtil.CAMERA_HEIGHT),
// 100,
// stream);
// Bitmap bmp = BitmapFactory.decodeByteArray(stream.toByteArray(),
// 0,
// stream.size());
// LogUtils.logInfo(TAG, "run()", stream.size() + "::");
// mIv_camera.setImageBitmap(bmp);
//
// }
//
// }
//
//
// });
//
// }
}
// InputStream inputStream = mClient.getInputStream();
// OutputStream outputStream = mClient.getOutputStream();
// ByteArrayOutputStream swapStream = new ByteArrayOutputStream();
//
// byte buf[] = new byte[1024 * 10];
// String frameString = null;
// byte[] frame = new byte[115200];
// int len;
// long length = 0;
// int bytesRead = 0;
// int number = 0;
// int baseNum = 115200;
// StringBuilder stringBuilder = new StringBuilder();
// while ((len = inputStream.read(frame)) != -1) {
//// //swapStream.write(buf, 0, len);
//// length += len;
//// float f = (float) length / (float) 115200;
//// int i = (int) (length / 115200);
//// LogUtils.logInfo(TAG,
//// "doInBackground",
//// "这里进行了写入" + len + "::" + length + "length/len" + f);
////
//// boolean anInt = isInt(f, i);
//// LogUtils.logInfo(TAG, "doInBackground", "看是不是整数" + anInt + "buf的长度" + buf.toString());
//// stringBuilder.append(buf);
//// LogUtils.logInfo(TAG, "hyrun", "进入之前的长度" + stringBuilder.length());
//// if (anInt) {
//// frameString = stringBuilder.toString();
//// //说明 是一幅画面的倍数了
////
////
//// // byte[] byteArray = new byte[1024 * 1024 * 10];
//// //
//// //
//// // if (number == 0) {
//// // bytesRead = inputStream.read(byteArray, 0, baseNum);
//// // number++;
//// // } else {
//// // bytesRead = inputStream.read(byteArray, number * baseNum, baseNum);
//// // number++;
//// // }
//// // mMessage = new String(byteArray, Charset.forName("ISO-8859-1"));
//// // mFrameString = mMessage.toString();
////
//// final String finalFrameString = frameString;
//// mHandler.post(new Runnable() {
//// @Override
//// public void run() {
//// byte[] frame = finalFrameString.getBytes();
//// LogUtils.logInfo(TAG, "hyrun", frame.length + "");
//// ByteArrayInputStream bais = new ByteArrayInputStream(frame);
//// Bitmap map = BitmapFactory.decodeStream(bais);
//// mIv_camera.setImageBitmap(map);
//// }
//// });
////
//// frameString = "";
//// stringBuilder = new StringBuilder();
//// }
// final byte[] finalFrameString = frame;
// mHandler.post(new Runnable() {
// @Override
// public void run() {
// LogUtils.logInfo(TAG,"run","frame的大小"+finalFrameString.length+"frame的tostring"+finalFrameString.toString());
// YuvImage yuvimage =new YuvImage(finalFrameString, ImageFormat.NV21, 400, 400, null);//20、20分别是图的宽度与高度
// ByteArrayOutputStream baos = new ByteArrayOutputStream();
// yuvimage.compressToJpeg(new Rect(0, 0, 400, 400), 100, baos);//80--JPG图片的质量[0-100],100最高
// byte[] jdata = baos.toByteArray();
// Bitmap bmp = BitmapFactory.decodeByteArray(jdata, 0, jdata.length);
//
//
// mIv_camera.setImageBitmap(bmp);
// }
// });
// }
// byte[] in2b = swapStream.toByteArray();
// Log.d(TAG, "doInBackground: in2b" + in2b.length);
LogUtils.logInfo(TAG, "doInBackground", "看Camera连接上了:" + mClient.isConnected());
} catch (Exception e) {
e.printStackTrace();
LogUtils.logInfo(TAG, "doInBackground", "这里接受的时候出错了");
} finally {
try {
if (mServerSocket != null) {
mServerSocket.close();
}
if (mClient != null) {
mClient.close();
}
} catch (IOException e) {
e.printStackTrace();
}
}
return null;
}
private boolean isInt(float f, int i) {
if (f - (float) i == 0) {
//说明是整数
return true;
}
return false;
}
//获取到宽高
byte[] mYData = new byte[CameraUtil.CAMERA_WIDTH * CameraUtil.CAMERA_HEIGHT];
byte[] mUData = new byte[CameraUtil.CAMERA_WIDTH * CameraUtil.CAMERA_HEIGHT / 4];
byte[] mVData = new byte[CameraUtil.CAMERA_WIDTH * CameraUtil.CAMERA_HEIGHT / 4];
public void splitYUVData(byte[] data) {
ByteBuffer mYUVData = ByteBuffer.wrap(data);
Log.d(TAG, "HY不同的测试:splitYUVData" + data.length);
mYUVData.get(mYData, 0, CameraUtil.CAMERA_WIDTH * CameraUtil.CAMERA_HEIGHT);
mYUVData.position(CameraUtil.CAMERA_WIDTH * CameraUtil.CAMERA_HEIGHT);
mYUVData.get(mVData, 0, CameraUtil.CAMERA_WIDTH * CameraUtil.CAMERA_HEIGHT / 4);
mYUVData.position(CameraUtil.CAMERA_WIDTH * CameraUtil.CAMERA_HEIGHT * 5 / 4);
mYUVData.get(mUData, 0, CameraUtil.CAMERA_WIDTH * CameraUtil.CAMERA_HEIGHT / 4);
// int nFrameSize = CameraUtil.CAMERA_WIDTH * CameraUtil.CAMERA_HEIGHT;
// int k = 0;
//
// for (int i = 0; i < CameraUtil.CAMERA_WIDTH * CameraUtil.CAMERA_HEIGHT / 2; i += 2) {
// mVData[k] = data[nFrameSize + i]; //v
// mUData[k] = data[nFrameSize + i + 1];//u
// k++;
// }
}
} | apache-2.0 |
tianzhijiexian/CommonAdapter | lib/src/main/java/kale/adapter/item/AdapterItem.java | 691 | package kale.adapter.item;
import android.support.annotation.LayoutRes;
import android.support.annotation.NonNull;
import android.view.View;
/**
 * Contract for one item type rendered by the common adapter: supplies the
 * item's layout, binds its views, configures them, and fills them with data.
 *
 * @author Jack Tony
 * @date 2015/5/15
 */
public interface AdapterItem<T> {

    /**
     * @return the layout resource id of this item's layout file
     */
    @LayoutRes
    int getLayoutResId();

    /**
     * Finds and caches the child views of the inflated item layout.
     *
     * @param root the inflated root view of this item
     */
    void bindViews(@NonNull final View root);

    /**
     * Configures the views' properties (intended for one-time setup such as
     * listeners — presumably invoked once per created item; confirm in the
     * adapter implementation).
     */
    void setViews();

    /**
     * Populates this item's views from the data model.
     *
     * @param t        the model taken from the adapter's data list
     * @param position the adapter position currently being bound
     */
    void handleData(T t, int position);
}
SSEHUB/EASyProducer | Plugins/Instantiation/Instantiator.Java/src/net/ssehub/easy/instantiation/java/JavaSettingsInitializer.java | 9927 | package net.ssehub.easy.instantiation.java;
import java.io.File;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import net.ssehub.easy.instantiation.core.model.artifactModel.Path;
import net.ssehub.easy.instantiation.core.model.artifactModel.ProjectSettings;
import net.ssehub.easy.instantiation.core.model.vilTypes.ISettingsInitializer;
import net.ssehub.easy.instantiation.core.model.vilTypes.Set;
import net.ssehub.easy.instantiation.core.model.vilTypes.TypeDescriptor;
import net.ssehub.easy.instantiation.core.model.vilTypes.TypeRegistry;
/**
 * Initializes the VIL project settings for Java and provides helpers to
 * determine and normalize the classpath handed to the JDT/AST parser.
 *
 * @author Aike Sass
 */
public class JavaSettingsInitializer implements ISettingsInitializer {

    /** Classpath of the running JVM, captured once at class-load time. */
    private static final String JAVA_CLASSPATH = System.getProperty("java.class.path");

    /**
     * Intentionally a no-op: earlier experiments that manipulated the system
     * classpath here were abandoned (see version-control history for the
     * removed code).
     */
    @Override
    public void initialize(File base, Map<ProjectSettings, Object> settings) {
    }

    /**
     * Determines the classpath. A classpath given via VIL — either a single
     * String or a Set of Strings/Paths — is parsed accordingly; entries whose
     * files do not exist are dropped. When no VIL classpath is given, the
     * JVM's own classpath is used.
     *
     * @param classpath classpath object (String, Set, or <b>null</b>)
     * @return normalized classpath entries
     */
    public static String[] determineClasspath(Object classpath) {
        // In case the classpath is set via VIL
        String[] result = null;
        if (null != classpath) {
            if (classpath instanceof String) {
                result = normalizePath(new String[] {String.valueOf(classpath)});
            } else if (classpath instanceof Set<?>) {
                Set<?> classpathSet = (Set<?>) classpath;
                HashSet<String> tmpClasspath = new HashSet<String>();
                // The element type is taken from the set's last generic parameter.
                int parameterCount = classpathSet.getGenericParameterCount();
                TypeDescriptor<?> typeDescriptorParameter = classpathSet.getGenericParameterType(parameterCount - 1);
                for (Iterator<?> iterator = classpathSet.iterator(); iterator.hasNext();) {
                    if (TypeRegistry.stringType().isSame(typeDescriptorParameter)) {
                        // String entry: keep only if the file exists.
                        String string = (String) iterator.next();
                        File file = new File(string);
                        if (file.exists()) {
                            add(tmpClasspath, normalizePath(file.getAbsolutePath()));
                        }
                    } else if (typeDescriptorParameter.isSame(TypeRegistry.DEFAULT.findType(Path.class))) {
                        // VIL Path entry: keep only if it exists.
                        Path path = (Path) iterator.next();
                        if (path.exists()) {
                            add(tmpClasspath, normalizePath(path.getAbsolutePath().getAbsolutePath()));
                        }
                    } else {
                        // Unsupported element type: consume and ignore.
                        iterator.next();
                    }
                }
                String[] tmpResult = new String[tmpClasspath.size()];
                result = tmpClasspath.toArray(tmpResult);
            }
        } else {
            // No VIL classpath: fall back to the JVM classpath.
            String[] systemClasspath = normalizePath(JAVA_CLASSPATH.split(File.pathSeparator));
            result = systemClasspath;
        }
        return result;
    }

    /**
     * Converts paths to the Unix file pattern, dropping unresolvable entries.
     *
     * WARNING: The JDT/AST parser expects a "/" as separator!
     *
     * @param allPaths array containing all path elements
     * @return array with converted, existing path elements
     */
    private static String[] normalizePath(String[] allPaths) {
        List<String> tmp = new ArrayList<String>();
        for (int i = 0; i < allPaths.length; i++) {
            String normalized = normalizePath(allPaths[i]);
            if (null != normalized) {
                tmp.add(normalized);
            }
        }
        String[] result = new String[tmp.size()];
        return tmp.toArray(result);
    }

    /**
     * Filters <code>allPaths</code> for resolvable paths (existing files).
     *
     * @param allPaths the paths to be filtered
     * @return the filtered paths, in original order
     */
    public static String[] filterPath(String[] allPaths) {
        List<String> tmp = new ArrayList<String>();
        for (int i = 0; i < allPaths.length; i++) {
            String path = resolve(allPaths[i]);
            if (null != path) {
                tmp.add(allPaths[i]);
            }
        }
        String[] result = new String[tmp.size()];
        return tmp.toArray(result);
    }

    /**
     * Adds <code>elt</code> to <code>elts</code>, ignoring <b>null</b>.
     *
     * @param elts the elements
     * @param elt  the element to add (may be <b>null</b>, ignored then)
     */
    private static void add(HashSet<String> elts, String elt) {
        if (null != elt) {
            elts.add(elt);
        }
    }

    /**
     * Converts a single path to the Unix file pattern.
     *
     * WARNING: The JDT/AST parser expects a "/" as separator!
     *
     * @param path path as string
     * @return converted path (may be <b>null</b> if <code>path</code> does not exist)
     */
    private static String normalizePath(String path) {
        String result = resolve(path);
        if (null != result) {
            result = result.replace("\\", "/");
        }
        return result;
    }

    /**
     * Returns a resolved path.
     *
     * @param path the path to resolve
     * @return the resolved path, or <b>null</b> if the path cannot be resolved
     *         (primitive existence check; cannot handle the Eclipse workspace root)
     */
    private static String resolve(String path) {
        String resolved = null;
        if (null != path) {
            File f = new File(path);
            if (f.exists()) {
                resolved = path;
            }
        }
        return resolved;
    }
}
| apache-2.0 |
KleeGroup/vertigo-stella | vertigo-stella-impl/src/main/java/io/vertigo/stella/plugins/work/redis/worker/RedisWorkerPlugin.java | 3220 | /**
* vertigo - simple java starter
*
* Copyright (C) 2013-2017, KleeGroup, direction.technique@kleegroup.com (http://www.kleegroup.com)
* KleeGroup, Centre d'affaire la Boursidiere - BP 159 - 92357 Le Plessis Robinson Cedex - France
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.vertigo.stella.plugins.work.redis.worker;
import java.util.Map;
import java.util.Optional;
import javax.inject.Inject;
import javax.inject.Named;
import io.vertigo.commons.codec.CodecManager;
import io.vertigo.lang.Activeable;
import io.vertigo.lang.Assertion;
import io.vertigo.stella.impl.node.WorkDispatcherConfUtil;
import io.vertigo.stella.impl.node.WorkerPlugin;
import io.vertigo.stella.impl.work.WorkItem;
import io.vertigo.stella.plugins.work.redis.RedisDB;
/**
 * NodePlugin.
 * This plugin executes work items in distributed mode; Redis is used as the
 * exchange platform between the nodes.
 *
 * @author pchretien
 */
public final class RedisWorkerPlugin implements WorkerPlugin, Activeable {
    /** Work-type name mapped to its configured worker count. */
    private final Map<String, Integer> workTypes;
    /** Wrapper around the Redis connection used as the work queue. */
    private final RedisDB redisDB;

    /**
     * Constructor.
     *
     * @param codecManager   codec manager used to (de)serialize work items
     * @param nodeId         identifier of this node (injected; not referenced here)
     * @param workTypes      work-type configuration string, parsed by
     *                       {@link WorkDispatcherConfUtil#readWorkTypeConf}
     * @param redisHost      Redis server host
     * @param redisPort      Redis server port
     * @param timeoutSeconds Redis timeout in seconds (must be below 10000)
     * @param password       optional Redis password
     */
    @Inject
    public RedisWorkerPlugin(
            final CodecManager codecManager,
            @Named("nodeId") final String nodeId,
            @Named("workTypes") final String workTypes,
            @Named("host") final String redisHost,
            @Named("port") final int redisPort,
            @Named("timeoutSeconds") final int timeoutSeconds,
            @Named("password") final Optional<String> password) {
        Assertion.checkNotNull(codecManager);
        Assertion.checkArgNotEmpty(workTypes);
        Assertion.checkArgNotEmpty(redisHost);
        // Message is French for "the timeout is expressed in seconds".
        Assertion.checkArgument(timeoutSeconds < 10000, "Le timeout s'exprime en seconde.");
        //-----
        this.workTypes = WorkDispatcherConfUtil.readWorkTypeConf(workTypes);
        redisDB = new RedisDB(codecManager, redisHost, redisPort, timeoutSeconds, password);
    }

    /** {@inheritDoc} */
    @Override
    public Map<String, Integer> getWorkTypes() {
        return workTypes;
    }

    /** {@inheritDoc} */
    @Override
    public void start() {
        redisDB.start();
        // Node registration is currently disabled.
        //redisDB.registerNode(new Node(getNodeId(), true));
    }

    /** {@inheritDoc} */
    @Override
    public void stop() {
        //redisDB.registerNode(new Node(getNodeId(), false));
        redisDB.stop();
    }

    /*public List<Node> getNodes() {
        return redisDB.getNodes();
    }*/

    /** {@inheritDoc} Fetches the next work item of the given type from Redis. */
    @Override
    public <R, W> WorkItem<R, W> pollWorkItem(final String workType) {
        return redisDB.pollWorkItem(workType);
    }

    /** {@inheritDoc} Publishes the result (or the error) of a finished work item. */
    @Override
    public <R> void putResult(final String workId, final R result, final Throwable error) {
        redisDB.putResult(workId, result, error);
    }

    /** {@inheritDoc} Signals that the given work item has started executing. */
    @Override
    public void putStart(final String workId) {
        redisDB.putStart(workId);
    }
}
| apache-2.0 |
Yorxxx/playednext | app/src/test/java/com/piticlistudio/playednext/BaseTest.java | 604 | package com.piticlistudio.playednext;
import org.junit.Rule;
import org.mockito.junit.MockitoJUnit;
import org.mockito.junit.MockitoRule;
import io.reactivex.functions.Consumer;
import io.reactivex.functions.Predicate;
/**
* Base test entity.
* Automatically adds common requirements, like Mockito
* Created by jorge.garcia on 15/12/2016.
*/
public class BaseTest {
@Rule
public MockitoRule rule = MockitoJUnit.rule();
protected static <T> Predicate<T> check(Consumer<T> consumer) {
return t -> {
consumer.accept(t);
return true;
};
}
}
| apache-2.0 |
BellaDati/belladati-sdk-java | src/main/java/com/belladati/sdk/dataset/source/impl/ImportIntervalImpl.java | 619 | package com.belladati.sdk.dataset.source.impl;
import com.belladati.sdk.dataset.source.ImportInterval;
import com.belladati.sdk.dataset.source.ImportIntervalUnit;
/**
 * Plain value implementation of {@link ImportInterval}: a base time unit
 * combined with a multiplier (e.g. 3 units of one hour).
 */
public class ImportIntervalImpl implements ImportInterval {

    /** Base unit the interval is expressed in. */
    private final ImportIntervalUnit baseUnit;

    /** Number of base units that make up the interval. */
    private final int multiplier;

    protected ImportIntervalImpl(ImportIntervalUnit unit, int factor) {
        this.baseUnit = unit;
        this.multiplier = factor;
    }

    @Override
    public ImportIntervalUnit getUnit() {
        return baseUnit;
    }

    @Override
    public int getFactor() {
        return multiplier;
    }

    @Override
    public int getMinutes() {
        // Total interval length in minutes: unit length scaled by the factor.
        return multiplier * baseUnit.getMinutes();
    }
}
| apache-2.0 |
kr9ly/trout | src/main/java/net/kr9ly/trout/Setter.java | 908 | package net.kr9ly.trout;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Copyright 2015 kr9ly
* <br />
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <br />
* http://www.apache.org/licenses/LICENSE-2.0
* <br />
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Marks a method as a setter, retained at runtime for reflective lookup.
 * The {@code value} is presumably the key/name this setter binds to — TODO
 * confirm against the code that reads this annotation (not in this file).
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
public @interface Setter {
    String value();
}
| apache-2.0 |
manolisep/Android | ApplicationLifeCycle/ApplicationLifeCycle.java | 3458 | package br.com.netpoint.integraapp.com.core;
import android.app.Activity;
import android.app.Application;
import android.os.Bundle;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
/**
 * Tracks whether the application as a whole is in the foreground or the
 * background by counting started/stopped activities, and notifies registered
 * observers on every transition.
 * <p>
 * Created by Emmanouil Nicolas Papadimitropoulos on 31/01/2017.
 */
public class ApplicationLifeCycle {

    /**
     * Callback fired (on a freshly spawned background thread) whenever the
     * application moves to the foreground or background.
     */
    public interface AppLifeCycleCallback {
        void onApplicationForeground();
        void onApplicationBackground();
    }

    // Maps each activity's local class name to 1 (started) or 0 (stopped).
    // Touched only from the activity lifecycle callbacks.
    private static HashMap<String, Integer> activities;

    /**
     * Must be called once (e.g. from {@code Application.onCreate()}) before
     * any other method of this class is used.
     *
     * @param application the application whose activity lifecycle is observed
     */
    public static void Setup(Application application) {
        activities = new HashMap<String, Integer>();
        application.registerActivityLifecycleCallbacks(new Application.ActivityLifecycleCallbacks() {

            @Override
            public void onActivityCreated(Activity activity, Bundle bundle) {
            }

            @Override
            public void onActivityStarted(Activity activity) {
                // Mark this activity as visible and re-evaluate the app state.
                activities.put(activity.getLocalClassName(), 1);
                notifyLifeCycleObservers();
            }

            @Override
            public void onActivityResumed(Activity activity) {
            }

            @Override
            public void onActivityPaused(Activity activity) {
            }

            @Override
            public void onActivityStopped(Activity activity) {
                // Mark this activity as no longer visible and re-evaluate.
                activities.put(activity.getLocalClassName(), 0);
                notifyLifeCycleObservers();
            }

            @Override
            public void onActivitySaveInstanceState(Activity activity, Bundle bundle) {
            }

            @Override
            public void onActivityDestroyed(Activity activity) {
            }
        });
    }

    // Observer list; all access is guarded by the class lock (the
    // register/unregister/notify methods are static synchronized) so
    // registration from other threads cannot race the notification loop.
    private static final List<AppLifeCycleCallback> lifecycleObservers = new ArrayList<AppLifeCycleCallback>();

    /** Registers an observer; duplicate registrations are ignored. */
    public static synchronized void registerLifecycleObserver(AppLifeCycleCallback observer) {
        if (!lifecycleObservers.contains(observer)) {
            lifecycleObservers.add(observer);
        }
    }

    /** Removes a previously registered observer; unknown observers are ignored. */
    public static synchronized void unregisterLifecycleObserver(AppLifeCycleCallback observer) {
        lifecycleObservers.remove(observer);
    }

    /**
     * Fires the appropriate callback on every observer, but only when the
     * foreground/background state actually changed since the last call.
     */
    private static synchronized void notifyLifeCycleObservers() {
        if (isApplicationInBackground() != isApplicationInBackgroundCache) {
            isApplicationInBackgroundCache = isApplicationInBackground();
            // Snapshot the state now: the mutable static field could change
            // again before the worker threads below actually run, which would
            // deliver the wrong callback.
            final boolean wentToBackground = isApplicationInBackgroundCache;
            for (final AppLifeCycleCallback observer : lifecycleObservers) {
                new Thread(new Runnable() {
                    @Override
                    public void run() {
                        if (wentToBackground) {
                            observer.onApplicationBackground();
                        } else {
                            observer.onApplicationForeground();
                        }
                    }
                }).start();
            }
        }
    }

    // Last reported state, used to fire callbacks only on real transitions.
    private static boolean isApplicationInBackgroundCache;

    /**
     * Check if any activity is in the foreground.
     *
     * @return true when no tracked activity is currently started
     */
    public static boolean isApplicationInBackground() {
        for (Integer state : activities.values()) {
            if (state == 1) {
                return false;
            }
        }
        return true;
    }
}
| apache-2.0 |
ifnul/ums-backend | is-lnu-resource/src/main/java/org/lnu/is/resource/benefit/BenefitResource.java | 4267 | package org.lnu.is.resource.benefit;
import java.text.MessageFormat;
import java.util.Date;
import java.util.Objects;

import javax.validation.constraints.Min;
import javax.validation.constraints.NotNull;

import org.lnu.is.annotation.CrudableResource;
import org.lnu.is.resource.ApiResource;
/**
* Benefit Resource converter.
* @author ivanursul
*
*/
@CrudableResource
public class BenefitResource extends ApiResource {
private Long parentId;
@NotNull(message = "Field required")
@Min(value = 1, message = "Minimal value is 1")
private Long benefitTypeId;
@NotNull(message = "Field required")
private String name;
@NotNull(message = "Field required")
private String abbrName;
@NotNull(message = "Field required")
private String description;
@NotNull(message = "Field required")
private Date begDate;
@NotNull(message = "Field required")
private Date endDate;
@Override
public String getUri() {
return MessageFormat.format("/benefits/{0}", getId());
}
@Override
public String getRootUri() {
return "/benefits";
}
public Long getBenefitTypeId() {
return benefitTypeId;
}
public void setBenefitTypeId(final Long benefitTypeId) {
this.benefitTypeId = benefitTypeId;
}
public Long getParentId() {
return parentId;
}
public void setParentId(final Long parentId) {
this.parentId = parentId;
}
public String getAbbrName() {
return abbrName;
}
public void setAbbrName(final String abbrName) {
this.abbrName = abbrName;
}
public String getName() {
return name;
}
public void setName(final String name) {
this.name = name;
}
public Date getBegDate() {
return begDate;
}
public void setBegDate(final Date begDate) {
this.begDate = begDate;
}
public Date getEndDate() {
return endDate;
}
public void setEndDate(final Date endDate) {
this.endDate = endDate;
}
public String getDescription() {
return description;
}
public void setDescription(final String description) {
this.description = description;
}
@Override
public int hashCode() {
final int prime = 31;
int result = super.hashCode();
result = prime * result
+ ((abbrName == null) ? 0 : abbrName.hashCode());
result = prime * result + ((begDate == null) ? 0 : begDate.hashCode());
result = prime * result
+ ((benefitTypeId == null) ? 0 : benefitTypeId.hashCode());
result = prime * result
+ ((description == null) ? 0 : description.hashCode());
result = prime * result + ((endDate == null) ? 0 : endDate.hashCode());
result = prime * result + ((name == null) ? 0 : name.hashCode());
result = prime * result
+ ((parentId == null) ? 0 : parentId.hashCode());
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (!super.equals(obj)) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
BenefitResource other = (BenefitResource) obj;
if (abbrName == null) {
if (other.abbrName != null) {
return false;
}
} else if (!abbrName.equals(other.abbrName)) {
return false;
}
if (begDate == null) {
if (other.begDate != null) {
return false;
}
} else if (!begDate.equals(other.begDate)) {
return false;
}
if (benefitTypeId == null) {
if (other.benefitTypeId != null) {
return false;
}
} else if (!benefitTypeId.equals(other.benefitTypeId)) {
return false;
}
if (description == null) {
if (other.description != null) {
return false;
}
} else if (!description.equals(other.description)) {
return false;
}
if (endDate == null) {
if (other.endDate != null) {
return false;
}
} else if (!endDate.equals(other.endDate)) {
return false;
}
if (name == null) {
if (other.name != null) {
return false;
}
} else if (!name.equals(other.name)) {
return false;
}
if (parentId == null) {
if (other.parentId != null) {
return false;
}
} else if (!parentId.equals(other.parentId)) {
return false;
}
return true;
}
/**
 * {@inheritDoc}
 *
 * <p>Produces the same single-line representation as before, e.g.
 * {@code BenefitResource [benefitTypeId=..., parentId=..., ...]}.</p>
 */
@Override
public String toString() {
    final StringBuilder text = new StringBuilder("BenefitResource [benefitTypeId=");
    text.append(benefitTypeId)
        .append(", parentId=").append(parentId)
        .append(", abbrName=").append(abbrName)
        .append(", name=").append(name)
        .append(", begDate=").append(begDate)
        .append(", endDate=").append(endDate)
        .append(", description=").append(description)
        .append(']');
    return text.toString();
}
}
| apache-2.0 |
rbieniek/flow-ninja | ninja-persistence-generic/src/main/java/org/flowninja/persistence/generic/services/IAdminPersistenceService.java | 2465 | /**
*
*/
package org.flowninja.persistence.generic.services;
import java.util.Set;
import org.flowninja.persistence.generic.types.AdminRecord;
import org.flowninja.persistence.generic.types.RecordAlreadyExistsException;
import org.flowninja.persistence.generic.types.RecordNotFoundException;
import org.flowninja.types.generic.AdminKey;
import org.flowninja.types.generic.AuthorityKey;
/**
* Service interface to be implemented by persistence providers for admin records
*
* @author rainer
*
*/
public interface IAdminPersistenceService {

    /**
     * Log in an admin with the given credentials.
     *
     * @param userName user name
     * @param password password
     * @return the admin record if the login is successful, <code>null</code> otherwise
     */
    public AdminRecord login(String userName, String password);

    /**
     * Find an admin record by the user name.
     *
     * @param userName the user name
     * @return the admin record, or <code>null</code> if no admin with that name exists
     */
    public AdminRecord findByUserName(String userName);

    /**
     * Find an admin record by the key.
     *
     * @param key the admin key to look up
     * @return the admin record, or <code>null</code> if the key is unknown
     */
    public AdminRecord findByKey(AdminKey key);

    /**
     * List all admin records.
     *
     * @return a set of all known admin records
     */
    public Set<AdminRecord> listAdmins();

    /**
     * Create an admin record.
     *
     * @param userName the user name
     * @param password the password
     * @param authorities authorities assigned to the admin
     * @return the newly created admin record
     * @throws RecordAlreadyExistsException if an admin account with this user name already exists
     */
    public AdminRecord createAdmin(String userName, String password,
            Set<AuthorityKey> authorities) throws RecordAlreadyExistsException;

    /**
     * Assign authorities to an admin.
     *
     * @param key the administrator key
     * @param authorities the authorities to assign
     * @return the updated admin record
     * @throws RecordNotFoundException if the admin is not found
     */
    public AdminRecord assignAuthorities(AdminKey key, Set<AuthorityKey> authorities) throws RecordNotFoundException;

    /**
     * Assign a new password to an administrator.
     *
     * @param key administrator key
     * @param password new password
     * @return the administrator record
     * @throws RecordNotFoundException if the administrator key is not found in the database
     */
    public AdminRecord assignPassword(AdminKey key, String password) throws RecordNotFoundException;

    /**
     * Delete an admin record.
     *
     * @param key the administrator key
     * @throws RecordNotFoundException if the admin is not found
     */
    public void deleteAdmin(AdminKey key) throws RecordNotFoundException;
}
| apache-2.0 |
Servoy/wicket | wicket-ioc/src/main/java/org/apache/wicket/injection/web/InjectorHolder.java | 3317 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.wicket.injection.web;
import org.apache.wicket.Application;
import org.apache.wicket.MetaDataKey;
import org.apache.wicket.injection.ConfigurableInjector;
/**
* Holds a reference to the injector that will be used to automatically initialize components that
* are used. Usually the application class should set the injector in the holder when it
* initializes.
* <p>
* This class can be used for unit-testing to switch the standard injector with an injector that
* will lookup dependencies from a mock application context.
* <p>
*
* <pre>
* class MockSpringInjector extends SpringInjector
* {
* protected ISpringContextLocator getContextLocator()
* {
* return new MockContextLocator();
* }
* }
*
* InjectorHolder.setInjector(new MockSpringInjector());
*
* // from this point on InjectableWebPage and InjectablePanel
* // will be injected using the MockSpringInjector
* </pre>
*
* @author Igor Vaynberg (ivaynberg)
*
* TODO shouldn't we move this class to org.apache.wicket.injection ?
*/
public class InjectorHolder
{
    /** Per-application metadata slot under which the injector is stored. */
    private static final MetaDataKey<ConfigurableInjector> INJECTOR_KEY = new MetaDataKey<ConfigurableInjector>()
    {
        private static final long serialVersionUID = 1L;
    };

    /**
     * Gets the injector bound to the current application.
     *
     * NOTICE this method is not thread safe if setInjector() is used
     *
     * @return injector
     */
    public static ConfigurableInjector getInjector()
    {
        final ConfigurableInjector bound = Application.get().getMetaData(INJECTOR_KEY);
        if (bound != null)
        {
            return bound;
        }
        throw new IllegalStateException("InjectorHolder has not been assigned an injector. "
            + "Use InjectorHolder.setInjector() to assign an injector. "
            + "In most cases this should be done once inside "
            + "your WebApplication subclass's init() method.");
    }

    /**
     * Binds an injector to the current application.
     *
     * NOTICE this method is not thread safe.
     *
     * @param newInjector
     *            new injector
     */
    public static void setInjector(ConfigurableInjector newInjector)
    {
        setInjector(newInjector, Application.get());
    }

    /**
     * Binds an injector to the given application.
     *
     * NOTICE this method is not thread safe.
     *
     * @param newInjector
     *            new injector
     * @param application
     *            application instance to which the injector will be bound
     */
    public static void setInjector(ConfigurableInjector newInjector, Application application)
    {
        application.setMetaData(INJECTOR_KEY, newInjector);
    }
}
| apache-2.0 |
xushaomin/apple-auto | apple-auto-storager/apple-auto-storager-location/src/main/java/com/appleframework/auto/storager/location/key/LocationRowkey.java | 990 | package com.appleframework.auto.storager.location.key;
import org.apache.hadoop.hbase.util.Bytes;
import com.appleframework.auto.bean.location.Location;
import com.appleframework.auto.storager.location.utils.StringUtils;
import com.appleframework.data.hbase.client.RowKey;
/**
 * HBase row key for {@link Location} records.
 *
 * Row key layout: the account identifier left-padded with zeros to 16
 * characters, followed by the location's time value.
 */
public class LocationRowkey implements RowKey {

    // The encoded row key string; built once in a constructor.
    private String row;

    /**
     * Builds a row key from a location bean.
     *
     * @param bo the location to encode
     */
    public LocationRowkey(Location bo) {
        final String paddedAccount = StringUtils.zeroBeforeFill(bo.getAccount(), 16);
        this.row = paddedAccount + bo.getTime();
    }

    /**
     * Wraps an already-encoded row key string.
     *
     * @param row the encoded row key
     */
    public LocationRowkey(String row) {
        this.row = row;
    }

    /** Factory equivalent of {@link #LocationRowkey(Location)}. */
    public static LocationRowkey create(Location bo) {
        return new LocationRowkey(bo);
    }

    /** Factory equivalent of {@link #LocationRowkey(String)}. */
    public static LocationRowkey create(String row) {
        return new LocationRowkey(row);
    }

    @Override
    public byte[] toBytes() {
        return Bytes.toBytes(this.row);
    }

    /** @return the encoded row key string */
    public String getRow() {
        return this.row;
    }

    @Override
    public String toString() {
        return this.row;
    }
} | apache-2.0 |
netopyr/reduxfx | reduxfx-view/src/main/java/com/netopyr/reduxfx/vscenegraph/builders/ButtonBaseBuilder.java | 1831 | package com.netopyr.reduxfx.vscenegraph.builders;
import com.netopyr.reduxfx.vscenegraph.VNode;
import com.netopyr.reduxfx.vscenegraph.event.VEventHandler;
import com.netopyr.reduxfx.vscenegraph.event.VEventType;
import com.netopyr.reduxfx.vscenegraph.property.VProperty;
import javafx.event.ActionEvent;
import io.vavr.collection.Array;
import io.vavr.collection.Map;
import io.vavr.control.Option;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
import static com.netopyr.reduxfx.vscenegraph.event.VEventType.ACTION;
/**
 * Builder for virtual {@code ButtonBase} nodes in the virtual scene graph.
 * Extends {@link LabeledBuilder} with the {@code onAction} event hook.
 *
 * @param <B> the self-referential builder type
 */
public class ButtonBaseBuilder<B extends ButtonBaseBuilder<B>> extends LabeledBuilder<B> {

    public ButtonBaseBuilder(Class<?> nodeClass,
                             Map<String, Array<VNode>> childrenMap,
                             Map<String, Option<VNode>> singleChildMap,
                             Map<String, VProperty> properties,
                             Map<VEventType, VEventHandler> eventHandlers) {
        super(nodeClass, childrenMap, singleChildMap, properties, eventHandlers);
    }

    /** Creates a new builder instance carrying the given state (copy-on-write step). */
    @SuppressWarnings("unchecked")
    @Override
    protected B create(
            Map<String, Array<VNode>> newChildrenMap,
            Map<String, Option<VNode>> newSingleChildMap,
            Map<String, VProperty> newProperties,
            Map<VEventType, VEventHandler> newEventHandlers) {
        final ButtonBaseBuilder<B> copy = new ButtonBaseBuilder<>(
                getNodeClass(), newChildrenMap, newSingleChildMap, newProperties, newEventHandlers);
        return (B) copy;
    }

    /**
     * Registers a handler for the ACTION event.
     *
     * @param handler the action-event handler
     * @return the updated builder
     */
    public B onAction(VEventHandler<ActionEvent> handler) {
        return onEvent(ACTION, handler);
    }

    @Override
    public String toString() {
        final ToStringBuilder builder = new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE);
        builder.appendSuper(super.toString());
        return builder.toString();
    }
}
| apache-2.0 |
madanadit/alluxio | tests/src/test/java/alluxio/server/ft/MultiWorkerIntegrationTest.java | 9353 | /*
* The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
* (the "License"). You may not use this work except in compliance with the License, which is
* available at www.apache.org/licenses/LICENSE-2.0
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied, as more fully set forth in the License.
*
* See the NOTICE file distributed with this work for information regarding copyright ownership.
*/
package alluxio.server.ft;
import static org.junit.Assert.assertEquals;
import alluxio.AlluxioURI;
import alluxio.Constants;
import alluxio.client.block.policy.BlockLocationPolicy;
import alluxio.client.block.policy.options.GetWorkerOptions;
import alluxio.client.file.FileSystemContext;
import alluxio.conf.AlluxioConfiguration;
import alluxio.conf.PropertyKey;
import alluxio.client.WriteType;
import alluxio.client.block.AlluxioBlockStore;
import alluxio.client.block.BlockWorkerInfo;
import alluxio.client.file.FileInStream;
import alluxio.client.file.FileSystem;
import alluxio.client.file.FileSystemTestUtils;
import alluxio.client.file.URIStatus;
import alluxio.client.file.options.InStreamOptions;
import alluxio.client.file.options.OutStreamOptions;
import alluxio.conf.ServerConfiguration;
import alluxio.grpc.CreateFilePOptions;
import alluxio.grpc.OpenFilePOptions;
import alluxio.grpc.WritePType;
import alluxio.testutils.BaseIntegrationTest;
import alluxio.testutils.LocalAlluxioClusterResource;
import alluxio.util.io.BufferUtils;
import alluxio.wire.BlockInfo;
import alluxio.wire.FileBlockInfo;
import alluxio.wire.WorkerNetAddress;
import com.google.common.io.ByteStreams;
import org.apache.commons.io.IOUtils;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.List;
import java.util.stream.StreamSupport;
/**
* Tests a cluster containing multiple workers.
*/
public final class MultiWorkerIntegrationTest extends BaseIntegrationTest {
  private static final int NUM_WORKERS = 4;
  private static final int WORKER_MEMORY_SIZE_BYTES = Constants.MB;
  // Half a worker's memory, so a single worker holds at most two blocks.
  private static final int BLOCK_SIZE_BYTES = WORKER_MEMORY_SIZE_BYTES / 2;

  /**
   * Block location policy that always picks the worker whose address matches the
   * statically configured {@link #sWorkerAddress}. Used to pin a test file onto
   * one specific worker.
   */
  public static class FindFirstBlockLocationPolicy implements BlockLocationPolicy {
    // Set this prior to sending the create request to FSM.
    private static WorkerNetAddress sWorkerAddress;

    public FindFirstBlockLocationPolicy(AlluxioConfiguration conf) {
    }

    @Override
    public WorkerNetAddress getWorker(GetWorkerOptions options) {
      // Throws NoSuchElementException if no candidate matches sWorkerAddress.
      return StreamSupport.stream(options.getBlockWorkerInfos().spliterator(), false)
          .filter(x -> x.getNetAddress().equals(sWorkerAddress)).findFirst().get()
          .getNetAddress();
    }
  }

  // Local cluster with NUM_WORKERS workers, rebuilt per test by the JUnit rule.
  @Rule
  public LocalAlluxioClusterResource mResource =
      new LocalAlluxioClusterResource.Builder()
          .setProperty(PropertyKey.WORKER_MEMORY_SIZE, WORKER_MEMORY_SIZE_BYTES)
          .setProperty(PropertyKey.USER_BLOCK_SIZE_BYTES_DEFAULT, BLOCK_SIZE_BYTES)
          .setProperty(PropertyKey.USER_FILE_BUFFER_BYTES, BLOCK_SIZE_BYTES)
          .setNumWorkers(NUM_WORKERS)
          .build();

  /**
   * Writes a file as large as the cluster's total worker memory (round-robin
   * placement) and verifies it is fully in Alluxio and fully readable.
   */
  @Test
  @LocalAlluxioClusterResource.Config(confParams = {
      PropertyKey.Name.USER_BLOCK_WRITE_LOCATION_POLICY,
      "alluxio.client.block.policy.RoundRobinPolicy"
  })
  public void writeLargeFile() throws Exception {
    int fileSize = NUM_WORKERS * WORKER_MEMORY_SIZE_BYTES;
    AlluxioURI file = new AlluxioURI("/test");
    FileSystem fs = mResource.get().getClient();
    FileSystemTestUtils.createByteFile(fs, file.getPath(), fileSize,
        CreateFilePOptions.newBuilder().setWriteType(WritePType.MUST_CACHE).build());
    URIStatus status = fs.getStatus(file);
    assertEquals(100, status.getInAlluxioPercentage());
    try (FileInStream inStream = fs.openFile(file)) {
      assertEquals(fileSize, IOUtils.toByteArray(inStream).length);
    }
  }

  /**
   * Verifies a buffered read survives the loss of the worker it started on:
   * read part of the file, replicate the blocks elsewhere, stop the first
   * worker, then finish the read.
   */
  @Test
  @LocalAlluxioClusterResource.Config(confParams = {PropertyKey.Name.USER_SHORT_CIRCUIT_ENABLED,
      "false", PropertyKey.Name.USER_BLOCK_SIZE_BYTES_DEFAULT, "16MB",
      PropertyKey.Name.USER_NETWORK_READER_CHUNK_SIZE_BYTES, "64KB",
      PropertyKey.Name.WORKER_MEMORY_SIZE, "1GB"})
  public void readRecoverFromLostWorker() throws Exception {
    int offset = 17 * Constants.MB;
    int length = 33 * Constants.MB;
    int total = offset + length;
    // creates a test file on one worker
    AlluxioURI filePath = new AlluxioURI("/test");
    createFileOnWorker(total, filePath, mResource.get().getWorkerAddress());
    FileSystem fs = mResource.get().getClient();
    try (FileInStream in = fs.openFile(filePath, OpenFilePOptions.getDefaultInstance())) {
      byte[] buf = new byte[total];
      int size = in.read(buf, 0, offset);
      // Copy every block to a second worker before killing the first,
      // so the in-flight stream can fail over. Ordering here is essential.
      replicateFileBlocks(filePath);
      mResource.get().getWorkerProcess().stop();
      size += in.read(buf, offset, length);
      Assert.assertEquals(total, size);
      Assert.assertTrue(BufferUtils.equalIncreasingByteArray(offset, size, buf));
    }
  }

  /**
   * Same failover scenario as {@link #readRecoverFromLostWorker()}, but the
   * remainder of the file is consumed one byte at a time via {@code read()}.
   */
  @Test
  @LocalAlluxioClusterResource.Config(confParams = {PropertyKey.Name.USER_SHORT_CIRCUIT_ENABLED,
      "false", PropertyKey.Name.USER_BLOCK_SIZE_BYTES_DEFAULT, "4MB",
      PropertyKey.Name.USER_NETWORK_READER_CHUNK_SIZE_BYTES, "64KB",
      PropertyKey.Name.WORKER_MEMORY_SIZE, "1GB"})
  public void readOneRecoverFromLostWorker() throws Exception {
    int offset = 1 * Constants.MB;
    int length = 5 * Constants.MB;
    int total = offset + length;
    // creates a test file on one worker
    AlluxioURI filePath = new AlluxioURI("/test");
    FileSystem fs = mResource.get().getClient();
    createFileOnWorker(total, filePath, mResource.get().getWorkerAddress());
    try (FileInStream in = fs.openFile(filePath, OpenFilePOptions.getDefaultInstance())) {
      byte[] buf = new byte[total];
      int size = in.read(buf, 0, offset);
      replicateFileBlocks(filePath);
      mResource.get().getWorkerProcess().stop();
      for (int i = 0; i < length; i++) {
        int result = in.read();
        // The test file contains increasing byte values, truncated to a byte.
        Assert.assertEquals(result, (i + size) & 0xff);
      }
    }
  }

  /**
   * Same failover scenario, exercised through {@code positionedRead} after the
   * original worker is gone.
   */
  @Test
  @LocalAlluxioClusterResource.Config(confParams = {PropertyKey.Name.USER_SHORT_CIRCUIT_ENABLED,
      "false", PropertyKey.Name.USER_BLOCK_SIZE_BYTES_DEFAULT, "4MB",
      PropertyKey.Name.USER_NETWORK_READER_CHUNK_SIZE_BYTES, "64KB",
      PropertyKey.Name.WORKER_MEMORY_SIZE, "1GB"})
  public void positionReadRecoverFromLostWorker() throws Exception {
    int offset = 1 * Constants.MB;
    int length = 7 * Constants.MB;
    int total = offset + length;
    // creates a test file on one worker
    AlluxioURI filePath = new AlluxioURI("/test");
    FileSystem fs = mResource.get().getClient();
    createFileOnWorker(total, filePath, mResource.get().getWorkerAddress());
    try (FileInStream in = fs.openFile(filePath, OpenFilePOptions.getDefaultInstance())) {
      byte[] buf = new byte[length];
      replicateFileBlocks(filePath);
      mResource.get().getWorkerProcess().stop();
      int size = in.positionedRead(offset, buf, 0, length);
      Assert.assertEquals(length, size);
      Assert.assertTrue(BufferUtils.equalIncreasingByteArray(offset, size, buf));
    }
  }

  /**
   * Creates a {@code total}-byte increasing-byte file pinned to the given
   * worker by temporarily swapping the write-location policy to
   * {@link FindFirstBlockLocationPolicy}.
   *
   * NOTE(review): relies on a static field and on mutating server configuration;
   * not safe if tests ever run concurrently — confirm tests stay serialized.
   */
  private void createFileOnWorker(int total, AlluxioURI filePath, WorkerNetAddress address)
      throws IOException {
    FindFirstBlockLocationPolicy.sWorkerAddress = address;
    String previousPolicy = ServerConfiguration.get(PropertyKey.USER_BLOCK_WRITE_LOCATION_POLICY);
    // This only works because the client instance hasn't been created yet.
    ServerConfiguration.set(PropertyKey.USER_BLOCK_WRITE_LOCATION_POLICY,
        FindFirstBlockLocationPolicy.class.getName());
    FileSystemTestUtils.createByteFile(mResource.get().getClient(), filePath,
        CreateFilePOptions.newBuilder().setWriteType(WritePType.MUST_CACHE).build(),
        total);
    ServerConfiguration.set(PropertyKey.USER_BLOCK_WRITE_LOCATION_POLICY, previousPolicy);
  }

  /**
   * Copies every block of {@code filePath} from its first location to some
   * other worker, so the file remains readable after the first worker stops.
   *
   * @param filePath the file whose blocks are replicated
   * @throws Exception on any cluster or I/O failure
   */
  private void replicateFileBlocks(AlluxioURI filePath) throws Exception {
    FileSystemContext fsContext = FileSystemContext.create(ServerConfiguration.global());
    AlluxioBlockStore store = AlluxioBlockStore.create(fsContext);
    URIStatus status = mResource.get().getClient().getStatus(filePath);
    List<FileBlockInfo> blocks = status.getFileBlockInfos();
    List<BlockWorkerInfo> workers = store.getAllWorkers();
    for (FileBlockInfo block : blocks) {
      BlockInfo blockInfo = block.getBlockInfo();
      WorkerNetAddress src = blockInfo.getLocations().get(0).getWorkerAddress();
      // Pick any worker other than the block's current (first) location.
      WorkerNetAddress dest = workers.stream()
          .filter(candidate -> !candidate.getNetAddress().equals(src))
          .findFirst()
          .get()
          .getNetAddress();
      try (OutputStream outStream = store.getOutStream(blockInfo.getBlockId(),
          blockInfo.getLength(), dest, OutStreamOptions.defaults(fsContext.getClientContext())
          .setBlockSizeBytes(8 * Constants.MB).setWriteType(WriteType.MUST_CACHE))) {
        try (InputStream inStream = store.getInStream(blockInfo.getBlockId(),
            new InStreamOptions(status, ServerConfiguration.global()))) {
          ByteStreams.copy(inStream, outStream);
        }
      }
    }
  }
}
| apache-2.0 |
davidsky11/DesignMode | src/main/java/com/kv/creative/factory/OracleDaoFactory.java | 226 | package com.kv.creative.factory;
public class OracleDaoFactory implements IFactory {
public IUserDao createUserDao() {
return new UserOracleDao();
}
public IDeptDao createDeptDao() {
return new DeptOracleDao();
}
}
| apache-2.0 |
McLeodMoores/starling | projects/integration/src/main/java/com/opengamma/integration/viewer/status/impl/ViewStatusKeyBean.java | 12912 | /**
* Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.integration.viewer.status.impl;
import java.util.Map;
import org.joda.beans.Bean;
import org.joda.beans.BeanBuilder;
import org.joda.beans.BeanDefinition;
import org.joda.beans.JodaBeanUtils;
import org.joda.beans.MetaProperty;
import org.joda.beans.Property;
import org.joda.beans.PropertyDefinition;
import org.joda.beans.impl.direct.DirectBean;
import org.joda.beans.impl.direct.DirectBeanBuilder;
import org.joda.beans.impl.direct.DirectMetaBean;
import org.joda.beans.impl.direct.DirectMetaProperty;
import org.joda.beans.impl.direct.DirectMetaPropertyMap;
import com.opengamma.integration.viewer.status.ViewStatusKey;
import com.opengamma.util.ArgumentChecker;
/**
 * A Joda-Bean implementation of {@link ViewStatusKey}: the mutable
 * (securityType, valueRequirementName, currency, targetType) tuple used to
 * key view-status results. All four components are required (not-null).
 */
@BeanDefinition
public class ViewStatusKeyBean extends DirectBean implements ViewStatusKey {

  // Fields are managed by the Joda-Beans code generator; accessors and bean
  // metadata live in the AUTOGENERATED section below. Do not edit that
  // section by hand — regenerate instead.
  @PropertyDefinition(validate = "notNull", overrideGet = true)
  private String _securityType;

  @PropertyDefinition(validate = "notNull", overrideGet = true)
  private String _valueRequirementName;

  @PropertyDefinition(validate = "notNull", overrideGet = true)
  private String _currency;

  @PropertyDefinition(validate = "notNull", overrideGet = true)
  private String _targetType;

  /**
   * Creates an instance.
   *
   * @param securityType
   *          the security type, not-null.
   * @param valueRequirementName
   *          the value name, not-null.
   * @param currency
   *          the currency, not-null.
   * @param targetType
   *          the target type, not-null.
   */
  public ViewStatusKeyBean(final String securityType, final String valueRequirementName, final String currency, final String targetType) {
    ArgumentChecker.notNull(securityType, "securityType");
    ArgumentChecker.notNull(valueRequirementName, "valueRequirementName");
    ArgumentChecker.notNull(currency, "currency");
    ArgumentChecker.notNull(targetType, "targetType");
    setSecurityType(securityType);
    setValueRequirementName(valueRequirementName);
    setCurrency(currency);
    setTargetType(targetType);
  }

  /**
   * Constructor for builder.
   */
  ViewStatusKeyBean() {
  }

  //------------------------- AUTOGENERATED START -------------------------
  ///CLOVER:OFF
  /**
   * The meta-bean for {@code ViewStatusKeyBean}.
   * @return the meta-bean, not null
   */
  public static ViewStatusKeyBean.Meta meta() {
    return ViewStatusKeyBean.Meta.INSTANCE;
  }

  static {
    JodaBeanUtils.registerMetaBean(ViewStatusKeyBean.Meta.INSTANCE);
  }

  @Override
  public ViewStatusKeyBean.Meta metaBean() {
    return ViewStatusKeyBean.Meta.INSTANCE;
  }

  //-----------------------------------------------------------------------
  /**
   * Gets the securityType.
   * @return the value of the property, not null
   */
  @Override
  public String getSecurityType() {
    return _securityType;
  }

  /**
   * Sets the securityType.
   * @param securityType the new value of the property, not null
   */
  public void setSecurityType(String securityType) {
    JodaBeanUtils.notNull(securityType, "securityType");
    this._securityType = securityType;
  }

  /**
   * Gets the the {@code securityType} property.
   * @return the property, not null
   */
  public final Property<String> securityType() {
    return metaBean().securityType().createProperty(this);
  }

  //-----------------------------------------------------------------------
  /**
   * Gets the valueRequirementName.
   * @return the value of the property, not null
   */
  @Override
  public String getValueRequirementName() {
    return _valueRequirementName;
  }

  /**
   * Sets the valueRequirementName.
   * @param valueRequirementName the new value of the property, not null
   */
  public void setValueRequirementName(String valueRequirementName) {
    JodaBeanUtils.notNull(valueRequirementName, "valueRequirementName");
    this._valueRequirementName = valueRequirementName;
  }

  /**
   * Gets the the {@code valueRequirementName} property.
   * @return the property, not null
   */
  public final Property<String> valueRequirementName() {
    return metaBean().valueRequirementName().createProperty(this);
  }

  //-----------------------------------------------------------------------
  /**
   * Gets the currency.
   * @return the value of the property, not null
   */
  @Override
  public String getCurrency() {
    return _currency;
  }

  /**
   * Sets the currency.
   * @param currency the new value of the property, not null
   */
  public void setCurrency(String currency) {
    JodaBeanUtils.notNull(currency, "currency");
    this._currency = currency;
  }

  /**
   * Gets the the {@code currency} property.
   * @return the property, not null
   */
  public final Property<String> currency() {
    return metaBean().currency().createProperty(this);
  }

  //-----------------------------------------------------------------------
  /**
   * Gets the targetType.
   * @return the value of the property, not null
   */
  @Override
  public String getTargetType() {
    return _targetType;
  }

  /**
   * Sets the targetType.
   * @param targetType the new value of the property, not null
   */
  public void setTargetType(String targetType) {
    JodaBeanUtils.notNull(targetType, "targetType");
    this._targetType = targetType;
  }

  /**
   * Gets the the {@code targetType} property.
   * @return the property, not null
   */
  public final Property<String> targetType() {
    return metaBean().targetType().createProperty(this);
  }

  //-----------------------------------------------------------------------
  @Override
  public ViewStatusKeyBean clone() {
    return JodaBeanUtils.cloneAlways(this);
  }

  @Override
  public boolean equals(Object obj) {
    if (obj == this) {
      return true;
    }
    if (obj != null && obj.getClass() == this.getClass()) {
      ViewStatusKeyBean other = (ViewStatusKeyBean) obj;
      return JodaBeanUtils.equal(getSecurityType(), other.getSecurityType()) &&
          JodaBeanUtils.equal(getValueRequirementName(), other.getValueRequirementName()) &&
          JodaBeanUtils.equal(getCurrency(), other.getCurrency()) &&
          JodaBeanUtils.equal(getTargetType(), other.getTargetType());
    }
    return false;
  }

  @Override
  public int hashCode() {
    int hash = getClass().hashCode();
    hash = hash * 31 + JodaBeanUtils.hashCode(getSecurityType());
    hash = hash * 31 + JodaBeanUtils.hashCode(getValueRequirementName());
    hash = hash * 31 + JodaBeanUtils.hashCode(getCurrency());
    hash = hash * 31 + JodaBeanUtils.hashCode(getTargetType());
    return hash;
  }

  @Override
  public String toString() {
    StringBuilder buf = new StringBuilder(160);
    buf.append("ViewStatusKeyBean{");
    int len = buf.length();
    toString(buf);
    if (buf.length() > len) {
      buf.setLength(buf.length() - 2);
    }
    buf.append('}');
    return buf.toString();
  }

  protected void toString(StringBuilder buf) {
    buf.append("securityType").append('=').append(JodaBeanUtils.toString(getSecurityType())).append(',').append(' ');
    buf.append("valueRequirementName").append('=').append(JodaBeanUtils.toString(getValueRequirementName())).append(',').append(' ');
    buf.append("currency").append('=').append(JodaBeanUtils.toString(getCurrency())).append(',').append(' ');
    buf.append("targetType").append('=').append(JodaBeanUtils.toString(getTargetType())).append(',').append(' ');
  }

  //-----------------------------------------------------------------------
  /**
   * The meta-bean for {@code ViewStatusKeyBean}.
   */
  public static class Meta extends DirectMetaBean {
    /**
     * The singleton instance of the meta-bean.
     */
    static final Meta INSTANCE = new Meta();

    /**
     * The meta-property for the {@code securityType} property.
     */
    private final MetaProperty<String> _securityType = DirectMetaProperty.ofReadWrite(
        this, "securityType", ViewStatusKeyBean.class, String.class);
    /**
     * The meta-property for the {@code valueRequirementName} property.
     */
    private final MetaProperty<String> _valueRequirementName = DirectMetaProperty.ofReadWrite(
        this, "valueRequirementName", ViewStatusKeyBean.class, String.class);
    /**
     * The meta-property for the {@code currency} property.
     */
    private final MetaProperty<String> _currency = DirectMetaProperty.ofReadWrite(
        this, "currency", ViewStatusKeyBean.class, String.class);
    /**
     * The meta-property for the {@code targetType} property.
     */
    private final MetaProperty<String> _targetType = DirectMetaProperty.ofReadWrite(
        this, "targetType", ViewStatusKeyBean.class, String.class);
    /**
     * The meta-properties.
     */
    private final Map<String, MetaProperty<?>> _metaPropertyMap$ = new DirectMetaPropertyMap(
        this, null,
        "securityType",
        "valueRequirementName",
        "currency",
        "targetType");

    /**
     * Restricted constructor.
     */
    protected Meta() {
    }

    @Override
    protected MetaProperty<?> metaPropertyGet(String propertyName) {
      switch (propertyName.hashCode()) {
        case 808245914:  // securityType
          return _securityType;
        case 1646585789:  // valueRequirementName
          return _valueRequirementName;
        case 575402001:  // currency
          return _currency;
        case 486622315:  // targetType
          return _targetType;
      }
      return super.metaPropertyGet(propertyName);
    }

    @Override
    public BeanBuilder<? extends ViewStatusKeyBean> builder() {
      return new DirectBeanBuilder<ViewStatusKeyBean>(new ViewStatusKeyBean());
    }

    @Override
    public Class<? extends ViewStatusKeyBean> beanType() {
      return ViewStatusKeyBean.class;
    }

    @Override
    public Map<String, MetaProperty<?>> metaPropertyMap() {
      return _metaPropertyMap$;
    }

    //-----------------------------------------------------------------------
    /**
     * The meta-property for the {@code securityType} property.
     * @return the meta-property, not null
     */
    public final MetaProperty<String> securityType() {
      return _securityType;
    }

    /**
     * The meta-property for the {@code valueRequirementName} property.
     * @return the meta-property, not null
     */
    public final MetaProperty<String> valueRequirementName() {
      return _valueRequirementName;
    }

    /**
     * The meta-property for the {@code currency} property.
     * @return the meta-property, not null
     */
    public final MetaProperty<String> currency() {
      return _currency;
    }

    /**
     * The meta-property for the {@code targetType} property.
     * @return the meta-property, not null
     */
    public final MetaProperty<String> targetType() {
      return _targetType;
    }

    //-----------------------------------------------------------------------
    @Override
    protected Object propertyGet(Bean bean, String propertyName, boolean quiet) {
      switch (propertyName.hashCode()) {
        case 808245914:  // securityType
          return ((ViewStatusKeyBean) bean).getSecurityType();
        case 1646585789:  // valueRequirementName
          return ((ViewStatusKeyBean) bean).getValueRequirementName();
        case 575402001:  // currency
          return ((ViewStatusKeyBean) bean).getCurrency();
        case 486622315:  // targetType
          return ((ViewStatusKeyBean) bean).getTargetType();
      }
      return super.propertyGet(bean, propertyName, quiet);
    }

    @Override
    protected void propertySet(Bean bean, String propertyName, Object newValue, boolean quiet) {
      switch (propertyName.hashCode()) {
        case 808245914:  // securityType
          ((ViewStatusKeyBean) bean).setSecurityType((String) newValue);
          return;
        case 1646585789:  // valueRequirementName
          ((ViewStatusKeyBean) bean).setValueRequirementName((String) newValue);
          return;
        case 575402001:  // currency
          ((ViewStatusKeyBean) bean).setCurrency((String) newValue);
          return;
        case 486622315:  // targetType
          ((ViewStatusKeyBean) bean).setTargetType((String) newValue);
          return;
      }
      super.propertySet(bean, propertyName, newValue, quiet);
    }

    @Override
    protected void validate(Bean bean) {
      JodaBeanUtils.notNull(((ViewStatusKeyBean) bean)._securityType, "securityType");
      JodaBeanUtils.notNull(((ViewStatusKeyBean) bean)._valueRequirementName, "valueRequirementName");
      JodaBeanUtils.notNull(((ViewStatusKeyBean) bean)._currency, "currency");
      JodaBeanUtils.notNull(((ViewStatusKeyBean) bean)._targetType, "targetType");
    }

  }
  ///CLOVER:ON
  //-------------------------- AUTOGENERATED END --------------------------
}
| apache-2.0 |
JerryLead/spark | core/src/main/java/org/apache/spark/memory/TaskMemoryManager.java | 18238 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.memory;
import javax.annotation.concurrent.GuardedBy;
import java.io.IOException;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.unsafe.memory.MemoryBlock;
import org.apache.spark.util.Utils;
/**
* Manages the memory allocated by an individual task.
* <p>
* Most of the complexity in this class deals with encoding of off-heap addresses into 64-bit longs.
* In off-heap mode, memory can be directly addressed with 64-bit longs. In on-heap mode, memory is
* addressed by the combination of a base Object reference and a 64-bit offset within that object.
* This is a problem when we want to store pointers to data structures inside of other structures,
* such as record pointers inside hashmaps or sorting buffers. Even if we decided to use 128 bits
* to address memory, we can't just store the address of the base object since it's not guaranteed
* to remain stable as the heap gets reorganized due to GC.
* <p>
* Instead, we use the following approach to encode record pointers in 64-bit longs: for off-heap
* mode, just store the raw address, and for on-heap mode use the upper 13 bits of the address to
* store a "page number" and the lower 51 bits to store an offset within this page. These page
* numbers are used to index into a "page table" array inside of the MemoryManager in order to
* retrieve the base object.
* <p>
* This allows us to address 8192 pages. In on-heap mode, the maximum page size is limited by the
* maximum size of a long[] array, allowing us to address 8192 * 2^32 * 8 bytes, which is
* approximately 35 terabytes of memory.
*/
public class TaskMemoryManager {

  private static final Logger logger = LoggerFactory.getLogger(TaskMemoryManager.class);

  /** The number of bits used to address the page table. */
  private static final int PAGE_NUMBER_BITS = 13;

  /** The number of bits used to encode offsets in data pages. */
  @VisibleForTesting
  static final int OFFSET_BITS = 64 - PAGE_NUMBER_BITS;  // 51

  /** The number of entries in the page table. */
  private static final int PAGE_TABLE_SIZE = 1 << PAGE_NUMBER_BITS;

  /**
   * Maximum supported data page size (in bytes). In principle, the maximum addressable page size is
   * (1L &lt;&lt; OFFSET_BITS) bytes, which is 2+ petabytes. However, the on-heap allocator's
   * maximum page size is limited by the maximum amount of data that can be stored in a long[]
   * array, which is (2^32 - 1) * 8 bytes (or 16 gigabytes). Therefore, we cap this at 16 gigabytes.
   */
  public static final long MAXIMUM_PAGE_SIZE_BYTES = ((1L << 31) - 1) * 8L;

  /** Bit mask for the lower 51 bits of a long. */
  private static final long MASK_LONG_LOWER_51_BITS = 0x7FFFFFFFFFFFFL;

  /**
   * Similar to an operating system's page table, this array maps page numbers into base object
   * pointers, allowing us to translate between the hashtable's internal 64-bit address
   * representation and the baseObject+offset representation which we use to support both in- and
   * off-heap addresses. When using an off-heap allocator, every entry in this map will be `null`.
   * When using an in-heap allocator, the entries in this map will point to pages' base objects.
   * Entries are added to this map as new data pages are allocated.
   */
  private final MemoryBlock[] pageTable = new MemoryBlock[PAGE_TABLE_SIZE];

  /**
   * Bitmap for tracking free pages.
   */
  private final BitSet allocatedPages = new BitSet(PAGE_TABLE_SIZE);

  private final MemoryManager memoryManager;

  private final long taskAttemptId;

  /**
   * Tracks whether we're in-heap or off-heap. For off-heap, we short-circuit most of these methods
   * without doing any masking or lookups. Since this branching should be well-predicted by the JIT,
   * this extra layer of indirection / abstraction hopefully shouldn't be too expensive.
   */
  final MemoryMode tungstenMemoryMode;

  /**
   * Tracks spillable memory consumers.
   */
  @GuardedBy("this")
  private final HashSet<MemoryConsumer> consumers;

  /**
   * The amount of memory that is acquired but not used.
   */
  // Bytes granted by the MemoryManager for a page allocation that subsequently failed with
  // OutOfMemoryError; they stay reserved until cleanUpAllAllocatedMemory() releases them.
  private volatile long acquiredButNotUsed = 0L;

  /**
   * Construct a new TaskMemoryManager.
   */
  public TaskMemoryManager(MemoryManager memoryManager, long taskAttemptId) {
    this.tungstenMemoryMode = memoryManager.tungstenMemoryMode();
    this.memoryManager = memoryManager;
    this.taskAttemptId = taskAttemptId;
    this.consumers = new HashSet<>();
  }

  /**
   * Acquire N bytes of memory for a consumer. If there is no enough memory, it will call
   * spill() of consumers to release more memory.
   *
   * @return number of bytes successfully granted (<= N).
   */
  public long acquireExecutionMemory(long required, MemoryConsumer consumer) {
    assert(required >= 0);
    assert(consumer != null);
    MemoryMode mode = consumer.getMode();
    // If we are allocating Tungsten pages off-heap and receive a request to allocate on-heap
    // memory here, then it may not make sense to spill since that would only end up freeing
    // off-heap memory. This is subject to change, though, so it may be risky to make this
    // optimization now in case we forget to undo it late when making changes.
    // All bookkeeping (consumer set, spill selection) happens under `this`.
    synchronized (this) {
      long got = memoryManager.acquireExecutionMemory(required, taskAttemptId, mode);

      // Try to release memory from other consumers first, then we can reduce the frequency of
      // spilling, avoid to have too many spilled files.
      if (got < required) {
        // Call spill() on other consumers to release memory
        // Sort the consumers according their memory usage. So we avoid spilling the same consumer
        // which is just spilled in last few times and re-spilling on it will produce many small
        // spill files.
        // Key = bytes used by a consumer; ascending order lets us pick the cheapest consumer
        // that can still satisfy the remaining need via ceilingEntry below.
        TreeMap<Long, List<MemoryConsumer>> sortedConsumers = new TreeMap<>();
        for (MemoryConsumer c: consumers) {
          if (c != consumer && c.getUsed() > 0 && c.getMode() == mode) {
            long key = c.getUsed();
            List<MemoryConsumer> list = sortedConsumers.computeIfAbsent(key, k -> new ArrayList<>(1));
            list.add(c);
          }
        }
        while (!sortedConsumers.isEmpty()) {
          // Get the consumer using the least memory more than the remaining required memory.
          Map.Entry<Long, List<MemoryConsumer>> currentEntry =
            sortedConsumers.ceilingEntry(required - got);
          // No consumer has used memory more than the remaining required memory.
          // Get the consumer of largest used memory.
          if (currentEntry == null) {
            currentEntry = sortedConsumers.lastEntry();
          }
          List<MemoryConsumer> cList = currentEntry.getValue();
          MemoryConsumer c = cList.remove(cList.size() - 1);
          if (cList.isEmpty()) {
            sortedConsumers.remove(currentEntry.getKey());
          }
          try {
            long released = c.spill(required - got, consumer);
            if (released > 0) {
              logger.debug("Task {} released {} from {} for {}", taskAttemptId,
                Utils.bytesToString(released), c, consumer);
              got += memoryManager.acquireExecutionMemory(required - got, taskAttemptId, mode);
              if (got >= required) {
                break;
              }
            }
          } catch (IOException e) {
            // A failed spill is unrecoverable for this task: surface it as an OOM error.
            logger.error("error while calling spill() on " + c, e);
            throw new OutOfMemoryError("error while calling spill() on " + c + " : "
              + e.getMessage());
          }
        }
      }

      // call spill() on itself
      if (got < required) {
        try {
          long released = consumer.spill(required - got, consumer);
          if (released > 0) {
            logger.debug("Task {} released {} from itself ({})", taskAttemptId,
              Utils.bytesToString(released), consumer);
            got += memoryManager.acquireExecutionMemory(required - got, taskAttemptId, mode);
          }
        } catch (IOException e) {
          logger.error("error while calling spill() on " + consumer, e);
          throw new OutOfMemoryError("error while calling spill() on " + consumer + " : "
            + e.getMessage());
        }
      }

      consumers.add(consumer);
      logger.debug("Task {} acquired {} for {}", taskAttemptId, Utils.bytesToString(got), consumer);
      return got;
    }
  }

  /**
   * Release N bytes of execution memory for a MemoryConsumer.
   */
  public void releaseExecutionMemory(long size, MemoryConsumer consumer) {
    logger.debug("Task {} release {} from {}", taskAttemptId, Utils.bytesToString(size), consumer);
    memoryManager.releaseExecutionMemory(size, taskAttemptId, consumer.getMode());
  }

  /**
   * Dump the memory usage of all consumers.
   */
  public void showMemoryUsage() {
    logger.info("Memory used in task " + taskAttemptId);
    synchronized (this) {
      long memoryAccountedForByConsumers = 0;
      for (MemoryConsumer c: consumers) {
        long totalMemUsage = c.getUsed();
        memoryAccountedForByConsumers += totalMemUsage;
        if (totalMemUsage > 0) {
          logger.info("Acquired by " + c + ": " + Utils.bytesToString(totalMemUsage));
        }
      }
      // Memory the MemoryManager attributes to this task but no tracked consumer claims.
      long memoryNotAccountedFor =
        memoryManager.getExecutionMemoryUsageForTask(taskAttemptId) - memoryAccountedForByConsumers;
      logger.info(
        "{} bytes of memory were used by task {} but are not associated with specific consumers",
        memoryNotAccountedFor, taskAttemptId);
      logger.info(
        "{} bytes of memory are used for execution and {} bytes of memory are used for storage",
        memoryManager.executionMemoryUsed(), memoryManager.storageMemoryUsed());
    }
  }

  /**
   * Return the page size in bytes.
   */
  public long pageSizeBytes() {
    return memoryManager.pageSizeBytes();
  }

  /**
   * Allocate a block of memory that will be tracked in the MemoryManager's page table; this is
   * intended for allocating large blocks of Tungsten memory that will be shared between operators.
   *
   * Returns `null` if there was not enough memory to allocate the page. May return a page that
   * contains fewer bytes than requested, so callers should verify the size of returned pages.
   */
  public MemoryBlock allocatePage(long size, MemoryConsumer consumer) {
    assert(consumer != null);
    assert(consumer.getMode() == tungstenMemoryMode);
    if (size > MAXIMUM_PAGE_SIZE_BYTES) {
      throw new IllegalArgumentException(
        "Cannot allocate a page with more than " + MAXIMUM_PAGE_SIZE_BYTES + " bytes");
    }

    // Reserve the memory first; the grant may be smaller than `size`.
    long acquired = acquireExecutionMemory(size, consumer);
    if (acquired <= 0) {
      return null;
    }

    final int pageNumber;
    synchronized (this) {
      pageNumber = allocatedPages.nextClearBit(0);
      if (pageNumber >= PAGE_TABLE_SIZE) {
        releaseExecutionMemory(acquired, consumer);
        throw new IllegalStateException(
          "Have already allocated a maximum of " + PAGE_TABLE_SIZE + " pages");
      }
      allocatedPages.set(pageNumber);
    }
    MemoryBlock page = null;
    try {
      page = memoryManager.tungstenMemoryAllocator().allocate(acquired);
    } catch (OutOfMemoryError e) {
      logger.warn("Failed to allocate a page ({} bytes), try again.", acquired);
      // there is no enough memory actually, it means the actual free memory is smaller than
      // MemoryManager thought, we should keep the acquired memory.
      synchronized (this) {
        acquiredButNotUsed += acquired;
        allocatedPages.clear(pageNumber);
      }
      // this could trigger spilling to free some pages.
      // NOTE(review): recursive retry; terminates because the reservation above shrinks the
      // apparent free memory each round -- confirm no pathological loop under heavy pressure.
      return allocatePage(size, consumer);
    }
    page.pageNumber = pageNumber;
    pageTable[pageNumber] = page;
    if (logger.isTraceEnabled()) {
      logger.trace("Allocate page number {} ({} bytes)", pageNumber, acquired);
    }
    return page;
  }

  /**
   * Free a block of memory allocated via {@link TaskMemoryManager#allocatePage}.
   */
  public void freePage(MemoryBlock page, MemoryConsumer consumer) {
    assert (page.pageNumber != -1) :
      "Called freePage() on memory that wasn't allocated with allocatePage()";
    assert(allocatedPages.get(page.pageNumber));
    // The pageTable slot is cleared outside the lock while the bitmap update is guarded by
    // `this`, mirroring the write order in allocatePage().
    pageTable[page.pageNumber] = null;
    synchronized (this) {
      allocatedPages.clear(page.pageNumber);
    }
    if (logger.isTraceEnabled()) {
      logger.trace("Freed page number {} ({} bytes)", page.pageNumber, page.size());
    }
    long pageSize = page.size();
    memoryManager.tungstenMemoryAllocator().free(page);
    releaseExecutionMemory(pageSize, consumer);
  }

  /**
   * Given a memory page and offset within that page, encode this address into a 64-bit long.
   * This address will remain valid as long as the corresponding page has not been freed.
   *
   * @param page a data page allocated by {@link TaskMemoryManager#allocatePage}/
   * @param offsetInPage an offset in this page which incorporates the base offset. In other words,
   *                     this should be the value that you would pass as the base offset into an
   *                     UNSAFE call (e.g. page.baseOffset() + something).
   * @return an encoded page address.
   */
  public long encodePageNumberAndOffset(MemoryBlock page, long offsetInPage) {
    if (tungstenMemoryMode == MemoryMode.OFF_HEAP) {
      // In off-heap mode, an offset is an absolute address that may require a full 64 bits to
      // encode. Due to our page size limitation, though, we can convert this into an offset that's
      // relative to the page's base offset; this relative offset will fit in 51 bits.
      offsetInPage -= page.getBaseOffset();
    }
    return encodePageNumberAndOffset(page.pageNumber, offsetInPage);
  }

  @VisibleForTesting
  public static long encodePageNumberAndOffset(int pageNumber, long offsetInPage) {
    assert (pageNumber != -1) : "encodePageNumberAndOffset called with invalid page";
    // Upper 13 bits = page number, lower 51 bits = offset within that page.
    return (((long) pageNumber) << OFFSET_BITS) | (offsetInPage & MASK_LONG_LOWER_51_BITS);
  }

  @VisibleForTesting
  public static int decodePageNumber(long pagePlusOffsetAddress) {
    return (int) (pagePlusOffsetAddress >>> OFFSET_BITS);
  }

  private static long decodeOffset(long pagePlusOffsetAddress) {
    return (pagePlusOffsetAddress & MASK_LONG_LOWER_51_BITS);
  }

  /**
   * Get the page associated with an address encoded by
   * {@link TaskMemoryManager#encodePageNumberAndOffset(MemoryBlock, long)}
   */
  public Object getPage(long pagePlusOffsetAddress) {
    if (tungstenMemoryMode == MemoryMode.ON_HEAP) {
      final int pageNumber = decodePageNumber(pagePlusOffsetAddress);
      assert (pageNumber >= 0 && pageNumber < PAGE_TABLE_SIZE);
      final MemoryBlock page = pageTable[pageNumber];
      assert (page != null);
      assert (page.getBaseObject() != null);
      return page.getBaseObject();
    } else {
      // Off-heap addresses have no base object; callers use the absolute offset alone.
      return null;
    }
  }

  /**
   * Get the offset associated with an address encoded by
   * {@link TaskMemoryManager#encodePageNumberAndOffset(MemoryBlock, long)}
   */
  public long getOffsetInPage(long pagePlusOffsetAddress) {
    final long offsetInPage = decodeOffset(pagePlusOffsetAddress);
    if (tungstenMemoryMode == MemoryMode.ON_HEAP) {
      return offsetInPage;
    } else {
      // In off-heap mode, an offset is an absolute address. In encodePageNumberAndOffset, we
      // converted the absolute address into a relative address. Here, we invert that operation:
      final int pageNumber = decodePageNumber(pagePlusOffsetAddress);
      assert (pageNumber >= 0 && pageNumber < PAGE_TABLE_SIZE);
      final MemoryBlock page = pageTable[pageNumber];
      assert (page != null);
      return page.getBaseOffset() + offsetInPage;
    }
  }

  /**
   * Clean up all allocated memory and pages. Returns the number of bytes freed. A non-zero return
   * value can be used to detect memory leaks.
   */
  public long cleanUpAllAllocatedMemory() {
    synchronized (this) {
      for (MemoryConsumer c: consumers) {
        if (c != null && c.getUsed() > 0) {
          // In case of failed task, it's normal to see leaked memory
          logger.debug("unreleased " + Utils.bytesToString(c.getUsed()) + " memory from " + c);
        }
      }
      consumers.clear();

      for (MemoryBlock page : pageTable) {
        if (page != null) {
          logger.debug("unreleased page: " + page + " in task " + taskAttemptId);
          memoryManager.tungstenMemoryAllocator().free(page);
        }
      }
      Arrays.fill(pageTable, null);
    }

    // release the memory that is not used by any consumer (acquired for pages in tungsten mode).
    memoryManager.releaseExecutionMemory(acquiredButNotUsed, taskAttemptId, tungstenMemoryMode);

    return memoryManager.releaseAllExecutionMemoryForTask(taskAttemptId);
  }

  /**
   * Returns the memory consumption, in bytes, for the current task.
   */
  public long getMemoryConsumptionForThisTask() {
    return memoryManager.getExecutionMemoryUsageForTask(taskAttemptId);
  }

  /**
   * Returns Tungsten memory mode
   */
  public MemoryMode getTungstenMemoryMode() {
    return tungstenMemoryMode;
  }
}
| apache-2.0 |
hnccfr/ccfrweb | admin/src/com/hundsun/network/gates/genshan/biz/domain/user/UserAddress.java | 4655 | /* */ package com.hundsun.network.gates.genshan.biz.domain.user;
/* */
/* */ import java.util.Date;
/* */
/* */ public class UserAddress
/* */ {
/* */ private Long id;
/* */ private String userAccount;
/* */ private String linkman;
/* */ private String phone;
/* */ private String type;
/* */ private String province;
/* */ private String city;
/* */ private String area;
/* */ private String address;
/* */ private String zipCode;
/* */ private String isDefault;
/* */ private Date gmtCreate;
/* */ private Date gmtModify;
/* */ private String operator;
/* */ private String storehouse;
/* */
/* */ public Long getId()
/* */ {
/* 82 */ return this.id;
/* */ }
/* */
/* */ public void setId(Long id)
/* */ {
/* 89 */ this.id = id;
/* */ }
/* */
/* */ public String getUserAccount()
/* */ {
/* 96 */ return this.userAccount;
/* */ }
/* */
/* */ public void setUserAccount(String userAccount)
/* */ {
/* 103 */ this.userAccount = userAccount;
/* */ }
/* */
/* */ public String getLinkman()
/* */ {
/* 110 */ return this.linkman;
/* */ }
/* */
/* */ public void setLinkman(String linkman)
/* */ {
/* 117 */ this.linkman = linkman;
/* */ }
/* */
/* */ public String getPhone()
/* */ {
/* 124 */ return this.phone;
/* */ }
/* */
/* */ public void setPhone(String phone)
/* */ {
/* 131 */ this.phone = phone;
/* */ }
/* */
/* */ public String getType()
/* */ {
/* 138 */ return this.type;
/* */ }
/* */
/* */ public void setType(String type)
/* */ {
/* 145 */ this.type = type;
/* */ }
/* */
/* */ public String getProvince()
/* */ {
/* 152 */ return this.province;
/* */ }
/* */
/* */ public void setProvince(String province)
/* */ {
/* 159 */ this.province = province;
/* */ }
/* */
/* */ public String getCity()
/* */ {
/* 166 */ return this.city;
/* */ }
/* */
/* */ public void setCity(String city)
/* */ {
/* 173 */ this.city = city;
/* */ }
/* */
/* */ public String getArea()
/* */ {
/* 180 */ return this.area;
/* */ }
/* */
/* */ public void setArea(String area)
/* */ {
/* 187 */ this.area = area;
/* */ }
/* */
/* */ public String getAddress() {
/* 191 */ return this.address;
/* */ }
/* */
/* */ public void setAddress(String address) {
/* 195 */ this.address = address;
/* */ }
/* */
/* */ public String getZipCode()
/* */ {
/* 202 */ return this.zipCode;
/* */ }
/* */
/* */ public void setZipCode(String zipCode)
/* */ {
/* 209 */ this.zipCode = zipCode;
/* */ }
/* */
/* */ public String getIsDefault()
/* */ {
/* 216 */ return this.isDefault;
/* */ }
/* */
/* */ public void setIsDefault(String isDefault)
/* */ {
/* 223 */ this.isDefault = isDefault;
/* */ }
/* */
/* */ public Date getGmtCreate()
/* */ {
/* 230 */ return this.gmtCreate;
/* */ }
/* */
/* */ public void setGmtCreate(Date gmtCreate)
/* */ {
/* 237 */ this.gmtCreate = gmtCreate;
/* */ }
/* */
/* */ public Date getGmtModify()
/* */ {
/* 244 */ return this.gmtModify;
/* */ }
/* */
/* */ public void setGmtModify(Date gmtModify)
/* */ {
/* 251 */ this.gmtModify = gmtModify;
/* */ }
/* */
/* */ public String getOperator()
/* */ {
/* 258 */ return this.operator;
/* */ }
/* */
/* */ public void setOperator(String operator)
/* */ {
/* 265 */ this.operator = operator;
/* */ }
/* */
/* */ public void setStorehouse(String storehouse) {
/* 269 */ this.storehouse = storehouse;
/* */ }
/* */
/* */ public String getStorehouse() {
/* 273 */ return this.storehouse;
/* */ }
/* */ }
/* Location: E:\__安装归档\linquan-20161112\deploy15\genshan\webroot\WEB-INF\classes\
* Qualified Name: com.hundsun.network.gates.genshan.biz.domain.user.UserAddress
* JD-Core Version: 0.6.0
*/ | apache-2.0 |
harishkswamy/gravity | framework/src/main/gravity/impl/ComponentStrategyDecorator.java | 2630 | // Copyright 2004 The Apache Software Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gravity.impl;
import gravity.ComponentStrategy;
import gravity.Context;
import gravity.RealizableComponent;
// TODO Unwrap (1 level up) and Strip (unwrap all levels to level 1) strategy
/**
* This class decorates {@link gravity.ComponentStrategy}. It can decorate the strategies
* infinitely deep.
*
* @author Harish Krishnaswamy
* @version $Id: ComponentStrategyDecorator.java,v 1.5 2005-10-06 21:59:27 harishkswamy Exp $
*/
public abstract class ComponentStrategyDecorator implements ComponentStrategy
{
protected Context _context;
protected ComponentStrategy _decoratedStrategy;
public void initialize(Context context, ComponentStrategy strategy)
{
_context = context;
_decoratedStrategy = strategy;
}
/**
* @return Returns a new concrete instance for the provided component when this strategy does
* not decorate another strategy, otherwise it simply defers to the decorated strategy.
*/
public Object getComponentInstance(RealizableComponent component)
{
if (_decoratedStrategy == null)
return component.newInstance();
return _decoratedStrategy.getComponentInstance(component);
}
/**
* When this strategy decorates another strategy, this method defers to the decorated strategy,
* otherwise it does nothing.
* <p>
* This method is intended to be overridden by subclasses that needs to recollect served
* instances like the {@link PoolingComponentStrategy}for example.
*/
public void collectComponentInstance(Object comp)
{
if (_decoratedStrategy != null)
_decoratedStrategy.collectComponentInstance(comp);
}
/**
* @return Returns an empty string when this strategy does not decorate another strategy,
* otherwise it defers to the decorated strategy.
*/
public String decoratedStrategyToString()
{
return _decoratedStrategy == null ? "" : _decoratedStrategy.toString();
}
} | apache-2.0 |
SergeyTravin/data-access | core/src/main/java/org/pentaho/platform/dataaccess/datasource/api/MetadataService.java | 17425 | /*!
* This program is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License, version 2.1 as published by the Free Software
* Foundation.
*
* You should have received a copy of the GNU Lesser General Public License along with this
* program; if not, you can obtain a copy at http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
* or from the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* Copyright (c) 2002-2017 Hitachi Vantara.. All rights reserved.
*/
package org.pentaho.platform.dataaccess.datasource.api;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import com.google.gwt.thirdparty.guava.common.annotations.VisibleForTesting;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.pentaho.metadata.model.Domain;
import org.pentaho.metadata.util.XmiParser;
import org.pentaho.platform.api.engine.IApplicationContext;
import org.pentaho.platform.api.engine.IPentahoSession;
import org.pentaho.platform.api.engine.PentahoAccessControlException;
import org.pentaho.platform.api.repository2.unified.IPlatformImportBundle;
import org.pentaho.platform.api.repository2.unified.RepositoryFileAcl;
import org.pentaho.platform.dataaccess.datasource.api.resources.MetadataTempFilesListBundleDto;
import org.pentaho.platform.dataaccess.datasource.api.resources.MetadataTempFilesListDto;
import org.pentaho.platform.dataaccess.datasource.wizard.service.ConnectionServiceException;
import org.pentaho.platform.dataaccess.datasource.wizard.service.messages.Messages;
import org.pentaho.platform.engine.core.system.PentahoSessionHolder;
import org.pentaho.platform.engine.core.system.PentahoSystem;
import org.pentaho.platform.plugin.services.importer.IPlatformImporter;
import org.pentaho.platform.plugin.services.importer.PlatformImportException;
import org.pentaho.platform.plugin.services.importer.RepositoryFileImportBundle;
import org.pentaho.platform.plugin.services.metadata.IAclAwarePentahoMetadataDomainRepositoryImporter;
import org.pentaho.platform.plugin.services.metadata.IPentahoMetadataDomainRepositoryExporter;
import org.pentaho.platform.repository2.unified.webservices.RepositoryFileAclDto;
import org.pentaho.platform.util.UUIDUtil;
import org.pentaho.platform.web.http.api.resources.FileResource;
import org.pentaho.platform.web.servlet.UploadFileUtils;
import com.sun.jersey.core.header.FormDataContentDisposition;
import com.sun.jersey.multipart.FormDataBodyPart;
public class MetadataService extends DatasourceService {
private static final String XMI_EXTENSION = ".xmi";
protected IAclAwarePentahoMetadataDomainRepositoryImporter aclAwarePentahoMetadataDomainRepositoryImporter;
private static final Log logger = LogFactory.getLog( MetadataService.class );
private static String upload_dir;
/**
 * Lazily resolves and caches the server's temporary upload directory
 * (&lt;solution&gt;/system/tmp). Returns an empty string when no application
 * context is available yet, without caching, so a later call can still succeed.
 * NOTE(review): the lazy init is not synchronized; the worst case is computing
 * the same stable path twice, which is harmless.
 */
public static String getUploadDir() {
  if ( upload_dir == null ) {
    IApplicationContext context = PentahoSystem.getApplicationContext();
    if ( context == null ) {
      // Platform not booted (e.g. unit tests); do not cache the empty result.
      return "";
    }
    // Reuse the context we already fetched instead of looking it up a second time.
    upload_dir = context.getSolutionPath( "system/tmp" );
  }
  return upload_dir;
}
/** Instance-level seam over {@link #getUploadDir()} so tests can substitute the temp dir. */
protected String internalGetUploadDir() {
  return getUploadDir();
}
/**
 * Wires the optional ACL-aware view of the metadata domain repository when the
 * platform-provided repository supports ACL-aware import; otherwise the field
 * stays null and ACL operations become no-ops.
 */
public MetadataService() {
  if ( !( metadataDomainRepository instanceof IAclAwarePentahoMetadataDomainRepositoryImporter ) ) {
    return;
  }
  aclAwarePentahoMetadataDomainRepositoryImporter =
      (IAclAwarePentahoMetadataDomainRepositoryImporter) metadataDomainRepository;
}
/**
 * Removes a metadata domain by id after verifying the caller has data-access
 * permission.
 *
 * @param metadataId id of the domain to remove
 * @throws PentahoAccessControlException when the permission check fails
 */
public void removeMetadata( String metadataId ) throws PentahoAccessControlException {
  try {
    ensureDataAccessPermissionCheck();
  } catch ( ConnectionServiceException e ) {
    // Surface the failure as an access-control error, but log the original
    // cause instead of silently dropping it.
    logger.error( "Data access permission check failed while removing metadata domain " + metadataId, e );
    throw new PentahoAccessControlException();
  }
  metadataDomainRepository.removeDomain( metadataId );
}
/**
 * Returns the ids of every domain in the repository that is a plain metadata
 * datasource (as opposed to other domain flavors).
 *
 * @return list of metadata domain ids; empty when none exist or when the
 *         thread is interrupted before the scan completes
 */
public List<String> getMetadataDatasourceIds() {
  List<String> metadataIds = new ArrayList<String>();
  try {
    // Short delay kept from the original implementation -- presumably gives the
    // repository time to settle before listing domains; TODO confirm it is still needed.
    sleep( 100 );
    for ( String id : metadataDomainRepository.getDomainIds() ) {
      if ( isMetadataDatasource( id ) ) {
        metadataIds.add( id );
      }
    }
  } catch ( InterruptedException e ) {
    // Restore the interrupt flag (instead of printStackTrace) so callers can
    // observe that the thread was interrupted.
    Thread.currentThread().interrupt();
    logger.warn( "Interrupted while listing metadata datasource ids", e );
  }
  return metadataIds;
}
/**
 * Stages an XMI metadata file plus optional localization bundles in the server
 * temp directory and returns a DTO listing the generated temp-file names, so a
 * later call can complete the import.
 *
 * @param metadataFile      stream of the XMI file to stage
 * @param localeFileStreams streams of locale bundles, parallel to localeFileNames (may be null)
 * @param localeFileNames   original names of the locale bundles (may be null)
 * @return DTO mapping original names to temp-file names
 * @throws Exception when writing a temp file fails
 */
public MetadataTempFilesListDto uploadMetadataFilesToTempDir( InputStream metadataFile,
    List<InputStream> localeFileStreams, List<String> localeFileNames ) throws Exception {
  MetadataTempFilesListDto dto = new MetadataTempFilesListDto();
  String xmiTempName = uploadFile( metadataFile );
  dto.setXmiFileName( xmiTempName );
  logger.info( "metadata file uploaded: " + xmiTempName );
  if ( localeFileStreams == null || localeFileStreams.size() == 0 ) {
    return dto;
  }
  List<MetadataTempFilesListBundleDto> bundles = new ArrayList<MetadataTempFilesListBundleDto>();
  for ( int i = 0; i < localeFileStreams.size(); i++ ) {
    String localeTempName = uploadFile( localeFileStreams.get( i ) );
    bundles.add( new MetadataTempFilesListBundleDto( localeFileNames.get( i ), localeTempName ) );
    logger.info( "locale file uploaded: " + localeTempName );
  }
  dto.setBundles( bundles );
  return dto;
}
/**
 * Writes the given stream to a uniquely named temporary file (no unzipping)
 * and returns the generated temp-file name reported by the upload utility.
 */
protected String uploadFile( InputStream is ) throws Exception {
  StringWriter tempNameWriter = new StringWriter();
  UploadFileUtils uploader = new UploadFileUtils( PentahoSessionHolder.getSession() );
  uploader.setShouldUnzip( false );
  uploader.setTemporary( true );
  // Random UUID avoids collisions between concurrent uploads.
  uploader.setFileName( UUIDUtil.getUUID().toString() );
  uploader.setWriter( tempNameWriter );
  uploader.process( is );
  return tempNameWriter.toString();
}
/**
 * Multipart-form variant: unwraps each {@link FormDataBodyPart} into a raw
 * stream/name pair and delegates to the stream-based overload.
 */
public MetadataTempFilesListDto uploadMetadataFilesToTempDir( InputStream metadataFile,
    List<FormDataBodyPart> localeFiles ) throws Exception {
  List<InputStream> streams = null;
  List<String> names = null;
  if ( localeFiles != null && !localeFiles.isEmpty() ) {
    streams = new ArrayList<InputStream>();
    names = new ArrayList<String>();
    for ( FormDataBodyPart part : localeFiles ) {
      streams.add( new ByteArrayInputStream( part.getValueAs( byte[].class ) ) );
      names.add( part.getFormDataContentDisposition().getFileName() );
    }
  }
  return uploadMetadataFilesToTempDir( metadataFile, streams, names );
}
/**
 * Multipart-form entry point for importing a metadata datasource: copies each
 * locale part into a byte-array stream, pairs it with its original file name,
 * and delegates to the stream-based overload.
 *
 * @throws PlatformImportException when the domain id is empty
 */
public void importMetadataDatasource( String domainId, InputStream metadataFile,
                                      FormDataContentDisposition metadataFileInfo, boolean overwrite,
                                      List<FormDataBodyPart> localeFiles,
                                      List<FormDataContentDisposition> localeFilesInfo, RepositoryFileAclDto acl )
    throws PentahoAccessControlException, PlatformImportException,
    Exception {
  if ( StringUtils.isEmpty( domainId ) ) {
    throw new PlatformImportException( Messages.getString( "MetadataDatasourceService.ERROR_005_DOMAIN_NAME_EMPTY" ) );
  }
  List<InputStream> localeStreams = null;
  List<String> localeNames = null;
  if ( localeFiles != null ) {
    localeStreams = new ArrayList<InputStream>();
    localeNames = new ArrayList<String>();
    int count = localeFiles.size();
    for ( int i = 0; i < count; i++ ) {
      logger.info( "create language file" );
      // createNewByteArrayInputStream is an overridable seam used by tests.
      localeStreams.add( createNewByteArrayInputStream( localeFiles.get( i ).getValueAs( byte[].class ) ) );
      localeNames.add( localeFilesInfo.get( i ).getFileName() );
    }
  }
  importMetadataDatasource( domainId, metadataFile, overwrite, localeStreams, localeNames, acl );
}
/**
 * Imports an XMI metadata file (plus optional localization bundles) into the
 * metadata domain repository under the given domain id, then publishes the
 * change for the current session.
 *
 * Steps: validate domain id, check access, reject repository-reserved
 * characters in the id, enforce the configured max file size, force the
 * ".xmi" suffix, build the import bundle (locale files as child bundles),
 * run the platform importer, and publish.
 *
 * @param domainId          target domain id (".xmi" suffix appended if missing)
 * @param metadataFile      stream of the XMI content
 * @param overwrite         whether an existing domain may be replaced
 * @param localeFileStreams locale bundle streams, parallel to localeFileNames (may be null)
 * @param localeFileNames   original locale bundle file names (may be null)
 * @param acl               ACL to apply to the imported file (may be null)
 * @throws PlatformImportException on empty id, prohibited characters, or size overflow
 */
public void importMetadataDatasource( String domainId, InputStream metadataFile, boolean overwrite,
    List<InputStream> localeFileStreams, List<String> localeFileNames, RepositoryFileAclDto acl )
  throws PentahoAccessControlException, PlatformImportException, Exception {
  if ( StringUtils.isEmpty( domainId ) ) {
    throw new PlatformImportException( Messages.getString( "MetadataDatasourceService.ERROR_005_DOMAIN_NAME_EMPTY" ) );
  }
  accessValidation();
  FileResource fr = createNewFileResource();
  Object reservedCharsObject = fr.doGetReservedChars().getEntity();
  String reservedChars = objectToString( reservedCharsObject );
  // Reject ids containing repository-reserved characters; '/' is stripped from
  // the reserved set before the check.
  if ( reservedChars != null
      && domainId.matches( ".*[" + reservedChars.replaceAll( "/", "" ) + "]+.*" ) ) {
    String msg = prohibitedSymbolMessage( domainId, fr );
    throw new PlatformImportException( msg, PlatformImportException.PUBLISH_PROHIBITED_SYMBOLS_ERROR );
  }
  metadataFile = validateFileSize( metadataFile, domainId );

  // domain ID comes with ".xmi" suffix when creating or editing domain
  // (see ModelerService.serializeModels( Domain, String, boolean ) ),
  // but when the user enters domain ID manually when importing metadata file,
  // it will unlikely contain that suffix, so let's add it forcibly.
  domainId = forceXmiSuffix( domainId );

  RepositoryFileImportBundle.Builder bundleBuilder = createNewRepositoryFileImportBundleBuilder( metadataFile, overwrite, domainId, acl );

  if ( localeFileStreams != null ) {
    for ( int i = 0; i < localeFileStreams.size(); i++ ) {
      // Each localization bundle becomes a child of the main XMI bundle.
      IPlatformImportBundle localizationBundle = createNewRepositoryFileImportBundle( localeFileStreams.get( i ), localeFileNames.get( i ), domainId );
      bundleBuilder.addChildBundle( localizationBundle );
    }
  }

  IPlatformImportBundle bundle = bundleBuilder.build();
  IPlatformImporter importer = getImporter();
  importer.importFile( bundle );
  IPentahoSession pentahoSession = getSession();
  publish( pentahoSession );
}
/**
 * Checks whether the previously uploaded temp XMI file contains more than one
 * logical model (i.e. it is a DSW-style file rather than a plain metadata
 * datasource).
 *
 * @param tempFileName name of the staged file inside the server temp dir
 * @return true when the parsed domain is not a plain metadata datasource and
 *         holds more than one logical model
 * @throws Exception on read or parse failure
 */
public boolean isContainsModel( String tempFileName ) throws Exception {
  XmiParser xmiParser = new XmiParser();
  byte[] is;
  InputStream tempFileStream = createInputStreamFromFile( internalGetUploadDir() + File.separatorChar + tempFileName );
  try {
    is = IOUtils.toByteArray( tempFileStream );
  } finally {
    // The original implementation leaked this stream; always close it.
    IOUtils.closeQuietly( tempFileStream );
  }
  Domain domain = xmiParser.parseXmi( new java.io.ByteArrayInputStream( is ) );
  return isContainsModel( domain );
}
/**
 * A domain "contains a model" when it is not a plain metadata datasource and
 * declares more than one logical model.
 */
protected boolean isContainsModel( Domain domain ) throws Exception {
  if ( DatasourceService.isMetadataDatasource( domain ) ) {
    return false;
  }
  return domain.getLogicalModels().size() > 1;
}
/**
 * Completes an import that was staged via uploadMetadataFilesToTempDir:
 * reopens the previously uploaded XMI and locale temp files from the server
 * temp directory and feeds them to
 * {@link #importMetadataDatasource(String, InputStream, boolean, List, List, RepositoryFileAclDto)}.
 *
 * @param domainId  target domain id
 * @param fileList  DTO naming the staged temp files (XMI plus locale bundles)
 * @param overwrite whether an existing domain may be replaced
 * @param acl       ACL to apply to the imported file (may be null)
 *
 * NOTE(review): the temp-file streams opened here are handed to the importer
 * and are not explicitly closed in this method -- confirm downstream close
 * semantics.
 */
public void importMetadataFromTemp( String domainId, MetadataTempFilesListDto fileList,
  boolean overwrite, RepositoryFileAclDto acl ) throws PentahoAccessControlException, PlatformImportException, Exception {
  String metadataTempFileName = fileList.getXmiFileName();
  InputStream metaDataFileInputStream = createInputStreamFromFile( internalGetUploadDir() + File.separatorChar + metadataTempFileName );

  List<MetadataTempFilesListBundleDto> locBundles = fileList.getBundles();
  List<String> localeFileNames = new ArrayList<String>();
  List<InputStream> localeFileStreams = new ArrayList<InputStream>();

  if ( locBundles != null ) {
    for ( MetadataTempFilesListBundleDto bundle : locBundles ) {
      localeFileNames.add( bundle.getOriginalFileName() );
      localeFileStreams.add( createInputStreamFromFile( internalGetUploadDir() + File.separatorChar + bundle.getTempFileName() ) );
    }
  }

  importMetadataDatasource( domainId, metaDataFileInputStream, overwrite, localeFileStreams, localeFileNames, acl );
}
/**
 * Fully buffers the metadata stream and rejects it when it exceeds the
 * configured upload limit; otherwise returns a replayable in-memory stream
 * over the same bytes.
 *
 * @param metadataFile stream to validate (consumed by this call)
 * @param domainId     used only in the error message
 * @return a fresh ByteArrayInputStream over the buffered content
 * @throws PlatformImportException when the content exceeds the limit
 */
@VisibleForTesting
InputStream validateFileSize( InputStream metadataFile, String domainId )
    throws IOException, PlatformImportException {
  // maxFileLimit defaults to 10 MB unless overridden in system settings.
  String maxFileLimit = PentahoSystem
      .getSystemSetting( "file-upload-defaults/max-file-limit", String.valueOf( 10000000 ) ); //$NON-NLS-1$
  byte[] content = IOUtils.toByteArray( metadataFile );
  long limit = Long.parseLong( maxFileLimit );
  if ( limit < content.length ) {
    String msg = Messages.getString( "MetadataDatasourceService.ERROR_004_MAX_FILE_SIZE_EXCEEDED_ERROR", domainId );
    throw new PlatformImportException( msg, PlatformImportException.PUBLISH_DATASOURCE_ERROR );
  }
  return new ByteArrayInputStream( content );
}
public RepositoryFileAclDto getMetadataAcl( String domainId )
throws PentahoAccessControlException, FileNotFoundException {
checkMetadataExists( domainId );
if ( aclAwarePentahoMetadataDomainRepositoryImporter != null ) {
final RepositoryFileAcl acl = aclAwarePentahoMetadataDomainRepositoryImporter.getAclFor( domainId );
return acl == null ? null : repositoryFileAclAdapter.marshal( acl );
}
return null;
}
public void setMetadataAcl( String domainId, RepositoryFileAclDto aclDto )
throws PentahoAccessControlException, FileNotFoundException {
checkMetadataExists( domainId );
if ( aclAwarePentahoMetadataDomainRepositoryImporter != null ) {
final RepositoryFileAcl acl = aclDto == null ? null : repositoryFileAclAdapter.unmarshal( aclDto );
aclAwarePentahoMetadataDomainRepositoryImporter.setAclFor( domainId, acl );
flushDataSources();
}
}
private void checkMetadataExists( String domainId ) throws PentahoAccessControlException, FileNotFoundException {
if ( !canManageACL() ) {
throw new PentahoAccessControlException();
}
if ( metadataDomainRepository instanceof IPentahoMetadataDomainRepositoryExporter ) {
Map<String, InputStream> domainFilesData =
( (IPentahoMetadataDomainRepositoryExporter) metadataDomainRepository ).getDomainFilesData( domainId );
if ( domainFilesData == null || domainFilesData.isEmpty() ) {
throw new FileNotFoundException();
}
}
}
protected void sleep( int i ) throws InterruptedException {
Thread.sleep( i );
}
protected String prohibitedSymbolMessage( String domainId, FileResource fr ) throws InterruptedException {
String illegalCharacterList = (String) fr.doGetReservedCharactersDisplay().getEntity();
//For metadata \ is a legal character and must be removed from the message list before returning the message list to the user
illegalCharacterList = illegalCharacterList.replaceAll( "\\,", "" );
return Messages.getString( "MetadataDatasourceService.ERROR_003_PROHIBITED_SYMBOLS_ERROR", domainId, illegalCharacterList );
}
protected String objectToString( Object o ) throws InterruptedException {
return (String) o;
}
  // Publishes the metadata for the given session via the platform's MetadataPublisher.
  // Presumably a seam so tests can override and avoid touching PentahoSystem — TODO confirm.
  protected void publish( IPentahoSession pentahoSession ) throws InterruptedException {
    PentahoSystem.publish( pentahoSession, org.pentaho.platform.engine.services.metadata.MetadataPublisher.class.getName() );
  }
protected IPentahoSession getSession() throws InterruptedException {
return PentahoSessionHolder.getSession();
}
protected IPlatformImporter getImporter() throws InterruptedException {
return PentahoSystem.get( IPlatformImporter.class );
}
  // Delegates to the superclass access check; presumably overridable in tests — TODO confirm.
  // Throws PentahoAccessControlException when the caller lacks the required permissions.
  protected void accessValidation() throws PentahoAccessControlException {
    super.validateAccess();
  }
protected boolean canAdministerCheck() {
return super.canAdminister();
}
  // Delegates to the superclass data-access permission check; presumably overridable in tests — TODO confirm.
  // Throws ConnectionServiceException when the caller lacks data-access permission.
  protected void ensureDataAccessPermissionCheck() throws ConnectionServiceException {
    super.ensureDataAccessPermission();
  }
protected FileResource createNewFileResource() {
return new FileResource();
}
protected RepositoryFileImportBundle.Builder createNewRepositoryFileImportBundleBuilder( InputStream metadataFile,
boolean overWriteInRepository, String domainId, RepositoryFileAclDto acl ) {
final RepositoryFileImportBundle.Builder
builder =
new RepositoryFileImportBundle.Builder().input( metadataFile ).charSet( "UTF-8" ).hidden( false )
.overwriteFile( overWriteInRepository ).mime( "text/xmi+xml" ).withParam( "domain-id", domainId );
if ( acl != null ) {
builder.acl( repositoryFileAclAdapter.unmarshal( acl ) ).applyAclSettings( true );
}
return builder;
}
protected InputStream createInputStreamFromFile( String fileName ) throws FileNotFoundException {
return new FileInputStream( fileName );
}
public static RepositoryFileImportBundle createNewRepositoryFileImportBundle( InputStream bais, String fileName, String domainId ) {
return new RepositoryFileImportBundle.Builder().input( bais ).charSet( "UTF-8" ).hidden( false )
.name( fileName ).withParam( "domain-id", domainId )
.build();
}
protected ByteArrayInputStream createNewByteArrayInputStream( byte[] buf ) {
if ( buf != null ) {
return new ByteArrayInputStream( buf );
} else {
return null;
}
}
private static String forceXmiSuffix( String domainId ) {
if ( domainId.endsWith( XMI_EXTENSION ) ) {
return domainId;
} else {
return domainId + XMI_EXTENSION;
}
}
}
| apache-2.0 |