text
stringlengths 7
1.01M
|
|---|
/*
* The MIT License
*
* Copyright 2013 Red Hat, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package hudson.cli;
import static hudson.cli.CLICommandInvoker.Matcher.failedWith;
import static hudson.cli.CLICommandInvoker.Matcher.hasNoStandardOutput;
import static hudson.cli.CLICommandInvoker.Matcher.succeededSilently;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import hudson.model.FreeStyleBuild;
import hudson.model.FreeStyleProject;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.jvnet.hudson.test.JenkinsRule;
/**
 * Tests for the {@code set-build-display-name} CLI command.
 */
public class SetBuildDisplayNameCommandTest {

    private CLICommandInvoker command;

    @Rule public final JenkinsRule j = new JenkinsRule();

    @Before public void setUp() {
        command = new CLICommandInvoker(j, new SetBuildDisplayNameCommand());
    }

    /** Referencing a build number that was never run must fail with an error on stderr. */
    @Test public void referencingBuildThatDoesNotExistShouldFail() throws Exception {
        j.createFreeStyleProject("project");

        final CLICommandInvoker.Result result = command
                .invokeWithArgs("project", "42", "DisplayName")
        ;

        assertThat(result.stderr(), containsString("Build #42 does not exist"));
        assertThat(result, hasNoStandardOutput());
        assertThat(result, failedWith(-1));
    }

    /**
     * Setting the display name of an existing build should succeed silently.
     * Renamed from the misleading "setDescriptionSuccessfully" — the command
     * under test sets the display name, not the description.
     */
    @Test public void setDisplayNameSuccessfully() throws Exception {
        FreeStyleProject job = j.createFreeStyleProject("project");
        FreeStyleBuild build = job.scheduleBuild2(0).get();

        final CLICommandInvoker.Result result = command
                .invokeWithArgs("project", "1", "DisplayName")
        ;

        assertThat(result, succeededSilently());
        assertThat(build.getDisplayName(), equalTo("DisplayName"));
    }
}
|
// This file is part of OpenTSDB.
// Copyright (C) 2013 The OpenTSDB Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 2.1 of the License, or (at your
// option) any later version. This program is distributed in the hope that it
// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
// General Public License for more details. You should have received a copy
// of the GNU Lesser General Public License along with this program. If not,
// see <http://www.gnu.org/licenses/>.
package net.opentsdb.tsd;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.anyString;
import static org.powermock.api.mockito.PowerMockito.mock;
import java.lang.reflect.Method;
import java.util.TreeMap;
import net.opentsdb.core.TSDB;
import net.opentsdb.meta.TSMeta;
import net.opentsdb.meta.UIDMeta;
import net.opentsdb.storage.MockBase;
import net.opentsdb.tree.Branch;
import net.opentsdb.tree.Leaf;
import net.opentsdb.tree.TestTree;
import net.opentsdb.tree.Tree;
import net.opentsdb.tree.TreeRule;
import net.opentsdb.tree.TreeRule.TreeRuleType;
import net.opentsdb.uid.UniqueId;
import net.opentsdb.uid.UniqueId.UniqueIdType;
import net.opentsdb.utils.Config;
import net.opentsdb.utils.JSON;
import org.hbase.async.DeleteRequest;
import org.hbase.async.GetRequest;
import org.hbase.async.HBaseClient;
import org.hbase.async.KeyValue;
import org.hbase.async.PutRequest;
import org.hbase.async.Scanner;
import org.jboss.netty.handler.codec.http.DefaultHttpRequest;
import org.jboss.netty.handler.codec.http.HttpMethod;
import org.jboss.netty.handler.codec.http.HttpRequest;
import org.jboss.netty.handler.codec.http.HttpResponseStatus;
import org.jboss.netty.handler.codec.http.HttpVersion;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
@RunWith(PowerMockRunner.class)
// Fixed typo: "com.sum.*" matched no real package; the conventional PowerMock
// ignore for JDK internals is "com.sun.*".
@PowerMockIgnore({"javax.management.*", "javax.xml.*",
  "ch.qos.*", "org.slf4j.*",
  "com.sun.*", "org.xml.*"})
@PrepareForTest({ TSDB.class, HBaseClient.class, GetRequest.class, Tree.class,
  PutRequest.class, KeyValue.class, Scanner.class, DeleteRequest.class })
public final class TestTreeRpc {
  // Column family used by the mocked storage layer.
  private static byte[] NAME_FAMILY = "name".getBytes(MockBase.ASCII());
  private TSDB tsdb;
  private HBaseClient client = mock(HBaseClient.class);
  private MockBase storage;
  private TreeRpc rpc = new TreeRpc();
final static private Method branchToStorageJson;
static {
try {
branchToStorageJson = Branch.class.getDeclaredMethod("toStorageJson");
branchToStorageJson.setAccessible(true);
} catch (Exception e) {
throw new RuntimeException("Failed in static initializer", e);
}
}
final static private Method TreetoStorageJson;
static {
try {
TreetoStorageJson = Tree.class.getDeclaredMethod("toStorageJson");
TreetoStorageJson.setAccessible(true);
} catch (Exception e) {
throw new RuntimeException("Failed in static initializer", e);
}
}
final static private Method LeaftoStorageJson;
static {
try {
LeaftoStorageJson = Leaf.class.getDeclaredMethod("toStorageJson");
LeaftoStorageJson.setAccessible(true);
} catch (Exception e) {
throw new RuntimeException("Failed in static initializer", e);
}
}
final static private Method TSMetagetStorageJSON;
static {
try {
TSMetagetStorageJSON = TSMeta.class.getDeclaredMethod("getStorageJSON");
TSMetagetStorageJSON.setAccessible(true);
} catch (Exception e) {
throw new RuntimeException("Failed in static initializer", e);
}
}
final static private Method UIDMetagetStorageJSON;
static {
try {
UIDMetagetStorageJSON = UIDMeta.class.getDeclaredMethod("getStorageJSON");
UIDMetagetStorageJSON.setAccessible(true);
} catch (Exception e) {
throw new RuntimeException("Failed in static initializer", e);
}
}
/**
 * Builds a fresh TSDB wired to a mocked HBase client before each test.
 * NOTE: statement order matters — whenNew() must be registered BEFORE the
 * TSDB constructor runs so the mock client is injected.
 */
@Before
public void before() throws Exception {
  // Config(false) — presumably skips loading a config file; TODO confirm.
  final Config config = new Config(false);
  PowerMockito.whenNew(HBaseClient.class)
      .withArguments(anyString(), anyString()).thenReturn(client);
  tsdb = new TSDB(config);
  // MockBase backs the mocked client with an in-memory table.
  storage = new MockBase(tsdb, client, true, true, true, true);
}
/** The RPC handler must construct without touching storage. */
@Test
public void constructor() throws Exception {
  new TreeRpc();
}

/** An unknown endpoint under /api/tree is rejected. */
@Test (expected = BadRequestException.class)
public void noRoute() throws Exception {
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb, "/api/tree/noroute"));
}

/** An unsupported HTTP verb (TRACE) on /api/tree is rejected. */
@Test (expected = BadRequestException.class)
public void handleTreeBadMethod() throws Exception {
  final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1,
      HttpMethod.TRACE, "/api/tree");
  rpc.execute(tsdb, new HttpQuery(tsdb, req, NettyMocks.fakeChannel()));
}
/** GET without a tree id returns every configured tree. */
@Test
public void handleTreeGetAll() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb, "/api/tree");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"name\":\"Test Tree\""));
  assertTrue(content.contains("\"name\":\"2nd Tree\""));
}

/** GET with treeid=2 returns only that tree. */
@Test
public void handleTreeGetSingle() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb, "/api/tree?treeid=2");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"name\":\"2nd Tree\""));
  assertFalse(content.contains("\"name\":\"Test Tree\""));
}

/** Requesting a tree id that is not in storage fails. */
@Test (expected = BadRequestException.class)
public void handleTreeGetNotFound() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb, "/api/tree?treeid=3"));
}

/** A tree id beyond the valid range is rejected. */
@Test (expected = BadRequestException.class)
public void handleTreeGetBadID655536() throws Exception {
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb, "/api/tree?treeid=655536"));
}
/** Creating a tree via query string stores one column under row {0,3}. */
@Test
public void handleTreeQSCreate() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree?name=NewTree&method_override=post");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  assertEquals(1, storage.numColumns(new byte[] { 0, 3 }));
}

/** A tree cannot be created without a name. */
@Test (expected = BadRequestException.class)
public void handleTreeQSCreateNoName() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree?method_override=post&description=HelloWorld"));
}

/** Creation fails once the highest tree id row (0xFFFF) is occupied. */
@Test (expected = BadRequestException.class)
public void handleTreeQSCreateOutOfIDs() throws Exception {
  setupStorage();
  storage.addColumn(new byte[] { (byte) 0xFF, (byte) 0xFF },
      "tree".getBytes(MockBase.ASCII()), "{}".getBytes(MockBase.ASCII()));
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree?method_override=post"));
}
/** Creating a tree via a POST body stores one column under row {0,3}. */
@Test
public void handleTreePOSTCreate() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.postQuery(tsdb,
      "/api/tree", "{\"name\":\"New Tree\"}");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  assertEquals(1, storage.numColumns(new byte[] { 0, 3 }));
}

/** Modifying an existing tree via query string returns the updated tree. */
@Test
public void handleTreeQSModify() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree?treeid=1&method_override=post&description=HelloWorld");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"description\":\"HelloWorld\""));
  assertTrue(content.contains("\"name\":\"Test Tree\""));
}

/** Modifying a nonexistent tree fails. */
@Test (expected = BadRequestException.class)
public void handleTreeQSModifyNotFound() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree?treeid=3&method_override=post&description=HelloWorld"));
}

/** A modify request that changes nothing yields 304. */
@Test
public void handleTreeQSModifyNotModified() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree?treeid=1&method_override=post");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.NOT_MODIFIED, query.response().getStatus());
}
/** Modifying a tree via a POST body merges with the stored fields. */
@Test
public void handleTreePOSTModify() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.postQuery(tsdb,
      "/api/tree", "{\"treeId\":1,\"description\":\"Hello World\"}");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"description\":\"Hello World\""));
  assertTrue(content.contains("\"name\":\"Test Tree\""));
}

/** PUT on a nonexistent tree fails. */
@Test (expected = BadRequestException.class)
public void handleTreeQSPutNotFound() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree?treeid=3&method_override=put&description=HelloWorld"));
}

/** A PUT that changes nothing yields 304. */
@Test
public void handleTreeQSPutNotModified() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree?treeid=1&method_override=put");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.NOT_MODIFIED, query.response().getStatus());
}

/** PUT replaces the tree, clearing fields not supplied (name becomes empty). */
@Test
public void handleTreeQSPut() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree?treeid=1&method_override=put&description=HelloWorld");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"description\":\"HelloWorld\""));
  assertTrue(content.contains("\"name\":\"\""));
}
/** PUT via request body replaces the tree, clearing unsupplied fields. */
@Test
public void handleTreePOSTPut() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.putQuery(tsdb,
      "/api/tree", "{\"treeId\":1,\"description\":\"Hello World\"}");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"description\":\"Hello World\""));
  assertTrue(content.contains("\"name\":\"\""));
}

/** Default delete removes branches/leaves but keeps the tree definition. */
@Test
public void handleTreeQSDeleteDefault() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree?treeid=1&method_override=delete");
  // make sure the root is there BEFORE we delete
  assertEquals(4, storage.numColumns(new byte[] { 0, 1 }));
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus());
  // make sure the definition is still there but the root is gone
  assertEquals(3, storage.numColumns(new byte[] { 0, 1 }));
  assertEquals(-1, storage.numColumns(
      Branch.stringToId("00010001BECD000181A8")));
  assertEquals(-1, storage.numColumns(
      Branch.stringToId("00010001BECD000181A8BF992A99")));
}
/** definition=true also removes the tree definition row. */
@Test
public void handleTreeQSDeleteDefinition() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree?treeid=1&method_override=delete&definition=true");
  // make sure the root is there BEFORE we delete
  assertEquals(4, storage.numColumns(new byte[] { 0, 1 }));
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus());
  // make sure the definition has been deleted too
  assertEquals(-1, storage.numColumns(new byte[] { 0, 1 }));
  assertEquals(-1, storage.numColumns(
      Branch.stringToId("00010001BECD000181A8")));
  assertEquals(-1, storage.numColumns(
      Branch.stringToId("00010001BECD000181A8BF992A99")));
}

/** DELETE with a body: branches go, definition stays. */
@Test
public void handleTreePOSTDeleteDefault() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.deleteQuery(tsdb,
      "/api/tree", "{\"treeId\":1}");
  // make sure the root is there BEFORE we delete
  assertEquals(4, storage.numColumns(new byte[] { 0, 1 }));
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus());
  // make sure the definition is still there but the root is gone
  assertEquals(3, storage.numColumns(new byte[] { 0, 1 }));
  assertEquals(-1, storage.numColumns(
      Branch.stringToId("00010001BECD000181A8")));
  assertEquals(-1, storage.numColumns(
      Branch.stringToId("00010001BECD000181A8BF992A99")));
}

/** DELETE with {"definition":true} removes everything for the tree. */
@Test
public void handleTreePOSTDeleteDefinition() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.deleteQuery(tsdb,
      "/api/tree", "{\"treeId\":1,\"definition\":true}");
  // make sure the root is there BEFORE we delete
  assertEquals(4, storage.numColumns(new byte[] { 0, 1 }));
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus());
  // make sure the definition has been deleted too
  assertEquals(-1, storage.numColumns(new byte[] { 0, 1 }));
  assertEquals(-1, storage.numColumns(
      Branch.stringToId("00010001BECD000181A8")));
  assertEquals(-1, storage.numColumns(
      Branch.stringToId("00010001BECD000181A8BF992A99")));
}

/** Deleting a nonexistent tree fails. */
@Test (expected = BadRequestException.class)
public void handleTreeQSDeleteNotFound() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree?treeid=3&method_override=delete"));
}
/** Fetching a branch with only a tree id returns the ROOT branch. */
@Test
public void handleBranchRoot() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb, "/api/tree/branch?treeid=1");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"displayName\":\"ROOT\""));
  assertTrue(content.contains("\"branches\":null"));
}

/** Fetching a child branch by id returns its metric and sub-branches. */
@Test
public void handleBranchChild() throws Exception {
  setupStorage();
  setupBranch();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree/branch?branch=00010001BECD000181A8");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"metric\":\"sys.cpu.0\""));
  assertTrue(content.contains("\"branches\":["));
}

/** An unknown branch id fails. */
@Test (expected = BadRequestException.class)
public void handleBranchNotFound() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree/branch?branch=00010001BECD000181A8BBBBB"));
}

/** A branch request without a tree or branch id fails. */
@Test (expected = BadRequestException.class)
public void handleBranchNoTree() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb, "/api/tree/branch"));
}

/** An unsupported HTTP verb on /api/tree/branch is rejected. */
@Test (expected = BadRequestException.class)
public void handleBranchBadMethod() throws Exception {
  final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1,
      HttpMethod.TRACE, "/api/tree/branch");
  rpc.execute(tsdb, new HttpQuery(tsdb, req, NettyMocks.fakeChannel()));
}
/** GET of an existing rule returns its type and level. */
@Test
public void handleRuleGetQS() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree/rule?treeid=1&level=1&order=0");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"type\":\"METRIC\""));
  assertTrue(content.contains("\"level\":1"));
}

/** A rule at an unoccupied level/order fails. */
@Test (expected = BadRequestException.class)
public void handleRuleGetQSNotFound() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree/rule?treeid=1&level=2&order=2"));
}

/** A rule lookup on a nonexistent tree fails. */
@Test (expected = BadRequestException.class)
public void handleRuleGetQSTreeNotFound() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree/rule?treeid=4&level=1&order=0"));
}

/** The tree id parameter is required. */
@Test (expected = BadRequestException.class)
public void handleRuleGetQSMissingTree() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree/rule?level=1&order=0"));
}

/** The level parameter is required. */
@Test (expected = BadRequestException.class)
public void handleRuleGetQSMissingLevel() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree/rule?treeid=1&order=0"));
}

/** The order parameter is required. */
@Test (expected = BadRequestException.class)
public void handleRuleGetQSMissingOrder() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree/rule?treeid=1&level=1"));
}
/** Creating a new rule via query string echoes the stored rule. */
@Test
public void handleRuleQSNew() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree/rule?treeid=1&level=2&order=1&description=Testing" +
      "&method_override=post");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"description\":\"Testing\""));
  assertTrue(content.contains("\"level\":2"));
}

/** A tagk rule without a field fails validation. */
@Test (expected = BadRequestException.class)
public void handleRuleQSNewFailValidation() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree/rule?treeid=1&level=2&order=1&description=Testing" +
      "&method_override=post&type=tagk"));
}

/** A new rule requires a type parameter. */
@Test (expected = BadRequestException.class)
public void handleRuleQSNewMissingType() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree/rule?treeid=1&level=2&order=1&description=Testing&method_override=post"));
}

/** A modify that changes nothing yields 304. */
@Test
public void handleRuleQSNotModified() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree/rule?treeid=1&level=1&order=0&method_override=post");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.NOT_MODIFIED, query.response().getStatus());
}

/** Modifying an existing rule merges with stored fields (notes kept). */
@Test
public void handleRuleQSModify() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree/rule?treeid=1&level=1&order=0&description=Testing&method_override=post");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"description\":\"Testing\""));
  assertTrue(content.contains("\"level\":1"));
  assertTrue(content.contains("\"notes\":\"Metric rule\""));
}
/** Creating a new rule via a POST body echoes the stored rule. */
@Test
public void handleRulePOSTNew() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.postQuery(tsdb,
      "/api/tree/rule", "{\"treeId\":1,\"level\":2,\"order\":2,\"description\":" +
      "\"Testing\",\"type\":\"metric\"}");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"description\":\"Testing\""));
  assertTrue(content.contains("\"level\":2"));
}

/** Modifying an existing rule via POST merges with stored fields. */
@Test
public void handleRulePOSTModify() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.postQuery(tsdb,
      "/api/tree/rule", "{\"treeId\":1,\"level\":1,\"order\":0,\"description\":" +
      "\"Testing\"}");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"description\":\"Testing\""));
  assertTrue(content.contains("\"level\":1"));
  assertTrue(content.contains("\"notes\":\"Metric rule\""));
}

/** POSTing an empty body to /api/tree/rules fails. */
@Test (expected = BadRequestException.class)
public void handleRulesPOSTNoRules() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.postQuery(tsdb, "/api/tree/rules", ""));
}
/** PUT replaces the rule; fields not supplied (notes) are cleared. */
@Test
public void handleRuleQSPut() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree/rule?treeid=1&level=1&order=0&description=Testing" +
      "&method_override=put&type=metric");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"description\":\"Testing\""));
  assertTrue(content.contains("\"level\":1"));
  assertFalse(content.contains("\"notes\":\"Metric rule\""));
}

/** A PUT rule requires a type parameter. */
@Test (expected = BadRequestException.class)
public void handleRuleQSPutMissingType() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree/rule?treeid=1&level=1&order=0&description=Testing&method_override=put"));
}

/** PUT via request body replaces the rule, clearing unsupplied fields. */
@Test
public void handleRulePUT() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.putQuery(tsdb,
      "/api/tree/rule", "{\"treeId\":1,\"level\":1,\"order\":0,\"description\":" +
      "\"Testing\",\"type\":\"metric\"}");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"description\":\"Testing\""));
  assertTrue(content.contains("\"level\":1"));
  assertFalse(content.contains("\"notes\":\"Metric rule\""));
}
/** Deleting a rule removes its column from the tree row. */
@Test
public void handleRuleQSDelete() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree/rule?treeid=1&level=1&order=0&method_override=delete");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus());
  assertEquals(3, storage.numColumns(new byte[] { 0, 1 }));
}

/** Deleting a rule that does not exist fails. */
@Test (expected = BadRequestException.class)
public void handleRuleQSDeleteNotFound() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree/rule?treeid=1&level=2&order=0&method_override=delete"));
}

/** Deleting via DELETE with a body removes the rule column. */
@Test
public void handleRuleDELETE() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.deleteQuery(tsdb,
      "/api/tree/rule", "{\"treeId\":1,\"level\":1,\"order\":0}");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus());
  assertEquals(3, storage.numColumns(new byte[] { 0, 1 }));
}

/** An unsupported HTTP verb on /api/tree/rule is rejected. */
@Test (expected = BadRequestException.class)
public void handleRuleBadMethod() throws Exception {
  final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1,
      HttpMethod.TRACE, "/api/tree/rule");
  rpc.execute(tsdb, new HttpQuery(tsdb, req, NettyMocks.fakeChannel()));
}
/** GET is not supported on the bulk /api/tree/rules endpoint. */
@Test (expected = BadRequestException.class)
public void handleRulesGetQS() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb, "/api/tree/rules?treeid=1"));
}

/** Bulk POST merges the rule set with what is already stored. */
@Test
public void handleRulesPOST() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.postQuery(tsdb,
      "/api/tree/rules", "[{\"treeId\":1,\"level\":0,\"order\":0,\"type\":" +
      "\"METRIC\"},{\"treeId\":1,\"level\":0,\"order\":1,\"type\":\"tagk\"," +
      "\"field\":\"fqdn\"},{\"treeId\":1,\"level\":1,\"order\":0,\"type\":" +
      "\"tagk\",\"field\":\"host\"}]");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus());
  assertEquals(5, storage.numColumns(new byte[] { 0, 1 }));
  final String rule = new String(storage.getColumn(new byte[] { 0, 1 },
      "tree_rule:0:0".getBytes(MockBase.ASCII())), MockBase.ASCII());
  assertTrue(rule.contains("\"type\":\"METRIC\""));
  assertTrue(rule.contains("description\":\"Host Name\""));
}
/**
 * Posting an empty rule list must fail.
 * Fixed payload: the original sent "[]]" (malformed JSON), which would have
 * failed on a parse error rather than exercising the intended empty-list
 * rejection path; the test name says "Empty", so send the empty array.
 */
@Test (expected = BadRequestException.class)
public void handleRulesPOSTEmpty() throws Exception {
  setupStorage();
  HttpQuery query = NettyMocks.postQuery(tsdb,
      "/api/tree/rules", "[]");
  rpc.execute(tsdb, query);
}
/** Bulk PUT replaces the rule set; unsupplied fields are cleared. */
@Test
public void handleRulesPUT() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.putQuery(tsdb,
      "/api/tree/rules", "[{\"treeId\":1,\"level\":0,\"order\":0,\"type\":" +
      "\"METRIC\"},{\"treeId\":1,\"level\":0,\"order\":1,\"type\":\"tagk\"," +
      "\"field\":\"fqdn\"},{\"treeId\":1,\"level\":1,\"order\":0,\"type\":" +
      "\"tagk\",\"field\":\"host\"}]");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus());
  assertEquals(5, storage.numColumns(new byte[] { 0, 1 }));
  final String rule = new String(storage.getColumn(new byte[] { 0, 1 },
      "tree_rule:0:0".getBytes(MockBase.ASCII())), MockBase.ASCII());
  assertTrue(rule.contains("\"type\":\"METRIC\""));
  assertFalse(rule.contains("\"description\":\"Host Name\""));
}

/** All rules in a bulk request must reference the same tree. */
@Test (expected = BadRequestException.class)
public void handleRulesPOSTTreeMissmatch() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.postQuery(tsdb,
      "/api/tree/rules", "[{\"treeId\":2,\"level\":0,\"order\":0,\"type\":" +
      "\"METRIC\"},{\"treeId\":1,\"level\":0,\"order\":1,\"type\":\"tagk\"," +
      "\"field\":\"fqdn\"},{\"treeId\":1,\"level\":1,\"order\":0,\"type\":" +
      "\"tagk\",\"field\":\"host\"}]"));
}

/** Bulk delete via query string removes all rule columns. */
@Test
public void handleRulesDeleteQS() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree/rules?treeid=1&method_override=delete");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus());
  assertEquals(2, storage.numColumns(new byte[] { 0, 1 }));
}

/** Bulk delete via DELETE removes all rule columns. */
@Test
public void handleRulesDelete() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.deleteQuery(tsdb,
      "/api/tree/rules?treeid=1", "");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus());
  assertEquals(2, storage.numColumns(new byte[] { 0, 1 }));
}

/** Bulk delete on a nonexistent tree fails. */
@Test (expected = BadRequestException.class)
public void handleRulesDeleteTreeNotFound() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.deleteQuery(tsdb,
      "/api/tree/rules?treeid=5", ""));
}
/** A test run over one known TSUID reports the leaf it would add. */
@Test
public void handleTestQS() throws Exception {
  setupStorage();
  setupBranch();
  setupTSMeta();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree/test?treeid=1&tsuids=000001000001000001000002000002");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("Adding leaf"));
  assertTrue(content.contains("000001000001000001000002000002"));
}

/** A test run over multiple TSUIDs reports each one, known or not. */
@Test
public void handleTestQSMulti() throws Exception {
  setupStorage();
  setupBranch();
  setupTSMeta();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree/test?treeid=1&tsuids=000001000001000001000002000002," +
      "000001000001000001000002000003");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("Adding leaf"));
  assertTrue(content.contains("000001000001000001000002000002"));
  assertTrue(content.contains("000001000001000001000002000003"));
  assertTrue(content.contains("Unable to locate TSUID meta data"));
}

/** POST body variant of the single-TSUID test run. */
@Test
public void handleTestPOST() throws Exception {
  setupStorage();
  setupBranch();
  setupTSMeta();
  final HttpQuery query = NettyMocks.postQuery(tsdb,
      "/api/tree/test", "{\"treeId\":1,\"tsuids\":[" +
      "\"000001000001000001000002000002\"]}");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("Adding leaf"));
  assertTrue(content.contains("000001000001000001000002000002"));
}

/** PUT body variant of the single-TSUID test run. */
@Test
public void handleTestPUT() throws Exception {
  setupStorage();
  setupBranch();
  setupTSMeta();
  final HttpQuery query = NettyMocks.putQuery(tsdb,
      "/api/tree/test", "{\"treeId\":1,\"tsuids\":[" +
      "\"000001000001000001000002000002\"]}");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("Adding leaf"));
  assertTrue(content.contains("000001000001000001000002000002"));
}
@Test
public void handleTestPOSTMulti() throws Exception {
setupStorage();
setupBranch();
setupTSMeta();
HttpQuery query = NettyMocks.postQuery(tsdb,
"/api/tree/test", "{\"treeId\":1,\"tsuids\":[" +
"\"000001000001000001000002000002\"," +
"\"000001000001000001000002000003\"]}");
rpc.execute(tsdb, query);
assertEquals(HttpResponseStatus.OK, query.response().getStatus());
assertTrue(query.response().getContent().toString(MockBase.ASCII())
.contains("Adding leaf"));
assertTrue(query.response().getContent().toString(MockBase.ASCII())
.contains("000001000001000001000002000002"));
assertTrue(query.response().getContent().toString(MockBase.ASCII())
.contains("000001000001000001000002000003"));
assertTrue(query.response().getContent().toString(MockBase.ASCII())
.contains("Unable to locate TSUID meta data"));
}
/**
 * Testing a TSUID that has no stored TSMeta still returns 200 but the body
 * must flag the missing meta data for that TSUID.
 */
@Test
public void handleTestTSUIDNotFound() throws Exception {
  setupStorage();
  setupBranch();
  setupTSMeta();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree/test?treeid=1&tsuids=000001000001000001000002000003");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  // Decode the response body once instead of once per assertion.
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("Unable to locate TSUID meta data"));
  assertTrue(content.contains("000001000001000001000002000003"));
}
/**
 * Flushing the name row for UID {0,0,2} before the test call simulates a
 * no-such-UID condition; the response must report the missing UID name.
 */
@Test
public void handleTestNSU() throws Exception {
  setupStorage();
  setupBranch();
  setupTSMeta();
  storage.flushRow(new byte[] { 0, 0, 2 });
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree/test?treeid=1&tsuids=000001000001000001000002000002");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  // Decode the response body once instead of once per assertion.
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("TSUID was missing a UID name"));
  assertTrue(content.contains("000001000001000001000002000002"));
}
/** Running the test endpoint against an unknown tree ID (3) must fail. */
@Test (expected = BadRequestException.class)
public void handleTestTreeNotFound() throws Exception {
  setupStorage();
  setupBranch();
  setupTSMeta();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree/test?treeid=3&tsuids=000001000001000001000002000002"));
}
/** Omitting the treeid query parameter must be rejected. */
@Test (expected = BadRequestException.class)
public void handleTestMissingTreeId() throws Exception {
  setupStorage();
  setupBranch();
  setupTSMeta();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree/test?tsuids=000001000001000001000002000002"));
}
/** A query-string test request without any tsuids must be rejected. */
@Test (expected = BadRequestException.class)
public void handleTestQSMissingTSUIDs() throws Exception {
  setupStorage();
  setupBranch();
  setupTSMeta();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree/test?treeid=1"));
}
/** A POST test request whose JSON body lacks the tsuids list must be rejected. */
@Test (expected = BadRequestException.class)
public void handleTestPOSTMissingTSUIDs() throws Exception {
  setupStorage();
  setupBranch();
  setupTSMeta();
  rpc.execute(tsdb, NettyMocks.postQuery(tsdb,
      "/api/tree/test", "{\"treeId\":1}"));
}
/** An unsupported HTTP method (TRACE) on the test endpoint must be rejected. */
@Test (expected = BadRequestException.class)
public void handleTestBadMethod() throws Exception {
  final HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1,
      HttpMethod.TRACE, "/api/tree/test");
  rpc.execute(tsdb, new HttpQuery(tsdb, request, NettyMocks.fakeChannel()));
}
/**
 * GET on the collisions endpoint without a TSUID filter should return both
 * collision entries stored by setupStorage.
 */
@Test
public void handleCollissionsQS() throws Exception {
  setupStorage();
  setupBranch();
  setupTSMeta();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree/collisions?treeid=1");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  // Decode the response body once instead of once per assertion.
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"010101\":\"AAAAAA\""));
  assertTrue(content.contains("\"020202\":\"BBBBBB\""));
}
/** Filtering collisions by a single TSUID returns only that entry. */
@Test
public void handleCollissionsQSSingleTSUID() throws Exception {
  setupStorage();
  final HttpQuery httpQuery = NettyMocks.getQuery(tsdb,
      "/api/tree/collisions?treeid=1&tsuids=010101");
  rpc.execute(tsdb, httpQuery);
  assertEquals(HttpResponseStatus.OK, httpQuery.response().getStatus());
  final String body =
      httpQuery.response().getContent().toString(MockBase.ASCII());
  assertEquals("{\"010101\":\"AAAAAA\"}", body);
}
/**
 * Filtering collisions by a comma-separated TSUID list returns every
 * matching entry.
 */
@Test
public void handleCollissionsQSTSUIDs() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree/collisions?treeid=1&tsuids=010101,020202");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  // Decode the response body once instead of once per assertion.
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"010101\":\"AAAAAA\""));
  assertTrue(content.contains("\"020202\":\"BBBBBB\""));
}
/** Filtering by an unknown TSUID yields an empty JSON object, not an error. */
@Test
public void handleCollissionsQSTSUIDNotFound() throws Exception {
  setupStorage();
  final HttpQuery httpQuery = NettyMocks.getQuery(tsdb,
      "/api/tree/collisions?treeid=1&tsuids=030101");
  rpc.execute(tsdb, httpQuery);
  assertEquals(HttpResponseStatus.OK, httpQuery.response().getStatus());
  final String body =
      httpQuery.response().getContent().toString(MockBase.ASCII());
  assertEquals("{}", body);
}
/**
 * POST on the collisions endpoint without a TSUID filter should return both
 * stored collision entries.
 */
@Test
public void handleCollissionsPOST() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.postQuery(tsdb,
      "/api/tree/collisions", "{\"treeId\":1}");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  // Decode the response body once instead of once per assertion.
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"010101\":\"AAAAAA\""));
  assertTrue(content.contains("\"020202\":\"BBBBBB\""));
}
/** POST with a single-element tsuids list returns only that collision. */
@Test
public void handleCollissionsPOSTSingleTSUID() throws Exception {
  setupStorage();
  final HttpQuery httpQuery = NettyMocks.postQuery(tsdb,
      "/api/tree/collisions", "{\"treeId\":1,\"tsuids\":[\"020202\"]}");
  rpc.execute(tsdb, httpQuery);
  assertEquals(HttpResponseStatus.OK, httpQuery.response().getStatus());
  final String body =
      httpQuery.response().getContent().toString(MockBase.ASCII());
  assertEquals("{\"020202\":\"BBBBBB\"}", body);
}
/**
 * POST with a multi-element tsuids list returns every matching collision.
 */
@Test
public void handleCollissionsPOSTTSUIDs() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.postQuery(tsdb,
      "/api/tree/collisions", "{\"treeId\":1,\"tsuids\":" +
      "[\"010101\",\"020202\"]}");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  // Decode the response body once instead of once per assertion.
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"010101\":\"AAAAAA\""));
  assertTrue(content.contains("\"020202\":\"BBBBBB\""));
}
/** Requesting collisions for an unknown tree ID (5) must fail. */
@Test (expected = BadRequestException.class)
public void handleCollissionsTreeNotFound() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree/collisions?treeid=5"));
}
/** Requesting collisions without a treeid must be rejected. */
@Test (expected = BadRequestException.class)
public void handleCollissionsMissingTreeId() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree/collisions"));
}
/** An unsupported HTTP method (TRACE) on the collisions endpoint must be rejected. */
@Test (expected = BadRequestException.class)
public void handleCollissionsBadMethod() throws Exception {
  final HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1,
      HttpMethod.TRACE, "/api/tree/collisions");
  rpc.execute(tsdb, new HttpQuery(tsdb, request, NettyMocks.fakeChannel()));
}
/**
 * GET on the notmatched endpoint without a TSUID filter should return both
 * not-matched entries stored by setupStorage.
 */
@Test
public void handleNotMatchedQS() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree/notmatched?treeid=1");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  // Decode the response body once instead of once per assertion.
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"010101\":\"Failed rule 0:0\""));
  assertTrue(content.contains("\"020202\":\"Failed rule 1:1\""));
}
/** Filtering not-matched entries by a single TSUID returns only that entry. */
@Test
public void handleNotMatchedQSSingleTSUID() throws Exception {
  setupStorage();
  final HttpQuery httpQuery = NettyMocks.getQuery(tsdb,
      "/api/tree/notmatched?treeid=1&tsuids=010101");
  rpc.execute(tsdb, httpQuery);
  assertEquals(HttpResponseStatus.OK, httpQuery.response().getStatus());
  final String body =
      httpQuery.response().getContent().toString(MockBase.ASCII());
  assertEquals("{\"010101\":\"Failed rule 0:0\"}", body);
}
/**
 * Filtering not-matched entries by a comma-separated TSUID list returns
 * every matching entry.
 */
@Test
public void handleNotMatchedQSTSUIDs() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.getQuery(tsdb,
      "/api/tree/notmatched?treeid=1&tsuids=010101,020202");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  // Decode the response body once instead of once per assertion.
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"010101\":\"Failed rule 0:0\""));
  assertTrue(content.contains("\"020202\":\"Failed rule 1:1\""));
}
/** Filtering by an unknown TSUID yields an empty JSON object, not an error. */
@Test
public void handleNotMatchedQSTSUIDNotFound() throws Exception {
  setupStorage();
  final HttpQuery httpQuery = NettyMocks.getQuery(tsdb,
      "/api/tree/notmatched?treeid=1&tsuids=030101");
  rpc.execute(tsdb, httpQuery);
  assertEquals(HttpResponseStatus.OK, httpQuery.response().getStatus());
  final String body =
      httpQuery.response().getContent().toString(MockBase.ASCII());
  assertEquals("{}", body);
}
/**
 * POST on the notmatched endpoint without a TSUID filter should return both
 * stored not-matched entries.
 */
@Test
public void handleNotMatchedPOST() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.postQuery(tsdb,
      "/api/tree/notmatched", "{\"treeId\":1}");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  // Decode the response body once instead of once per assertion.
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"010101\":\"Failed rule 0:0\""));
  assertTrue(content.contains("\"020202\":\"Failed rule 1:1\""));
}
/** POST with a single-element tsuids list returns only that not-matched entry. */
@Test
public void handleNotMatchedPOSTSingleTSUID() throws Exception {
  setupStorage();
  final HttpQuery httpQuery = NettyMocks.postQuery(tsdb,
      "/api/tree/notmatched", "{\"treeId\":1,\"tsuids\":[\"020202\"]}");
  rpc.execute(tsdb, httpQuery);
  assertEquals(HttpResponseStatus.OK, httpQuery.response().getStatus());
  final String body =
      httpQuery.response().getContent().toString(MockBase.ASCII());
  assertEquals("{\"020202\":\"Failed rule 1:1\"}", body);
}
/**
 * POST with a multi-element tsuids list returns every matching not-matched
 * entry.
 */
@Test
public void handleNotMatchedPOSTTSUIDs() throws Exception {
  setupStorage();
  final HttpQuery query = NettyMocks.postQuery(tsdb,
      "/api/tree/notmatched", "{\"treeId\":1,\"tsuids\":" +
      "[\"010101\",\"020202\"]}");
  rpc.execute(tsdb, query);
  assertEquals(HttpResponseStatus.OK, query.response().getStatus());
  // Decode the response body once instead of once per assertion.
  final String content =
      query.response().getContent().toString(MockBase.ASCII());
  assertTrue(content.contains("\"010101\":\"Failed rule 0:0\""));
  assertTrue(content.contains("\"020202\":\"Failed rule 1:1\""));
}
/** Requesting not-matched entries for an unknown tree ID (5) must fail. */
@Test (expected = BadRequestException.class)
public void handleNotMatchedNotFound() throws Exception {
  setupStorage();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree/notmatched?treeid=5"));
}
/** Requesting not-matched entries without a treeid must be rejected. */
@Test (expected = BadRequestException.class)
public void handleNotMatchedMissingTreeId() throws Exception {
  setupStorage();
  setupBranch();
  setupTSMeta();
  rpc.execute(tsdb, NettyMocks.getQuery(tsdb,
      "/api/tree/notmatched"));
}
/** An unsupported HTTP method (TRACE) on the notmatched endpoint must be rejected. */
@Test (expected = BadRequestException.class)
public void handleNotMatchedBadMethod() throws Exception {
  final HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1,
      HttpMethod.TRACE, "/api/tree/notmatched");
  rpc.execute(tsdb, new HttpQuery(tsdb, request, NettyMocks.fakeChannel()));
}
/**
 * Sets up objects in MockBase including two trees, rule sets, root branches,
 * a child branch, leaves and some collisions and not-matched entries. These
 * are used by most of the tests so they're all here.
 * @throws Exception if reflective invocation of the private serialization
 * helpers fails
 */
private void setupStorage() throws Exception {
  Tree tree = TestTree.buildTestTree();
  // store root
  TreeMap<Integer, String> root_path = new TreeMap<Integer, String>();
  Branch root = new Branch(tree.getTreeId());
  root.setDisplayName("ROOT");
  root_path.put(0, "ROOT");
  root.prependParentPath(root_path);
  storage.addColumn(root.compileBranchId(), Tree.TREE_FAMILY(),
      "branch".getBytes(MockBase.ASCII()),
      (byte[])branchToStorageJson.invoke(root));
  // store the first tree
  byte[] key = new byte[] { 0, 1 };
  storage.addColumn(key, Tree.TREE_FAMILY(), "tree".getBytes(MockBase.ASCII()),
      (byte[])TreetoStorageJson.invoke(TestTree.buildTestTree()));
  TreeRule rule = new TreeRule(1);
  rule.setField("host");
  rule.setDescription("Hostname rule");
  rule.setType(TreeRuleType.TAGK);
  // NOTE(review): setDescription is called twice; the second call overwrites
  // the first — the earlier call was presumably meant to be setNotes. Confirm.
  rule.setDescription("Host Name");
  storage.addColumn(key, Tree.TREE_FAMILY(),
      "tree_rule:0:0".getBytes(MockBase.ASCII()),
      JSON.serializeToBytes(rule));
  rule = new TreeRule(1);
  rule.setField("");
  rule.setLevel(1);
  rule.setNotes("Metric rule");
  rule.setType(TreeRuleType.METRIC);
  storage.addColumn(key, Tree.TREE_FAMILY(),
      "tree_rule:1:0".getBytes(MockBase.ASCII()),
      JSON.serializeToBytes(rule));
  // root branch for tree 1
  root = new Branch(1);
  root.setDisplayName("ROOT");
  root_path = new TreeMap<Integer, String>();
  root_path.put(0, "ROOT");
  root.prependParentPath(root_path);
  storage.addColumn(key, Tree.TREE_FAMILY(),
      "branch".getBytes(MockBase.ASCII()),
      (byte[])branchToStorageJson.invoke(root));
  // tree 2
  key = new byte[] { 0, 2 };
  Tree tree2 = new Tree();
  tree2.setTreeId(2);
  tree2.setName("2nd Tree");
  tree2.setDescription("Other Tree");
  storage.addColumn(key, Tree.TREE_FAMILY(), "tree".getBytes(MockBase.ASCII()),
      (byte[])TreetoStorageJson.invoke(tree2));
  rule = new TreeRule(2);
  rule.setField("host");
  rule.setType(TreeRuleType.TAGK);
  storage.addColumn(key, Tree.TREE_FAMILY(),
      "tree_rule:0:0".getBytes(MockBase.ASCII()),
      JSON.serializeToBytes(rule));
  rule = new TreeRule(2);
  rule.setField("");
  rule.setLevel(1);
  rule.setType(TreeRuleType.METRIC);
  storage.addColumn(key, Tree.TREE_FAMILY(),
      "tree_rule:1:0".getBytes(MockBase.ASCII()),
      JSON.serializeToBytes(rule));
  root = new Branch(2);
  root.setDisplayName("ROOT");
  root_path = new TreeMap<Integer, String>();
  root_path.put(0, "ROOT");
  root.prependParentPath(root_path);
  storage.addColumn(key, Tree.TREE_FAMILY(),
      "branch".getBytes(MockBase.ASCII()),
      (byte[])branchToStorageJson.invoke(root));
  // sprinkle in some collisions and no matches for fun
  // collisions: qualifier = COLLISION_PREFIX + raw TSUID bytes
  key = new byte[] { 0, 1, 1 };
  String tsuid = "010101";
  byte[] qualifier = new byte[Tree.COLLISION_PREFIX().length +
      (tsuid.length() / 2)];
  System.arraycopy(Tree.COLLISION_PREFIX(), 0, qualifier, 0,
      Tree.COLLISION_PREFIX().length);
  byte[] tsuid_bytes = UniqueId.stringToUid(tsuid);
  System.arraycopy(tsuid_bytes, 0, qualifier, Tree.COLLISION_PREFIX().length,
      tsuid_bytes.length);
  storage.addColumn(key, Tree.TREE_FAMILY(), qualifier,
      "AAAAAA".getBytes(MockBase.ASCII()));
  tsuid = "020202";
  qualifier = new byte[Tree.COLLISION_PREFIX().length +
      (tsuid.length() / 2)];
  System.arraycopy(Tree.COLLISION_PREFIX(), 0, qualifier, 0,
      Tree.COLLISION_PREFIX().length);
  tsuid_bytes = UniqueId.stringToUid(tsuid);
  System.arraycopy(tsuid_bytes, 0, qualifier, Tree.COLLISION_PREFIX().length,
      tsuid_bytes.length);
  storage.addColumn(key, Tree.TREE_FAMILY(), qualifier,
      "BBBBBB".getBytes(MockBase.ASCII()));
  // not matched: qualifier = NOT_MATCHED_PREFIX + raw TSUID bytes
  key = new byte[] { 0, 1, 2 };
  tsuid = "010101";
  qualifier = new byte[Tree.NOT_MATCHED_PREFIX().length +
      (tsuid.length() / 2)];
  System.arraycopy(Tree.NOT_MATCHED_PREFIX(), 0, qualifier, 0,
      Tree.NOT_MATCHED_PREFIX().length);
  tsuid_bytes = UniqueId.stringToUid(tsuid);
  System.arraycopy(tsuid_bytes, 0, qualifier, Tree.NOT_MATCHED_PREFIX().length,
      tsuid_bytes.length);
  storage.addColumn(key, Tree.TREE_FAMILY(), qualifier,
      "Failed rule 0:0".getBytes(MockBase.ASCII()));
  tsuid = "020202";
  qualifier = new byte[Tree.NOT_MATCHED_PREFIX().length +
      (tsuid.length() / 2)];
  System.arraycopy(Tree.NOT_MATCHED_PREFIX(), 0, qualifier, 0,
      Tree.NOT_MATCHED_PREFIX().length);
  tsuid_bytes = UniqueId.stringToUid(tsuid);
  System.arraycopy(tsuid_bytes, 0, qualifier, Tree.NOT_MATCHED_PREFIX().length,
      tsuid_bytes.length);
  storage.addColumn(key, Tree.TREE_FAMILY(), qualifier,
      "Failed rule 1:1".getBytes(MockBase.ASCII()));
  // drop some branches in for tree 1
  Branch branch = new Branch(1);
  TreeMap<Integer, String> path = new TreeMap<Integer, String>();
  path.put(0, "ROOT");
  path.put(1, "sys");
  path.put(2, "cpu");
  branch.prependParentPath(path);
  branch.setDisplayName("cpu");
  storage.addColumn(branch.compileBranchId(), Tree.TREE_FAMILY(),
      "branch".getBytes(MockBase.ASCII()),
      (byte[])branchToStorageJson.invoke(branch));
  Leaf leaf = new Leaf("user", "000001000001000001");
  qualifier = leaf.columnQualifier();
  storage.addColumn(branch.compileBranchId(), Tree.TREE_FAMILY(),
      qualifier, (byte[])LeaftoStorageJson.invoke(leaf));
  leaf = new Leaf("nice", "000002000002000002");
  qualifier = leaf.columnQualifier();
  storage.addColumn(branch.compileBranchId(), Tree.TREE_FAMILY(),
      qualifier, (byte[])LeaftoStorageJson.invoke(leaf));
  // child branch ROOT/sys/cpu/mboard (reuses the path map from above)
  branch = new Branch(1);
  path.put(3, "mboard");
  branch.prependParentPath(path);
  branch.setDisplayName("mboard");
  storage.addColumn(branch.compileBranchId(), Tree.TREE_FAMILY(),
      "branch".getBytes(MockBase.ASCII()),
      (byte[])branchToStorageJson.invoke(branch));
  leaf = new Leaf("Asus", "000003000003000003");
  qualifier = leaf.columnQualifier();
  storage.addColumn(branch.compileBranchId(), Tree.TREE_FAMILY(),
      qualifier, (byte[])LeaftoStorageJson.invoke(leaf));
}
/**
 * Sets up some UID name maps in storage for use when loading leaves from a
 * branch. Without these, the unit tests will fail since the leaves couldn't
 * find their name maps.
 */
private void setupBranch() {
  // All three name mappings hang off the same UID row, so build the key once.
  final byte[] uid = new byte[] { 0, 0, 1 };
  storage.addColumn(uid, NAME_FAMILY,
      "metrics".getBytes(MockBase.ASCII()),
      "sys.cpu.0".getBytes(MockBase.ASCII()));
  storage.addColumn(uid, NAME_FAMILY,
      "tagk".getBytes(MockBase.ASCII()),
      "host".getBytes(MockBase.ASCII()));
  storage.addColumn(uid, NAME_FAMILY,
      "tagv".getBytes(MockBase.ASCII()),
      "web01".getBytes(MockBase.ASCII()));
}
/**
 * Sets up a TSMeta object and associated UIDMeta objects in storage for
 * testing the "test" call. These are necessary as the TSMeta is loaded when
 * parsed through the tree.
 * @throws Exception if reflective invocation of the private serialization
 * helpers fails
 */
private void setupTSMeta() throws Exception {
  // TSMeta for metric {0,0,1} with tags host={0,0,1} and type/user={0,0,2}
  final TSMeta meta = new TSMeta("000001000001000001000002000002");
  storage.addColumn(UniqueId.stringToUid("000001000001000001000002000002"),
      NAME_FAMILY, "ts_meta".getBytes(MockBase.ASCII()),
      (byte[])TSMetagetStorageJSON.invoke(meta));
  final UIDMeta metric = new UIDMeta(UniqueIdType.METRIC, new byte[] { 0, 0, 1 },
      "sys.cpu.0");
  storage.addColumn(new byte[] { 0, 0, 1 }, NAME_FAMILY,
      "metric_meta".getBytes(MockBase.ASCII()),
      (byte[])UIDMetagetStorageJSON.invoke(metric));
  final UIDMeta tagk1 = new UIDMeta(UniqueIdType.TAGK, new byte[] { 0, 0, 1 },
      "host");
  storage.addColumn(new byte[] { 0, 0, 1 }, NAME_FAMILY,
      "tagk_meta".getBytes(MockBase.ASCII()),
      (byte[])UIDMetagetStorageJSON.invoke(tagk1));
  final UIDMeta tagv1 = new UIDMeta(UniqueIdType.TAGV, new byte[] { 0, 0, 1 },
      "web-01.lga.mysite.com");
  storage.addColumn(new byte[] { 0, 0, 1 }, NAME_FAMILY,
      "tagv_meta".getBytes(MockBase.ASCII()),
      (byte[])UIDMetagetStorageJSON.invoke(tagv1));
  final UIDMeta tagk2 = new UIDMeta(UniqueIdType.TAGK, new byte[] { 0, 0, 2 },
      "type");
  storage.addColumn(new byte[] { 0, 0, 2 }, NAME_FAMILY,
      "tagk_meta".getBytes(MockBase.ASCII()),
      (byte[])UIDMetagetStorageJSON.invoke(tagk2));
  final UIDMeta tagv2 = new UIDMeta(UniqueIdType.TAGV, new byte[] { 0, 0, 2 },
      "user");
  storage.addColumn(new byte[] { 0, 0, 2 }, NAME_FAMILY,
      "tagv_meta".getBytes(MockBase.ASCII()),
      (byte[])UIDMetagetStorageJSON.invoke(tagv2));
  // plain name maps for the {0,0,2} UID; setupBranch covers {0,0,1}.
  // handleTestNSU flushes this row to simulate a missing UID name.
  storage.addColumn(new byte[] { 0, 0, 2 }, NAME_FAMILY,
      "tagk".getBytes(MockBase.ASCII()),
      "type".getBytes(MockBase.ASCII()));
  storage.addColumn(new byte[] { 0, 0, 2 }, NAME_FAMILY,
      "tagv".getBytes(MockBase.ASCII()),
      "user".getBytes(MockBase.ASCII()));
}
}
|
package com.avaje.ebeaninternal.server.persist.dml;
import java.sql.SQLException;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.persistence.PersistenceException;
import com.avaje.ebeaninternal.api.SpiTransaction;
import com.avaje.ebeaninternal.server.core.PersistRequest;
import com.avaje.ebeaninternal.server.core.PersistRequestBean;
import com.avaje.ebeaninternal.server.lib.util.StringHelper;
import com.avaje.ebeaninternal.server.persist.BeanPersister;
/**
 * Bean persister that uses the Handler and Meta objects.
 * <p>
 * The design of this is based on the immutable Meta objects. They hold
 * information in the form of lists of Bindable objects. This effectively
 * flattens the structure of the bean with embedded and associated objects into
 * a flat list of Bindable objects.
 * </p>
 */
public final class DmlBeanPersister implements BeanPersister {

    private static final Logger logger = Logger.getLogger(DmlBeanPersister.class.getName());

    /** Immutable meta data used to create update handlers. */
    private final UpdateMeta updateMeta;

    /** Immutable meta data used to create insert handlers. */
    private final InsertMeta insertMeta;

    /** Immutable meta data used to create delete handlers. */
    private final DeleteMeta deleteMeta;

    public DmlBeanPersister(UpdateMeta updateMeta, InsertMeta insertMeta, DeleteMeta deleteMeta) {
        this.updateMeta = updateMeta;
        this.insertMeta = insertMeta;
        this.deleteMeta = deleteMeta;
    }

    /**
     * Execute the bean delete request.
     */
    public void delete(PersistRequestBean<?> request) {
        DeleteHandler delete = new DeleteHandler(request, deleteMeta);
        execute(request, delete);
    }

    /**
     * Execute the bean insert request.
     */
    public void insert(PersistRequestBean<?> request) {
        InsertHandler insert = new InsertHandler(request, insertMeta);
        execute(request, insert);
    }

    /**
     * Execute the bean update request.
     */
    public void update(PersistRequestBean<?> request) {
        UpdateHandler update = new UpdateHandler(request, updateMeta);
        execute(request, update);
    }

    /**
     * Execute the request taking batching into account.
     * <p>
     * On SQLException the error (including the bind log) is written to the
     * transaction log when summary logging is enabled, and rethrown wrapped
     * in a PersistenceException with the original as its cause.
     * </p>
     */
    private void execute(PersistRequest request, PersistHandler handler) {
        SpiTransaction trans = request.getTransaction();
        boolean batchThisRequest = trans.isBatchThisRequest();
        try {
            handler.bind();
            if (batchThisRequest) {
                handler.addBatch();
            } else {
                // immediate execution (insert, update or delete)
                handler.execute();
            }
        } catch (SQLException e) {
            // flatten newlines so the message fits on one transaction log line
            String errMsg = StringHelper.replaceStringMulti(e.getMessage(), new String[]{"\r","\n"}, "\\n ");
            String msg = "ERROR executing DML bindLog["+handler.getBindLog()+"] error["+errMsg+"]";
            if (trans.isLogSummary()) {
                trans.logInternal(msg);
            }
            throw new PersistenceException(msg, e);
        } finally {
            if (!batchThisRequest) {
                // only close immediately-executed handlers; batched ones are
                // closed when the batch flushes. handler is never null here,
                // so the previous null check was redundant and removed.
                try {
                    handler.close();
                } catch (SQLException e) {
                    logger.log(Level.SEVERE, null, e);
                }
            }
        }
    }
}
|
package org.jboss.errai.marshalling.client.api;
import org.jboss.errai.jpa.sync.client.shared.SyncRequestOperation;
import org.jboss.errai.jpa.sync.client.shared.SyncRequestOperation.Type;
import org.jboss.errai.marshalling.client.Marshalling;
import org.jboss.errai.marshalling.client.api.json.EJObject;
import org.jboss.errai.marshalling.client.api.json.EJValue;
import org.jboss.errai.marshalling.client.marshallers.ObjectMarshaller;
/**
 * GWT/Errai generated marshaller for {@link SyncRequestOperation}.
 * NOTE(review): this class appears to be generated code — its structure and
 * the exact JSON wire format should not be hand-edited; confirm with the
 * Errai marshalling generator before changing behavior.
 */
public class Marshaller_o_j_e_j_s_c_s_SyncRequestOperation_1_Impl implements GeneratedMarshaller<SyncRequestOperation> {
  private SyncRequestOperation[] EMPTY_ARRAY = new SyncRequestOperation[0];
  private Marshaller<Type> org_jboss_errai_jpa_sync_client_shared_SyncRequestOperation_erraiD_Type = null;
  private Marshaller java_lang_Object = Marshalling.getMarshaller(Object.class);
  public SyncRequestOperation[] getEmptyArray() {
    return EMPTY_ARRAY;
  }
  // JSNI accessor: reads the private 'newState' field without a public getter.
  native static Object SyncRequestOperation_Object_newState(SyncRequestOperation instance) /*-{
    return instance.@org.jboss.errai.jpa.sync.client.shared.SyncRequestOperation::newState;
  }-*/;
  // JSNI accessor: writes the private 'newState' field without a public setter.
  native static void SyncRequestOperation_Object_newState(SyncRequestOperation instance, Object value) /*-{
    instance.@org.jboss.errai.jpa.sync.client.shared.SyncRequestOperation::newState = value;
  }-*/;
  /**
   * Decodes a SyncRequestOperation from EJValue JSON, honoring the session's
   * back-reference table keyed by "^ObjectID".
   */
  public SyncRequestOperation demarshall(EJValue a0, MarshallingSession a1) {
    lazyInit();
    EJObject obj = a0.isObject();
    if (obj == null) {
      return null;
    }
    String objId = obj.get("^ObjectID").isString().stringValue();
    if (a1.hasObject(objId)) {
      // already demarshalled in this session: return the cached instance
      return a1.getObject(SyncRequestOperation.class, objId);
    }
    // 'type' may be encoded as an enum object or a bare string
    final Type c0 = obj.get("type").isObject() != null ? Enum.valueOf(Type.class, obj.get("type").isObject().get("^EnumStringValue").isString().stringValue()) : obj.get("type").isString() != null ? Enum.valueOf(Type.class, obj.get("type").isString().stringValue()) : null;
    final Object c1 = ((ObjectMarshaller) java_lang_Object).demarshall(Object.class, obj.get("newState"), a1);
    final Object c2 = ((ObjectMarshaller) java_lang_Object).demarshall(Object.class, obj.get("expectedState"), a1);
    SyncRequestOperation entity = new SyncRequestOperation(c0, c1, c2);
    a1.recordObject(objId, entity);
    // re-apply newState via JSNI after registration (supports cyclic refs)
    if ((obj.containsKey("newState")) && (!obj.get("newState").isNull())) {
      SyncRequestOperation_Object_newState(entity, ((ObjectMarshaller) java_lang_Object).demarshall(Object.class, obj.get("newState"), a1));
    }
    return entity;
  }
  /**
   * Encodes a SyncRequestOperation to JSON; emits only the object ID when the
   * instance was already marshalled in this session.
   */
  public String marshall(SyncRequestOperation a0, MarshallingSession a1) {
    lazyInit();
    if (a0 == null) {
      return "null";
    }
    final boolean ref = a1.hasObject(a0);
    final StringBuilder json = new StringBuilder("{\"^EncodedType\":\"org.jboss.errai.jpa.sync.client.shared.SyncRequestOperation\",\"^ObjectID\"");
    json.append(":\"").append(a1.getObject(a0)).append("\"");
    if (ref) {
      return json.append("}").toString();
    }
    return json.append(",").append("\"type\":").append(a0.getType() != null ? new StringBuilder(64).append("{\"^EncodedType\":\"org.jboss.errai.jpa.sync.client.shared.SyncRequestOperation$Type\",\"^EnumStringValue\":\"").append(a0.getType().name()).append("\"}") : "null").append(",").append("\"newState\":").append(java_lang_Object.marshall(SyncRequestOperation_Object_newState(a0), a1)).append(",").append("\"expectedState\":").append(java_lang_Object.marshall(a0.getExpectedState(), a1)).append("}").toString();
  }
  // Lazily resolves the Type enum marshaller to avoid init-order problems.
  private void lazyInit() {
    if (org_jboss_errai_jpa_sync_client_shared_SyncRequestOperation_erraiD_Type == null) {
      org_jboss_errai_jpa_sync_client_shared_SyncRequestOperation_erraiD_Type = Marshalling.getMarshaller(Type.class);
    }
  }
}
|
/*
* Copyright (c) 2021 Daimler TSS GmbH
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0
*
* SPDX-License-Identifier: Apache-2.0
*
* Contributors:
* Daimler TSS GmbH - Initial Test
*
*/
package org.eclipse.dataspaceconnector.sql.pool.commons;
import org.eclipse.dataspaceconnector.spi.persistence.EdcPersistenceException;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import javax.sql.DataSource;
/**
 * Unit tests for CommonsConnectionPool borrow/return/close behavior against
 * a mocked DataSource and Connection.
 */
class CommonsConnectionPoolTest {
    @Test
    void getConnection() throws SQLException {
        Connection connection = Mockito.mock(Connection.class);
        PreparedStatement testQueryPreparedStatement = Mockito.mock(PreparedStatement.class);
        DataSource dataSource = Mockito.mock(DataSource.class);
        CommonsConnectionPoolConfig commonsConnectionPoolConfig = CommonsConnectionPoolConfig.Builder.newInstance().build();
        CommonsConnectionPool connectionPool = new CommonsConnectionPool(dataSource, commonsConnectionPoolConfig);
        Mockito.when(testQueryPreparedStatement.execute()).thenReturn(true);
        Mockito.when(connection.prepareStatement(Mockito.anyString())).thenReturn(testQueryPreparedStatement);
        Mockito.when(dataSource.getConnection()).thenReturn(connection);
        Connection result = connectionPool.getConnection();
        // Assert on the pool's result, not the mock (which is trivially non-null).
        Assertions.assertNotNull(result);
        Assertions.assertEquals(connection, result);
        Mockito.verify(dataSource, Mockito.atLeastOnce()).getConnection();
        Mockito.verify(connection, Mockito.atLeastOnce()).isClosed();
        Mockito.verify(connection, Mockito.atLeastOnce()).prepareStatement(Mockito.anyString());
        Mockito.verify(testQueryPreparedStatement, Mockito.atLeastOnce()).execute();
    }

    @Test
    void getConnectionAnyExceptionThrownThrowsSqlException() throws SQLException {
        DataSource dataSource = Mockito.mock(DataSource.class);
        CommonsConnectionPoolConfig commonsConnectionPoolConfig = CommonsConnectionPoolConfig.Builder.newInstance().build();
        CommonsConnectionPool connectionPool = new CommonsConnectionPool(dataSource, commonsConnectionPoolConfig);
        RuntimeException causingRuntimeException = new RuntimeException("intended to be thrown");
        Mockito.when(dataSource.getConnection()).thenThrow(causingRuntimeException);
        EdcPersistenceException exceptionWrappingRuntimeException = Assertions.assertThrows(EdcPersistenceException.class, connectionPool::getConnection);
        // the original RuntimeException must be preserved as the cause
        Assertions.assertNotNull(exceptionWrappingRuntimeException.getCause());
        Assertions.assertEquals(causingRuntimeException, exceptionWrappingRuntimeException.getCause());
        Mockito.verify(dataSource, Mockito.atLeastOnce()).getConnection();
    }

    @Test
    void getConnectionSqlExceptionThrownThrowsSame() throws SQLException {
        DataSource dataSource = Mockito.mock(DataSource.class);
        CommonsConnectionPoolConfig commonsConnectionPoolConfig = CommonsConnectionPoolConfig.Builder.newInstance().build();
        CommonsConnectionPool connectionPool = new CommonsConnectionPool(dataSource, commonsConnectionPoolConfig);
        SQLException causingSqlException = new SQLException("intended to be thrown");
        Mockito.when(dataSource.getConnection()).thenThrow(causingSqlException);
        EdcPersistenceException sqlException = Assertions.assertThrows(EdcPersistenceException.class, connectionPool::getConnection);
        // the original SQLException must be preserved as the cause
        Assertions.assertNotNull(sqlException.getCause());
        Assertions.assertEquals(causingSqlException, sqlException.getCause());
        Mockito.verify(dataSource, Mockito.atLeastOnce()).getConnection();
    }

    @Test
    void returnConnectionNullThrowsNullPointerException() {
        DataSource dataSource = Mockito.mock(DataSource.class);
        CommonsConnectionPoolConfig commonsConnectionPoolConfig = CommonsConnectionPoolConfig.Builder.newInstance().build();
        CommonsConnectionPool connectionPool = new CommonsConnectionPool(dataSource, commonsConnectionPoolConfig);
        Assertions.assertThrows(NullPointerException.class, () -> connectionPool.returnConnection(null));
    }

    @Test
    void returnConnectionUnknownThrowsIllegalStateException() {
        DataSource dataSource = Mockito.mock(DataSource.class);
        CommonsConnectionPoolConfig commonsConnectionPoolConfig = CommonsConnectionPoolConfig.Builder.newInstance().build();
        CommonsConnectionPool connectionPool = new CommonsConnectionPool(dataSource, commonsConnectionPoolConfig);
        // a connection unmanaged by the pool
        Connection connection = Mockito.mock(Connection.class);
        Assertions.assertThrows(IllegalStateException.class, () -> connectionPool.returnConnection(connection));
    }

    @Test
    void returnConnection() throws SQLException {
        Connection connection = Mockito.mock(Connection.class);
        PreparedStatement testQueryPreparedStatement = Mockito.mock(PreparedStatement.class);
        DataSource dataSource = Mockito.mock(DataSource.class);
        CommonsConnectionPoolConfig commonsConnectionPoolConfig = CommonsConnectionPoolConfig.Builder.newInstance().build();
        CommonsConnectionPool connectionPool = new CommonsConnectionPool(dataSource, commonsConnectionPoolConfig);
        Mockito.when(testQueryPreparedStatement.execute()).thenReturn(true);
        Mockito.when(connection.prepareStatement(Mockito.anyString())).thenReturn(testQueryPreparedStatement);
        Mockito.when(dataSource.getConnection()).thenReturn(connection);
        Connection result = connectionPool.getConnection();
        // Assert on the pool's result, not the mock (which is trivially non-null).
        Assertions.assertNotNull(result);
        Assertions.assertEquals(connection, result);
        connectionPool.returnConnection(result);
        Mockito.verify(dataSource, Mockito.atLeastOnce()).getConnection();
        Mockito.verify(connection, Mockito.atLeastOnce()).isClosed();
        Mockito.verify(connection, Mockito.atLeastOnce()).prepareStatement(Mockito.anyString());
        Mockito.verify(testQueryPreparedStatement, Mockito.atLeastOnce()).execute();
    }

    @Test
    void returnConnectionProperlyClosed() throws SQLException {
        Connection connection = Mockito.mock(Connection.class);
        PreparedStatement testQueryPreparedStatement = Mockito.mock(PreparedStatement.class);
        // test-on-return: execute() returning false marks the connection invalid
        Mockito.when(testQueryPreparedStatement.execute()).thenReturn(false);
        Mockito.when(connection.prepareStatement(Mockito.anyString())).thenReturn(testQueryPreparedStatement);
        DataSource dataSource = Mockito.mock(DataSource.class);
        Mockito.when(dataSource.getConnection()).thenReturn(connection);
        CommonsConnectionPoolConfig commonsConnectionPoolConfig = CommonsConnectionPoolConfig.Builder.newInstance()
                .testConnectionOnCreate(false)
                .testConnectionOnBorrow(false)
                .build();
        CommonsConnectionPool connectionPool = new CommonsConnectionPool(dataSource, commonsConnectionPoolConfig);
        Connection result = connectionPool.getConnection();
        // Assert on the pool's result, not the mock (which is trivially non-null).
        Assertions.assertNotNull(result);
        Assertions.assertEquals(connection, result);
        Mockito.when(connection.isClosed()).thenReturn(false);
        connectionPool.returnConnection(connection);
        Mockito.verify(dataSource, Mockito.atLeastOnce()).getConnection();
        Mockito.verify(connection, Mockito.atLeastOnce()).isClosed();
        Mockito.verify(connection, Mockito.atLeastOnce()).prepareStatement(Mockito.anyString());
        Mockito.verify(testQueryPreparedStatement, Mockito.atLeastOnce()).execute();
        Mockito.verify(connection, Mockito.atLeastOnce()).close();
    }

    @Test
    void closeProperlyClosesManagedConnections() throws SQLException {
        Connection connection = Mockito.mock(Connection.class);
        PreparedStatement testQueryPreparedStatement = Mockito.mock(PreparedStatement.class);
        Mockito.when(testQueryPreparedStatement.execute()).thenReturn(true);
        Mockito.when(connection.prepareStatement(Mockito.anyString())).thenReturn(testQueryPreparedStatement);
        DataSource dataSource = Mockito.mock(DataSource.class);
        Mockito.when(dataSource.getConnection()).thenReturn(connection);
        CommonsConnectionPoolConfig commonsConnectionPoolConfig = CommonsConnectionPoolConfig.Builder.newInstance().build();
        CommonsConnectionPool connectionPool = new CommonsConnectionPool(dataSource, commonsConnectionPoolConfig);
        Connection result = connectionPool.getConnection();
        // Assert on the pool's result, not the mock (which is trivially non-null).
        Assertions.assertNotNull(result);
        Assertions.assertEquals(connection, result);
        connectionPool.returnConnection(connection);
        connectionPool.close();
        Mockito.verify(dataSource, Mockito.atLeastOnce()).getConnection();
        Mockito.verify(connection, Mockito.atLeastOnce()).isClosed();
        Mockito.verify(connection, Mockito.atLeastOnce()).prepareStatement(Mockito.anyString());
        Mockito.verify(testQueryPreparedStatement, Mockito.atLeastOnce()).execute();
        Mockito.verify(connection, Mockito.atLeastOnce()).close();
    }
}
|
/*
Derby - Class org.apache.derbyTesting.functionTests.tests.lang.SystemCatalogTest
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.apache.derbyTesting.functionTests.tests.lang;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.ResultSet;
import org.apache.derbyTesting.junit.BaseJDBCTestCase;
import org.apache.derbyTesting.junit.JDBC;
import org.apache.derbyTesting.junit.TestConfiguration;
import junit.framework.Test;
import junit.framework.TestSuite;
/**
* Tests concerning the system catalogs.
*
* Retaining comment from previous .sql test:
* RESOLVE - add selects from sysdepends when simplified
*
*/
public class SystemCatalogTest extends BaseJDBCTestCase {
// Standard JUnit 3.x constructor; the name selects which test method to run.
public SystemCatalogTest(String name) {
super(name);
}
/**
 * Builds the test suite. The tests create and drop schema objects, so the
 * suite is wrapped in a single-use database decorator to keep those changes
 * out of the shared default database.
 */
public static Test suite() {
Test suite = TestConfiguration.defaultSuite(SystemCatalogTest.class);
return TestConfiguration.singleUseDatabaseDecorator(suite);
}
/**
* Test that the user cannot execute any DDL statements on the system tables.
* @throws SQLException
*/
public void testNoUserDDLOnSystemTables() throws SQLException {
    // Each entry pairs the expected SQLState with a DDL statement that must be rejected.
    // Dropping a system table fails with X0Y56; every other DDL attempt on the
    // SYS schema is rejected up front with 42X62.
    String[][] forbiddenDdl = {
        {"X0Y56", "drop table sys.systables"},
        {"42X62", "drop index sys.sysaliases_index2"},
        {"42X62", "create index trash on sys.systables(tableid)"},
        {"42X62", "create table sys.usertable(c1 int)"},
        {"42X62", "create view sys.userview as select * from sys.systables"},
        {"42X62", "alter table sys.systables drop column tablename"},
        {"42X62", "alter table sys.systables add column foo int"},
        {"42X62", "alter table sys.systables alter column tablename null"},
        {"42X62", "alter table sys.systables drop primary key"},
    };
    Statement stmt = createStatement();
    for (String[] entry : forbiddenDdl) {
        assertStatementError(entry[0], stmt, entry[1]);
    }
    stmt.close();
}
/**
* Test that the system tables cannot be changed by various DML statements.
*
* @throws SQLException
*/
public void testSystemCatalogsNotUpdatable() throws SQLException{
    Connection c = getConnection();
    Statement s = c.createStatement();
    c.setAutoCommit(false);
    // Fixed: each attempt now calls fail() when the forbidden statement unexpectedly
    // succeeds. Previously the test passed silently if no SQLException was thrown,
    // i.e. it could not actually detect a writable system catalog.
    try {
        s.executeUpdate("delete from sys.systables");
        fail("delete from a system table should have raised 42Y25");
    } catch (SQLException e) {
        assertSQLState("42Y25", e);
    }
    try {
        s.executeUpdate("update sys.systables set tablename = tablename || 'trash'");
        fail("update of a system table should have raised 42Y25");
    } catch (SQLException e) {
        assertSQLState("42Y25", e);
    }
    try {
        s.executeUpdate("insert into sys.systables select * from sys.systables");
        fail("insert into a system table should have raised 42Y25");
    } catch (SQLException e) {
        assertSQLState("42Y25", e);
    }
    try {
        // SELECT ... FOR UPDATE on a non-updatable table must be rejected (42Y90).
        s.executeQuery("select tablename from sys.systables for update of tablename");
        fail("select for update of a system table should have raised 42Y90");
    } catch (SQLException e) {
        assertSQLState("42Y90", e);
    }
    s.close();
    c.rollback();
    c.setAutoCommit(true);
}
/**
* Test various default store properties for the system tables.
*
* @throws SQLException
*/
public void testSystemCatalogStoreProperties() throws SQLException{
Statement s = createStatement();
// Register helper functions backed by TestPropertyInfo: gatp/gaip return the store
// properties of a table's heap conglomerate / an index conglomerate as one string.
s.execute("create function gatp(SCH VARCHAR(128), TBL VARCHAR(128)) RETURNS VARCHAR(1000) EXTERNAL NAME 'org.apache.derbyTesting.functionTests.util.TestPropertyInfo.getAllTableProperties' LANGUAGE JAVA PARAMETER STYLE JAVA");
s.execute("create function gaip(SCH VARCHAR(128), TBL VARCHAR(128)) RETURNS VARCHAR(1000) EXTERNAL NAME 'org.apache.derbyTesting.functionTests.util.TestPropertyInfo.getAllIndexProperties' LANGUAGE JAVA PARAMETER STYLE JAVA");
// get the properties for the heaps
ResultSet rs = s.executeQuery("select tablename,gatp('SYS', tablename) from sys.systables order by tablename");
boolean nonEmptyResultSet = false;
String tablename = null;
String sysdummy = "SYSDUMMY1";
// Expected heap defaults for every system table except SYSDUMMY1, which is a
// special in-memory table and reports no store properties at all.
String heapResult = "{ derby.storage.initialPages=1, derby.storage.minimumRecordSize=12, derby.storage.pageReservedSpace=0, derby.storage.pageSize=4096, derby.storage.reusableRecordId=false }";
while(rs.next()) {
nonEmptyResultSet = true;
tablename = rs.getString(1);
if (tablename.equals(sysdummy)) {
assertTrue(rs.getString(2).startsWith("{ }"));
} else {
// startsWith rather than equals: trailing content after the property list is tolerated.
assertTrue(rs.getString(2).startsWith(heapResult));
}
}
// Guard against the query silently returning no rows.
assertTrue(nonEmptyResultSet);
rs.close();
// get the properties for the indexes
rs = s.executeQuery("select conglomeratename, gaip('SYS', conglomeratename) from sys.sysconglomerates where isindex order by conglomeratename");
nonEmptyResultSet = false;
// Indexes differ from heaps: minimumRecordSize=1 and reusableRecordId=true.
String indexResult = "{ derby.storage.initialPages=1, derby.storage.minimumRecordSize=1, derby.storage.pageReservedSpace=0, derby.storage.pageSize=4096, derby.storage.reusableRecordId=true }";
while(rs.next()) {
nonEmptyResultSet = true;
assertTrue(rs.getString(2).startsWith(indexResult));
}
assertTrue(nonEmptyResultSet);
rs.close();
s.close();
}
/**
* Test that each system table has a table type of "S".
*
* @throws SQLException
*/
public void testSystemCatalogTableTypes() throws SQLException {
    Statement stmt = createStatement();
    ResultSet rows = stmt.executeQuery("select TABLENAME, TABLETYPE from sys.systables order by tablename");
    // Count the rows so we can reject an (unexpectedly) empty catalog scan.
    int rowCount = 0;
    while (rows.next()) {
        rowCount++;
        // Every table listed in SYS.SYSTABLES at this point must be a system table.
        assertEquals("S", rows.getString(2));
    }
    assertTrue(rowCount > 0);
    rows.close();
    stmt.close();
}
/**
* Check that all the tables for their expected columns.
*
* @throws SQLException
*/
public void testSystemCatalogColumns() throws SQLException {
// Expected rows, one per system-catalog column:
// { table name, column name, 1-based column position, column data type }.
// The array is sorted by (TABLENAME, COLUMNNAME) to match the query's ORDER BY,
// so any added, removed, renamed or retyped catalog column fails the comparison.
String [][] expected = {
{"SYSALIASES", "ALIAS", "2", "VARCHAR(128) NOT NULL"},
{"SYSALIASES", "ALIASID", "1", "CHAR(36) NOT NULL"},
{"SYSALIASES", "ALIASINFO", "8", "org.apache.derby.catalog.AliasInfo"},
{"SYSALIASES", "ALIASTYPE", "5", "CHAR(1) NOT NULL"},
{"SYSALIASES", "JAVACLASSNAME", "4", "LONG VARCHAR NOT NULL"},
{"SYSALIASES", "NAMESPACE", "6", "CHAR(1) NOT NULL"},
{"SYSALIASES", "SCHEMAID", "3", "CHAR(36)"},
{"SYSALIASES", "SPECIFICNAME", "9", "VARCHAR(128) NOT NULL"},
{"SYSALIASES", "SYSTEMALIAS", "7", "BOOLEAN NOT NULL"},
{"SYSCHECKS", "CHECKDEFINITION", "2", "LONG VARCHAR NOT NULL"},
{"SYSCHECKS", "CONSTRAINTID", "1", "CHAR(36) NOT NULL"},
{"SYSCHECKS", "REFERENCEDCOLUMNS", "3", "org.apache.derby.catalog.ReferencedColumns NOT NULL"},
{"SYSCOLPERMS", "COLPERMSID", "1", "CHAR(36) NOT NULL"},
{"SYSCOLPERMS", "COLUMNS", "6", "org.apache.derby.iapi.services.io.FormatableBitSet NOT NULL"},
{"SYSCOLPERMS", "GRANTEE", "2", "VARCHAR(128) NOT NULL"},
{"SYSCOLPERMS", "GRANTOR", "3", "VARCHAR(128) NOT NULL"},
{"SYSCOLPERMS", "TABLEID", "4", "CHAR(36) NOT NULL"},
{"SYSCOLPERMS", "TYPE", "5", "CHAR(1) NOT NULL"},
{"SYSCOLUMNS", "AUTOINCREMENTINC", "9", "BIGINT"},
{"SYSCOLUMNS", "AUTOINCREMENTSTART", "8", "BIGINT"},
{"SYSCOLUMNS", "AUTOINCREMENTVALUE", "7", "BIGINT"},
{"SYSCOLUMNS", "COLUMNDATATYPE", "4", "org.apache.derby.catalog.TypeDescriptor NOT NULL"},
{"SYSCOLUMNS", "COLUMNDEFAULT", "5", "java.io.Serializable"},
{"SYSCOLUMNS", "COLUMNDEFAULTID", "6", "CHAR(36)"},
{"SYSCOLUMNS", "COLUMNNAME", "2", "VARCHAR(128) NOT NULL"},
{"SYSCOLUMNS", "COLUMNNUMBER", "3", "INTEGER NOT NULL"},
{"SYSCOLUMNS", "REFERENCEID", "1", "CHAR(36) NOT NULL"},
{"SYSCONGLOMERATES", "CONGLOMERATEID", "8", "CHAR(36) NOT NULL"},
{"SYSCONGLOMERATES", "CONGLOMERATENAME", "4", "VARCHAR(128)"},
{"SYSCONGLOMERATES", "CONGLOMERATENUMBER", "3", "BIGINT NOT NULL"},
{"SYSCONGLOMERATES", "DESCRIPTOR", "6", "org.apache.derby.catalog.IndexDescriptor"},
{"SYSCONGLOMERATES", "ISCONSTRAINT", "7", "BOOLEAN"},
{"SYSCONGLOMERATES", "ISINDEX", "5", "BOOLEAN NOT NULL"},
{"SYSCONGLOMERATES", "SCHEMAID", "1", "CHAR(36) NOT NULL"},
{"SYSCONGLOMERATES", "TABLEID", "2", "CHAR(36) NOT NULL"},
{"SYSCONSTRAINTS", "CONSTRAINTID", "1", "CHAR(36) NOT NULL"},
{"SYSCONSTRAINTS", "CONSTRAINTNAME", "3", "VARCHAR(128) NOT NULL"},
{"SYSCONSTRAINTS", "REFERENCECOUNT", "7", "INTEGER NOT NULL"},
{"SYSCONSTRAINTS", "SCHEMAID", "5", "CHAR(36) NOT NULL"},
{"SYSCONSTRAINTS", "STATE", "6", "CHAR(1) NOT NULL"},
{"SYSCONSTRAINTS", "TABLEID", "2", "CHAR(36) NOT NULL"},
{"SYSCONSTRAINTS", "TYPE", "4", "CHAR(1) NOT NULL"},
{"SYSDEPENDS", "DEPENDENTFINDER", "2", "org.apache.derby.catalog.DependableFinder NOT NULL"},
{"SYSDEPENDS", "DEPENDENTID", "1", "CHAR(36) NOT NULL"},
{"SYSDEPENDS", "PROVIDERFINDER", "4", "org.apache.derby.catalog.DependableFinder NOT NULL"},
{"SYSDEPENDS", "PROVIDERID", "3", "CHAR(36) NOT NULL"},
{"SYSDUMMY1", "IBMREQD", "1", "CHAR(1)"},
{"SYSFILES", "FILEID", "1", "CHAR(36) NOT NULL"},
{"SYSFILES", "FILENAME", "3", "VARCHAR(128) NOT NULL"},
{"SYSFILES", "GENERATIONID", "4", "BIGINT NOT NULL"},
{"SYSFILES", "SCHEMAID", "2", "CHAR(36) NOT NULL"},
{"SYSFOREIGNKEYS", "CONGLOMERATEID", "2", "CHAR(36) NOT NULL"},
{"SYSFOREIGNKEYS", "CONSTRAINTID", "1", "CHAR(36) NOT NULL"},
{"SYSFOREIGNKEYS", "DELETERULE", "4", "CHAR(1) NOT NULL"},
{"SYSFOREIGNKEYS", "KEYCONSTRAINTID", "3", "CHAR(36) NOT NULL"},
{"SYSFOREIGNKEYS", "UPDATERULE", "5", "CHAR(1) NOT NULL"},
{"SYSKEYS", "CONGLOMERATEID", "2", "CHAR(36) NOT NULL"},
{"SYSKEYS", "CONSTRAINTID", "1", "CHAR(36) NOT NULL"},
{"SYSROLES", "GRANTEE", "3", "VARCHAR(128) NOT NULL"},
{"SYSROLES", "GRANTOR", "4", "VARCHAR(128) NOT NULL"},
{"SYSROLES", "ISDEF", "6", "CHAR(1) NOT NULL"},
{"SYSROLES", "ROLEID", "2", "VARCHAR(128) NOT NULL"},
{"SYSROLES", "UUID", "1", "CHAR(36) NOT NULL"},
{"SYSROLES", "WITHADMINOPTION", "5", "CHAR(1) NOT NULL"},
{"SYSROUTINEPERMS", "ALIASID", "4", "CHAR(36) NOT NULL"},
{"SYSROUTINEPERMS", "GRANTEE", "2", "VARCHAR(128) NOT NULL"},
{"SYSROUTINEPERMS", "GRANTOPTION", "5", "CHAR(1) NOT NULL"},
{"SYSROUTINEPERMS", "GRANTOR", "3", "VARCHAR(128) NOT NULL"},
{"SYSROUTINEPERMS", "ROUTINEPERMSID", "1", "CHAR(36) NOT NULL"},
{"SYSSCHEMAS", "AUTHORIZATIONID", "3", "VARCHAR(128) NOT NULL"},
{"SYSSCHEMAS", "SCHEMAID", "1", "CHAR(36) NOT NULL"},
{"SYSSCHEMAS", "SCHEMANAME", "2", "VARCHAR(128) NOT NULL"},
{"SYSSTATEMENTS", "COMPILATIONSCHEMAID", "8", "CHAR(36)"},
{"SYSSTATEMENTS", "LASTCOMPILED", "7", "TIMESTAMP"},
{"SYSSTATEMENTS", "SCHEMAID", "3", "CHAR(36) NOT NULL"},
{"SYSSTATEMENTS", "STMTID", "1", "CHAR(36) NOT NULL"},
{"SYSSTATEMENTS", "STMTNAME", "2", "VARCHAR(128) NOT NULL"},
{"SYSSTATEMENTS", "TEXT", "6", "LONG VARCHAR NOT NULL"},
{"SYSSTATEMENTS", "TYPE", "4", "CHAR(1) NOT NULL"},
{"SYSSTATEMENTS", "USINGTEXT", "9", "LONG VARCHAR"},
{"SYSSTATEMENTS", "VALID", "5", "BOOLEAN NOT NULL"},
{"SYSSTATISTICS", "COLCOUNT", "7", "INTEGER NOT NULL"},
{"SYSSTATISTICS", "CREATIONTIMESTAMP", "4", "TIMESTAMP NOT NULL"},
{"SYSSTATISTICS", "REFERENCEID", "2", "CHAR(36) NOT NULL"},
{"SYSSTATISTICS", "STATID", "1", "CHAR(36) NOT NULL"},
{"SYSSTATISTICS", "STATISTICS", "8", "org.apache.derby.catalog.Statistics NOT NULL"},
{"SYSSTATISTICS", "TABLEID", "3", "CHAR(36) NOT NULL"},
{"SYSSTATISTICS", "TYPE", "5", "CHAR(1) NOT NULL"},
{"SYSSTATISTICS", "VALID", "6", "BOOLEAN NOT NULL"},
{"SYSTABLEPERMS", "DELETEPRIV", "6", "CHAR(1) NOT NULL"},
{"SYSTABLEPERMS", "GRANTEE", "2", "VARCHAR(128) NOT NULL"},
{"SYSTABLEPERMS", "GRANTOR", "3", "VARCHAR(128) NOT NULL"},
{"SYSTABLEPERMS", "INSERTPRIV", "7", "CHAR(1) NOT NULL"},
{"SYSTABLEPERMS", "REFERENCESPRIV", "9", "CHAR(1) NOT NULL"},
{"SYSTABLEPERMS", "SELECTPRIV", "5", "CHAR(1) NOT NULL"},
{"SYSTABLEPERMS", "TABLEID", "4", "CHAR(36) NOT NULL"},
{"SYSTABLEPERMS", "TABLEPERMSID", "1", "CHAR(36) NOT NULL"},
{"SYSTABLEPERMS", "TRIGGERPRIV", "10", "CHAR(1) NOT NULL"},
{"SYSTABLEPERMS", "UPDATEPRIV", "8", "CHAR(1) NOT NULL"},
{"SYSTABLES", "LOCKGRANULARITY", "5", "CHAR(1) NOT NULL"},
{"SYSTABLES", "SCHEMAID", "4", "CHAR(36) NOT NULL"},
{"SYSTABLES", "TABLEID", "1", "CHAR(36) NOT NULL"},
{"SYSTABLES", "TABLENAME", "2", "VARCHAR(128) NOT NULL"},
{"SYSTABLES", "TABLETYPE", "3", "CHAR(1) NOT NULL"},
{"SYSTRIGGERS", "ACTIONSTMTID", "11", "CHAR(36)"},
{"SYSTRIGGERS", "CREATIONTIMESTAMP", "4", "TIMESTAMP NOT NULL"},
{"SYSTRIGGERS", "EVENT", "5", "CHAR(1) NOT NULL"},
{"SYSTRIGGERS", "FIRINGTIME", "6", "CHAR(1) NOT NULL"},
{"SYSTRIGGERS", "NEWREFERENCINGNAME", "17", "VARCHAR(128)"},
{"SYSTRIGGERS", "OLDREFERENCINGNAME", "16", "VARCHAR(128)"},
{"SYSTRIGGERS", "REFERENCEDCOLUMNS", "12", "org.apache.derby.catalog.ReferencedColumns"},
{"SYSTRIGGERS", "REFERENCINGNEW", "15", "BOOLEAN"},
{"SYSTRIGGERS", "REFERENCINGOLD", "14", "BOOLEAN"},
{"SYSTRIGGERS", "SCHEMAID", "3", "CHAR(36) NOT NULL"},
{"SYSTRIGGERS", "STATE", "8", "CHAR(1) NOT NULL"},
{"SYSTRIGGERS", "TABLEID", "9", "CHAR(36) NOT NULL"},
{"SYSTRIGGERS", "TRIGGERDEFINITION", "13", "LONG VARCHAR"},
{"SYSTRIGGERS", "TRIGGERID", "1", "CHAR(36) NOT NULL"},
{"SYSTRIGGERS", "TRIGGERNAME", "2", "VARCHAR(128) NOT NULL"},
{"SYSTRIGGERS", "TYPE", "7", "CHAR(1) NOT NULL"},
{"SYSTRIGGERS", "WHENSTMTID", "10", "CHAR(36)"},
{"SYSVIEWS", "CHECKOPTION", "3", "CHAR(1) NOT NULL"},
{"SYSVIEWS", "COMPILATIONSCHEMAID", "4", "CHAR(36)"},
{"SYSVIEWS", "TABLEID", "1", "CHAR(36) NOT NULL"},
{"SYSVIEWS", "VIEWDEFINITION", "2", "LONG VARCHAR NOT NULL"}
};
Statement s = createStatement();
// Join SYSTABLES to SYSCOLUMNS on REFERENCEID to enumerate every catalog column.
ResultSet rs = s.executeQuery("select TABLENAME, COLUMNNAME, COLUMNNUMBER, COLUMNDATATYPE from sys.systables t, sys.syscolumns c" +
" where t.TABLEID=c.REFERENCEID order by TABLENAME, COLUMNNAME");
JDBC.assertFullResultSet(rs, expected);
rs.close();
s.close();
}
/**
 * Test that every system catalog has the expected set of conglomerates:
 * exactly one heap plus the expected indexes.
 *
 * @throws SQLException
 */
public void testSystemCatalogIndexes() throws SQLException{
// Expected rows: { table name, conglomerate name, isIndex flag }, ordered by
// (TABLENAME, ISINDEX) to match the query's ORDER BY. The heap ("false") row
// sorts before the index rows for each table.
// NOTE(review): within a table the index rows share ISINDEX=true, so their
// relative order (INDEX3 before INDEX2 before INDEX1 here) is not guaranteed
// by the ORDER BY clause and appears to rely on Derby's stable scan order.
String [][] expected =
{
{"SYSALIASES", "SYSALIASES_HEAP", "false"},
{"SYSALIASES", "SYSALIASES_INDEX3", "true"},
{"SYSALIASES", "SYSALIASES_INDEX2", "true"},
{"SYSALIASES", "SYSALIASES_INDEX1", "true"},
{"SYSCHECKS", "SYSCHECKS_HEAP", "false"},
{"SYSCHECKS", "SYSCHECKS_INDEX1", "true"},
{"SYSCOLPERMS", "SYSCOLPERMS_HEAP", "false"},
{"SYSCOLPERMS", "SYSCOLPERMS_INDEX3", "true"},
{"SYSCOLPERMS", "SYSCOLPERMS_INDEX2", "true"},
{"SYSCOLPERMS", "SYSCOLPERMS_INDEX1", "true"},
{"SYSCOLUMNS", "SYSCOLUMNS_HEAP", "false"},
{"SYSCOLUMNS", "SYSCOLUMNS_INDEX2", "true"},
{"SYSCOLUMNS", "SYSCOLUMNS_INDEX1", "true"},
{"SYSCONGLOMERATES", "SYSCONGLOMERATES_HEAP", "false"},
{"SYSCONGLOMERATES", "SYSCONGLOMERATES_INDEX3", "true"},
{"SYSCONGLOMERATES", "SYSCONGLOMERATES_INDEX2", "true"},
{"SYSCONGLOMERATES", "SYSCONGLOMERATES_INDEX1", "true"},
{"SYSCONSTRAINTS", "SYSCONSTRAINTS_HEAP", "false"},
{"SYSCONSTRAINTS", "SYSCONSTRAINTS_INDEX3", "true"},
{"SYSCONSTRAINTS", "SYSCONSTRAINTS_INDEX2", "true"},
{"SYSCONSTRAINTS", "SYSCONSTRAINTS_INDEX1", "true"},
{"SYSDEPENDS", "SYSDEPENDS_HEAP", "false"},
{"SYSDEPENDS", "SYSDEPENDS_INDEX2", "true"},
{"SYSDEPENDS", "SYSDEPENDS_INDEX1", "true"},
{"SYSDUMMY1", "SYSDUMMY1_HEAP", "false"},
{"SYSFILES", "SYSFILES_HEAP", "false"},
{"SYSFILES", "SYSFILES_INDEX2", "true"},
{"SYSFILES", "SYSFILES_INDEX1", "true"},
{"SYSFOREIGNKEYS", "SYSFOREIGNKEYS_HEAP", "false"},
{"SYSFOREIGNKEYS", "SYSFOREIGNKEYS_INDEX2", "true"},
{"SYSFOREIGNKEYS", "SYSFOREIGNKEYS_INDEX1", "true"},
{"SYSKEYS", "SYSKEYS_HEAP", "false"},
{"SYSKEYS", "SYSKEYS_INDEX1", "true"},
{"SYSROLES", "SYSROLES_HEAP", "false"},
{"SYSROLES", "SYSROLES_INDEX3", "true"},
{"SYSROLES", "SYSROLES_INDEX2", "true"},
{"SYSROLES", "SYSROLES_INDEX1", "true"},
{"SYSROUTINEPERMS", "SYSROUTINEPERMS_HEAP", "false"},
{"SYSROUTINEPERMS", "SYSROUTINEPERMS_INDEX3", "true"},
{"SYSROUTINEPERMS", "SYSROUTINEPERMS_INDEX2", "true"},
{"SYSROUTINEPERMS", "SYSROUTINEPERMS_INDEX1", "true"},
{"SYSSCHEMAS", "SYSSCHEMAS_HEAP", "false"},
{"SYSSCHEMAS", "SYSSCHEMAS_INDEX2", "true"},
{"SYSSCHEMAS", "SYSSCHEMAS_INDEX1", "true"},
{"SYSSTATEMENTS", "SYSSTATEMENTS_HEAP", "false"},
{"SYSSTATEMENTS", "SYSSTATEMENTS_INDEX2", "true"},
{"SYSSTATEMENTS", "SYSSTATEMENTS_INDEX1", "true"},
{"SYSSTATISTICS", "SYSSTATISTICS_HEAP", "false"},
{"SYSSTATISTICS", "SYSSTATISTICS_INDEX1", "true"},
{"SYSTABLEPERMS", "SYSTABLEPERMS_HEAP", "false"},
{"SYSTABLEPERMS", "SYSTABLEPERMS_INDEX3", "true"},
{"SYSTABLEPERMS", "SYSTABLEPERMS_INDEX2", "true"},
{"SYSTABLEPERMS", "SYSTABLEPERMS_INDEX1", "true"},
{"SYSTABLES", "SYSTABLES_HEAP", "false"},
{"SYSTABLES", "SYSTABLES_INDEX2", "true"},
{"SYSTABLES", "SYSTABLES_INDEX1", "true"},
{"SYSTRIGGERS", "SYSTRIGGERS_HEAP", "false"},
{"SYSTRIGGERS", "SYSTRIGGERS_INDEX3", "true"},
{"SYSTRIGGERS", "SYSTRIGGERS_INDEX2", "true"},
{"SYSTRIGGERS", "SYSTRIGGERS_INDEX1", "true"},
{"SYSVIEWS", "SYSVIEWS_HEAP", "false"},
{"SYSVIEWS", "SYSVIEWS_INDEX1", "true"},
};
Statement s = createStatement();
ResultSet rs = s.executeQuery("select TABLENAME, CONGLOMERATENAME, ISINDEX from sys.systables t, sys.sysconglomerates c"
+ " where t.TABLEID=c.TABLEID order by TABLENAME, ISINDEX");
JDBC.assertFullResultSet(rs, expected);
rs.close();
s.close();
}
/**
* Check that a newly created table and its columns appear in SYSTABLES and SYSCOLUMNS
* @throws SQLException
*/
public void testNewTableInSystemCatalogs() throws SQLException {
    Statement stmt = createStatement();
    stmt.execute("create table t (i int, s smallint)");
    // The new table must show up in SYSTABLES with the user-table type 'T'.
    ResultSet rows = stmt.executeQuery("select TABLETYPE from sys.systables where tablename = 'T'");
    JDBC.assertSingleValueResultSet(rows, "T");
    rows.close();
    // Both columns must appear in SYSCOLUMNS with the right position and type.
    rows = stmt.executeQuery("select TABLENAME, COLUMNNAME, COLUMNNUMBER, columndatatype from sys.systables t, sys.syscolumns c" +
" where t.TABLEID=c.REFERENCEID and t.tablename = 'T' order by TABLENAME, COLUMNNAME");
    JDBC.assertFullResultSet(rows, new String[][] {
        {"T", "I", "1", "INTEGER"},
        {"T", "S", "2", "SMALLINT"}
    });
    rows.close();
    // An unindexed table owns exactly one conglomerate: its heap.
    rows = stmt.executeQuery("select TABLENAME, ISINDEX from sys.systables t, sys.sysconglomerates c where t.TABLEID=c.TABLEID and t.TABLENAME = 'T' order by TABLENAME, ISINDEX");
    JDBC.assertFullResultSet(rows, new String[][] {{"T", "false"},});
    rows.close();
    // Clean up so later tests see an unmodified catalog.
    stmt.execute("drop table t");
    stmt.close();
}
/**
* Test that table and column names over thirty characters are recorded
* properly in the system tables.
*
* @throws SQLException
*/
public void testOverThirtyCharsInTableName() throws SQLException {
    Statement stmt = createStatement();
    stmt.execute("create table t234567890123456789012345678901234567890 (c234567890123456789012345678901234567890 int)");
    // The 40-character table name must be stored untruncated in SYSTABLES.
    ResultSet rows = stmt.executeQuery("select TABLENAME from sys.systables where length(TABLENAME) > 30 order by tablename");
    JDBC.assertSingleValueResultSet(rows, "T234567890123456789012345678901234567890");
    rows.close();
    // Likewise for the 40-character column name in SYSCOLUMNS.
    rows = stmt.executeQuery("select COLUMNNAME from sys.syscolumns where {fn length(COLUMNNAME)} > 30 order by columnname");
    JDBC.assertSingleValueResultSet(rows, "C234567890123456789012345678901234567890");
    rows.close();
    stmt.execute("drop table t234567890123456789012345678901234567890");
    stmt.close();
}
/**
* Test that named constraints and unnamed constraints are recorded in the system tables properly.
*
* @throws SQLException
*/
public void testPrimaryAndUniqueKeysInSystemCatalogs() throws SQLException {
Statement s = createStatement();
// Lists all *named* constraints (those on tables not named UNNAMED*), so the
// expected array below can grow deterministically as named constraints are added.
String getNamedConstraintsQuery = "select c.constraintname, c.type from sys.sysconstraints c, sys.systables t "
+ "where c.tableid = t.tableid and not t.tablename like 'UNNAMED%' order by c.constraintname";
// Named single-column primary key: recorded with its given name and type 'P'.
s.execute("create table primkey1 (c1 int not null constraint prim1 primary key)");
String [][] expected = new String[][] {{"PRIM1", "P"}};
ResultSet rs = s.executeQuery(getNamedConstraintsQuery);
// NOTE(review): this first call uses the 3-arg assertFullResultSet overload while
// all later calls use the 2-arg one — presumably equivalent here; verify intent.
JDBC.assertFullResultSet(rs, expected, true);
rs.close();
// Unnamed primary key: a system-generated name is assigned, so only the type
// ('P') can be checked, and the named-constraints result must be unchanged.
s.execute("create table unnamed_primkey2 (c1 int not null primary key)");
rs = s.executeQuery("select c.constraintname, c.type from sys.sysconstraints c, sys.systables t where c.tableid = t.tableid and t.tablename = 'UNNAMED_PRIMKEY2' order by c.constraintname");
assertTrue(rs.next());
assertEquals("P", rs.getString(2));
assertFalse(rs.next());
rs.close();
rs = s.executeQuery(getNamedConstraintsQuery);
JDBC.assertFullResultSet(rs, expected);
rs.close();
// Named multi-column primary key.
s.execute("create table primkey3 (c1 int not null, c2 int not null, constraint prim3 primary key(c2, c1))");
expected = new String[][] {{"PRIM1", "P"}, {"PRIM3", "P"}};
rs = s.executeQuery(getNamedConstraintsQuery);
JDBC.assertFullResultSet(rs, expected);
rs.close();
// Named single-column unique constraint: type 'U'.
s.execute("create table uniquekey1 (c1 int not null constraint uniq1 unique)");
expected = new String[][] {{"PRIM1", "P"}, {"PRIM3", "P"}, {"UNIQ1", "U"}};
rs = s.executeQuery(getNamedConstraintsQuery);
JDBC.assertFullResultSet(rs, expected);
rs.close();
// Unnamed unique constraint: again only the type ('U') is checkable.
s.execute("create table unnamed_uniquekey2 (c1 int not null unique)");
rs = s.executeQuery("select c.constraintname, c.type from sys.sysconstraints c, sys.systables t where c.tableid = t.tableid and t.tablename = 'UNNAMED_UNIQUEKEY2' order by c.constraintname");
assertTrue(rs.next());
assertEquals("U", rs.getString(2));
assertFalse(rs.next());
rs.close();
rs = s.executeQuery(getNamedConstraintsQuery);
JDBC.assertFullResultSet(rs, expected);
rs.close();
// Named multi-column unique constraint.
s.execute("create table uniquekey3 (c1 int not null, c2 int not null, constraint uniq3 unique(c2, c1))");
expected = new String[][] {{"PRIM1", "P"}, {"PRIM3", "P"}, {"UNIQ1", "U"}, {"UNIQ3", "U"}};
rs = s.executeQuery(getNamedConstraintsQuery);
JDBC.assertFullResultSet(rs, expected);
rs.close();
// Drop everything so later tests start from a clean catalog.
s.execute("drop table primkey1");
s.execute("drop table unnamed_primkey2");
s.execute("drop table primkey3");
s.execute("drop table uniquekey1");
s.execute("drop table unnamed_uniquekey2");
s.execute("drop table uniquekey3");
s.close();
}
/**
* Test that view creation is recorded in the system tables.
*
* @throws SQLException
*/
public void testViewsOfSystemCatalogs() throws SQLException {
    Statement stmt = createStatement();
    // Create two base tables and a view over them.
    stmt.execute("create table t (i int, s smallint)");
    stmt.execute("create table uniquekey3 (c1 int not null, c2 int not null, constraint uniq3 unique(c2, c1))");
    stmt.execute("create view dummyview as select * from t, uniquekey3");
    // Only the view — not the base tables — may appear in the SYSTABLES/SYSVIEWS join.
    ResultSet rows = stmt.executeQuery("select tablename from sys.systables t, sys.sysviews v where t.tableid = v.tableid order by tablename");
    JDBC.assertSingleValueResultSet(rows, "DUMMYVIEW");
    rows.close();
    // Tear down in dependency order: view first, then its base tables.
    stmt.execute("drop view dummyview");
    stmt.execute("drop table t");
    stmt.execute("drop table uniquekey3");
    stmt.close();
}
/**
* Check that column datatypes are reported correctly, both in
* embedded and client/server modes
*
* @throws SQLException
*/
public void testColumnDatatypesInSystemCatalogs() throws SQLException {
Statement s = createStatement();
s.execute("create table decimal_tab (dcol decimal(5,2), ncol numeric(5,2) default 1.0)");
ResultSet rs = s.executeQuery("select columnname, columndatatype from sys.syscolumns where columnname IN ('DCOL', 'NCOL') order by columnname");
// startsWith rather than equals: per the method javadoc the textual form of
// COLUMNDATATYPE may carry extra trailing detail depending on embedded vs
// client/server mode, so only the type prefix is pinned down.
//DCOL
assertTrue(rs.next());
assertTrue(rs.getString(2).startsWith("DECIMAL(5,2)"));
//NCOL
assertTrue(rs.next());
assertTrue(rs.getString(2).startsWith("NUMERIC(5,2)"));
assertFalse(rs.next());
rs.close();
// An index's DESCRIPTOR renders as a BTREE over the indexed column position(s).
s.execute("create index decimal_tab_idx on decimal_tab(dcol)");
rs = s.executeQuery("select conglomeratename, descriptor from sys.sysconglomerates where conglomeratename = 'DECIMAL_TAB_IDX' order by conglomeratename");
assertTrue(rs.next());
assertTrue(rs.getString(2).startsWith("BTREE (1)"));
assertFalse(rs.next());
rs.close();
// A trigger with no referenced-columns clause stores NULL in REFERENCEDCOLUMNS.
s.execute("create trigger t1 after update on decimal_tab for each row values 1");
rs = s.executeQuery("select triggername, referencedcolumns from sys.systriggers order by triggername");
assertTrue(rs.next());
assertNull(rs.getString(2));
assertFalse(rs.next());
rs.close();
// Clean up: drop the trigger before the table it is defined on.
s.execute("drop trigger t1");
s.execute("drop table decimal_tab");
s.close();
}
/**
* Test for fix of Derby-318, confirm that it is possible to select
* COLUMNDEFAULT from SYSCOLUMNS after a column that is generated by
* default has been added.
*
* @throws SQLException
*/
public void testAutoincrementColumnUpdated() throws SQLException{
    Statement stmt = createStatement();
    stmt.executeUpdate("create table defaultAutoinc(autoinccol int generated by default as identity)");
    ResultSet rows = stmt.executeQuery("select COLUMNDEFAULT from SYS.SYSCOLUMNS where COLUMNNAME = 'AUTOINCCOL'");
    assertTrue(rows.next());
    // Before Derby-318, this next call would have failed with an NPE
    Object columnDefault = rows.getObject(1);
    assertTrue("SystemCatalogTest: invalid Object type for SYSCOLUMNS.COLUMNDEFAULT",
            columnDefault instanceof java.io.Serializable);
    assertFalse(rows.next());
    rows.close();
    stmt.executeUpdate("drop table defaultAutoinc");
    stmt.close();
}
/**
* Run SYSCS_UTIL.SYSCS_CHECK_TABLE on each system table.
*
* @throws SQLException
*/
public void testCheckConsistencyOfSystemCatalogs() throws SQLException {
    Statement s = createStatement();
    // SYSDUMMY1 is excluded: it is a special in-memory table with no real conglomerate.
    ResultSet rs = s.executeQuery("select tablename, SYSCS_UTIL.SYSCS_CHECK_TABLE('SYS', tablename)from sys.systables where tabletype = 'S' and tablename != 'SYSDUMMY1' order by tablename");
    boolean nonEmptyResultSet = false;
    while (rs.next()) {
        nonEmptyResultSet = true;
        // Fixed argument order: JUnit's assertEquals takes (expected, actual).
        // The previous call reversed them, producing misleading failure messages
        // such as "expected:<0> but was:<1>".
        assertEquals(1, rs.getInt(2));
    }
    // Guard against the query silently matching no tables at all.
    assertTrue(nonEmptyResultSet);
    rs.close();
    s.close();
}
}
|
package io.swagger.client;
// NOTE: the original @javax.annotation.Generated marker
// (io.swagger.codegen.languages.JavaClientCodegen, 2015-09-11) was removed because
// the javax.annotation package is no longer shipped with the JDK (since Java 11),
// which made this file fail to compile without an extra dependency.
/**
 * String helper methods for the generated API client.
 *
 * <p>This class is a stateless utility and is not meant to be instantiated.</p>
 */
public class StringUtil {
    private StringUtil() {
        // Utility class: prevent instantiation.
    }

    /**
     * Check if the given array contains the given value (with case-insensitive comparison).
     *
     * @param array The array (must not be null)
     * @param value The value to search; may be null, which matches a null element
     * @return true if the array contains the value
     */
    public static boolean containsIgnoreCase(String[] array, String value) {
        for (String str : array) {
            if (value == null && str == null) return true;
            if (value != null && value.equalsIgnoreCase(str)) return true;
        }
        return false;
    }

    /**
     * Join an array of strings with the given separator.
     *
     * @param array The array of strings (must not be null; null elements render as "null")
     * @param separator The separator
     * @return the resulting string; empty when the array is empty
     */
    public static String join(String[] array, String separator) {
        // Delegate to the JDK (Java 8+) instead of a hand-rolled StringBuilder loop;
        // behavior is identical, including "" for an empty array.
        return String.join(separator, array);
    }

    /**
     * Convert the given object to string with each line indented by 4 spaces
     * (except the first line).
     *
     * @param o the object to render; null renders as "null"
     * @return the indented string form
     */
    public static String toIndentedString(Object o) {
        if (o == null) return "null";
        return o.toString().replace("\n", "\n    ");
    }
}
|
package org.hl7.fhir.dstu2016may.model.codesystems;
/*
Copyright (c) 2011+, HL7, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of HL7 nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
// Generated on Sun, May 8, 2016 03:05+1000 for FHIR v1.4.0
import org.hl7.fhir.exceptions.FHIRException;
/**
 * FHIR issue-severity code system (http://hl7.org/fhir/issue-severity):
 * how serious an operation-outcome issue is. Generated for FHIR v1.4.0.
 */
public enum IssueSeverity {
/**
 * The issue caused the action to fail, and no further checking could be performed.
 */
FATAL,
/**
 * The issue is sufficiently important to cause the action to fail.
 */
ERROR,
/**
 * The issue is not important enough to cause the action to fail, but may cause it to be performed suboptimally or in a way that is not as desired.
 */
WARNING,
/**
 * The issue has no relation to the degree of success of the action.
 */
INFORMATION,
/**
 * added to help the parsers
 */
NULL;
/**
 * Parses a FHIR issue-severity code into its enum constant.
 * Returns null for a null or empty code string. The internal NULL constant is
 * never returned by this method; it exists only as a parser placeholder.
 *
 * @param codeString the official code ("fatal", "error", "warning", "information")
 * @return the matching constant, or null for null/empty input
 * @throws FHIRException if the code string is not a known issue-severity code
 */
public static IssueSeverity fromCode(String codeString) throws FHIRException {
if (codeString == null || "".equals(codeString))
return null;
if ("fatal".equals(codeString))
return FATAL;
if ("error".equals(codeString))
return ERROR;
if ("warning".equals(codeString))
return WARNING;
if ("information".equals(codeString))
return INFORMATION;
throw new FHIRException("Unknown IssueSeverity code '"+codeString+"'");
}
/**
 * Returns the official code for this constant ("?" for the NULL placeholder).
 */
public String toCode() {
switch (this) {
case FATAL: return "fatal";
case ERROR: return "error";
case WARNING: return "warning";
case INFORMATION: return "information";
default: return "?";
}
}
/**
 * Returns the canonical URI of the issue-severity code system.
 */
public String getSystem() {
return "http://hl7.org/fhir/issue-severity";
}
/**
 * Returns the official definition text for this constant ("?" for NULL).
 */
public String getDefinition() {
switch (this) {
case FATAL: return "The issue caused the action to fail, and no further checking could be performed.";
case ERROR: return "The issue is sufficiently important to cause the action to fail.";
case WARNING: return "The issue is not important enough to cause the action to fail, but may cause it to be performed suboptimally or in a way that is not as desired.";
case INFORMATION: return "The issue has no relation to the degree of success of the action.";
default: return "?";
}
}
/**
 * Returns the human-readable display name for this constant ("?" for NULL).
 */
public String getDisplay() {
switch (this) {
case FATAL: return "Fatal";
case ERROR: return "Error";
case WARNING: return "Warning";
case INFORMATION: return "Information";
default: return "?";
}
}
}
|
package ezvcard.io;
import ezvcard.VCardDataType;
import ezvcard.VCardVersion;
import ezvcard.io.scribe.VCardPropertyScribe;
import ezvcard.io.text.WriteContext;
import ezvcard.io.xml.XCardElement;
import ezvcard.parameter.VCardParameters;
import ezvcard.property.VCardProperty;
/*
Copyright (c) 2012-2018, Michael Angstadt
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
*/
/**
 * An extended type class used for testing that contains XML marshalling
 * methods, but not a QName.
 * @author Michael Angstadt
 */
public class SalaryProperty extends VCardProperty {
	/** The salary value carried by this property (public for test convenience). */
	public int salary;

	public SalaryProperty(int salary) {
		this.salary = salary;
	}

	/** Marshals/unmarshals {@link SalaryProperty} to and from plain text and xCard XML. */
	public static class SalaryScribe extends VCardPropertyScribe<SalaryProperty> {
		public SalaryScribe() {
			super(SalaryProperty.class, "X-SALARY");
		}

		@Override
		protected VCardDataType _defaultDataType(VCardVersion version) {
			return VCardDataType.INTEGER;
		}

		@Override
		protected String _writeText(SalaryProperty property, WriteContext context) {
			// Integer.toString is the idiomatic form of the old (salary + "") concatenation.
			return Integer.toString(property.salary);
		}

		@Override
		protected SalaryProperty _parseText(String value, VCardDataType dataType, VCardParameters parameters, ParseContext context) {
			return new SalaryProperty(Integer.parseInt(value));
		}

		@Override
		protected void _writeXml(SalaryProperty property, XCardElement parent) {
			parent.element().setTextContent(Integer.toString(property.salary));
		}

		@Override
		protected SalaryProperty _parseXml(XCardElement element, VCardParameters parameters, ParseContext context) {
			// NOTE(review): element.first(...) may return null when the INTEGER child is
			// absent, which would surface as an NPE inside parseInt — presumably
			// acceptable for a test helper; confirm against the scribe contract.
			return new SalaryProperty(Integer.parseInt(element.first(VCardDataType.INTEGER)));
		}
	}
}
|
/*
* #%L
* SciJava Common shared library for SciJava software.
* %%
* Copyright (C) 2009 - 2017 Board of Regents of the University of
* Wisconsin-Madison, Broad Institute of MIT and Harvard, Max Planck
* Institute of Molecular Cell Biology and Genetics, University of
* Konstanz, and KNIME GmbH.
* %%
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* #L%
*/
package org.scijava.main;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import org.scijava.log.LogService;
import org.scijava.plugin.Parameter;
import org.scijava.plugin.Plugin;
import org.scijava.service.AbstractService;
import org.scijava.service.Service;
import org.scijava.util.Types;
/**
 * Default implementation of {@link MainService}.
 *
 * @author Curtis Rueden
 */
@Plugin(type = Service.class)
public class DefaultMainService extends AbstractService implements MainService {

	@Parameter(required = false)
	private LogService log;

	/** Registered main entry points, in registration order. */
	private final List<Main> mains = new ArrayList<>();

	@Override
	public int execMains() {
		int mainCount = 0;
		for (final Main main : mains) {
			main.exec();
			mainCount++;
		}
		return mainCount;
	}

	@Override
	public void addMain(final String className, final String... args) {
		mains.add(new DefaultMain(className, args));
	}

	@Override
	public Main[] getMains() {
		return mains.toArray(new Main[0]);
	}

	// -- Helper classes --

	/**
	 * Default implementation of {@link MainService.Main}.
	 * <p>
	 * Intentionally a non-static inner class: {@link #exec()} reports failures
	 * through the enclosing service's {@code log} parameter.
	 * </p>
	 */
	private class DefaultMain implements Main {

		private final String className;
		private final String[] args;

		public DefaultMain(final String className, final String... args) {
			this.className = className;
			// Defensive copy on the way in; callers may reuse their array.
			this.args = args.clone();
		}

		@Override
		public String className() {
			return className;
		}

		@Override
		public String[] args() {
			// Defensive copy on the way out so callers cannot mutate our state.
			return args.clone();
		}

		@Override
		public void exec() {
			try {
				final Class<?> mainClass = Types.load(className, false);
				final Method main = mainClass.getMethod("main", String[].class);
				// Static method: null receiver; args wrapped so varargs does not unpack it.
				main.invoke(null, new Object[] { args });
			}
			catch (final NoSuchMethodException exc) {
				if (log != null) {
					log.error("No main method for class: " + className, exc);
				}
			}
			catch (final IllegalArgumentException | IllegalAccessException
				| InvocationTargetException exc)
			{
				// Best-effort execution: record the failure but do not propagate.
				if (log != null) log.error(exc);
			}
		}
	}
}
|
package com.zrlog.admin.business.rest.base;
/**
 * Request payload for updating an article type/category: carries the type's
 * database id together with its editable attributes (name, alias, remark).
 */
public class UpdateTypeRequest {

    private Long id;
    private String typeName;
    private String remark;
    private String alias;

    /** Database id of the type being updated. */
    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    /** Display name of the type. */
    public String getTypeName() {
        return typeName;
    }

    public void setTypeName(String typeName) {
        this.typeName = typeName;
    }

    /** Free-form description of the type. */
    public String getRemark() {
        return remark;
    }

    public void setRemark(String remark) {
        this.remark = remark;
    }

    /** URL-friendly alias of the type. */
    public String getAlias() {
        return alias;
    }

    public void setAlias(String alias) {
        this.alias = alias;
    }
}
|
package com.hazeluff.discord.nhlbot.bot.command;
import com.hazeluff.discord.nhlbot.bot.NHLBot;
import com.hazeluff.discord.nhlbot.nhl.Game;
import com.hazeluff.discord.nhlbot.nhl.GameStatus;
import com.hazeluff.discord.nhlbot.nhl.Team;
import sx.blah.discord.handle.obj.IChannel;
import sx.blah.discord.handle.obj.IGuild;
import sx.blah.discord.handle.obj.IMessage;
/**
 * Displays the score of a game in a Game Day Channel.
 */
public class ScoreCommand extends Command {

	public ScoreCommand(NHLBot nhlBot) {
		super(nhlBot);
	}

	/**
	 * Replies with the score of the game associated with the channel the
	 * message was sent in, or with an explanatory message when the command
	 * cannot be honored (private channel, unsubscribed guild, non-game
	 * channel, or a game still in preview).
	 */
	@Override
	public void replyTo(IMessage message, String[] arguments) {
		IChannel channel = message.getChannel();
		IGuild guild = message.getGuild();
		Team preferredTeam;
		if (channel.isPrivate()) {
			// Scores are tied to a guild's game day channels; not available in DMs.
			nhlBot.getDiscordManager().sendMessage(channel, RUN_IN_SERVER_CHANNEL_MESSAGE);
		} else if ((preferredTeam = nhlBot.getPreferencesManager().getTeamByGuild(guild.getLongID())) == null) {
			nhlBot.getDiscordManager().sendMessage(channel, SUBSCRIBE_FIRST_MESSAGE);
		} else {
			Game game = nhlBot.getGameScheduler().getGameByChannelName(channel.getName());
			if (game == null) {
				nhlBot.getDiscordManager().sendMessage(channel, getRunInGameDayChannelMessage(guild, preferredTeam));
			} else if (game.getStatus() == GameStatus.PREVIEW) {
				nhlBot.getDiscordManager().sendMessage(channel, GAME_NOT_STARTED_MESSAGE);
			} else {
				nhlBot.getDiscordManager().sendMessage(channel, game.getScoreMessage());
			}
		}
	}

	/**
	 * Accepts messages whose second token is "score" (case-insensitive).
	 */
	@Override
	public boolean isAccept(String[] arguments) {
		// Guard against messages with fewer than two tokens, which previously
		// threw ArrayIndexOutOfBoundsException instead of returning false.
		return arguments.length > 1 && arguments[1].equalsIgnoreCase("score");
	}
}
|
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.chemistry.opencmis.client.bindings.spi.local;
import java.math.BigInteger;
import org.apache.chemistry.opencmis.client.bindings.spi.BindingSession;
import org.apache.chemistry.opencmis.commons.data.ExtensionsData;
import org.apache.chemistry.opencmis.commons.data.ObjectList;
import org.apache.chemistry.opencmis.commons.enums.RelationshipDirection;
import org.apache.chemistry.opencmis.commons.server.CmisService;
import org.apache.chemistry.opencmis.commons.server.CmisServiceFactory;
import org.apache.chemistry.opencmis.commons.spi.RelationshipService;
public class RelationshipServiceImpl extends AbstractLocalService implements RelationshipService {
/**
* Constructor.
*/
public RelationshipServiceImpl(BindingSession session, CmisServiceFactory factory) {
setSession(session);
setServiceFactory(factory);
}
public ObjectList getObjectRelationships(String repositoryId, String objectId, Boolean includeSubRelationshipTypes,
RelationshipDirection relationshipDirection, String typeId, String filter, Boolean includeAllowableActions,
BigInteger maxItems, BigInteger skipCount, ExtensionsData extension) {
CmisService service = getService(repositoryId);
try {
if (stopBeforeService(service)) {
return null;
}
ObjectList serviceResult = service.getObjectRelationships(repositoryId, objectId,
includeSubRelationshipTypes, relationshipDirection, typeId, filter, includeAllowableActions,
maxItems, skipCount, extension);
if (stopAfterService(service)) {
return null;
}
return serviceResult;
} finally {
service.close();
}
}
}
|
package models.Signal.SkillSignal;
import models.Decal.CircleDash;
import models.Decal.Decal;
import models.Decal.FireBallDecal;
import models.Entity.Entity;
import models.Map.Map;
import models.Skill.Skill;
import utilities.Location.Location;
import java.util.Timer;
import java.util.TimerTask;
/**
 * A skill signal that travels in a straight line away from the avatar, one
 * tile per second, drawing a decal on each tile it passes and applying the
 * skill to any entity it encounters. The signal stops after traveling
 * {@code radius} tiles or on hitting a mountain.
 */
public class LinearSkillSignal extends SkillSignal {

    public LinearSkillSignal(Map map, Entity avatar, Skill skill) {
        super(map, avatar, skill);
    }

    /**
     * Fires the skill along {@code direction} (inherited from the superclass),
     * advancing one tile on every timer tick.
     */
    protected void useSkill(Skill skill) {
        Decal decal = new CircleDash();
        if (skill.canUseSkill(avatar)) {
            Timer t = new Timer();
            t.schedule(new TimerTask() {
                public void run() {
                    currentRadius++;
                    map.getTileAt(avatarLocation).removeDecal();
                    Location nextLocation = direction.getNextLocation(avatarLocation);
                    if (!map.isOutOfBound(nextLocation)) {
                        if (map.getTileAt(nextLocation).isMountain()) {
                            // Mountains block the signal; force termination below.
                            currentRadius = radius;
                        } else {
                            map.getTileAt(nextLocation).addDecal(decal);
                            Entity entityToAttack = map.getTileAt(nextLocation).getEntity();
                            skill.activate(entityToAttack);
                            checkForExperienceGain(entityToAttack);
                            avatarLocation = nextLocation;
                        }
                    }
                    // Use >= rather than == so the timer is guaranteed to stop even if
                    // currentRadius ever overshoots radius (both fields live in the
                    // superclass, so their invariants are not enforced here).
                    if (currentRadius >= radius) {
                        map.getTileAt(avatarLocation).removeDecal();
                        t.cancel();
                    }
                }
            }, 0, 1000);
        }
    }
}
|
/*
* FDBRecordStoreBase.java
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2015-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.apple.foundationdb.record.provider.foundationdb;
import com.apple.foundationdb.annotation.API;
import com.apple.foundationdb.record.EndpointType;
import com.apple.foundationdb.record.EvaluationContext;
import com.apple.foundationdb.record.ExecuteProperties;
import com.apple.foundationdb.record.ExecuteState;
import com.apple.foundationdb.record.IndexEntry;
import com.apple.foundationdb.record.IndexScanType;
import com.apple.foundationdb.record.IndexState;
import com.apple.foundationdb.record.IsolationLevel;
import com.apple.foundationdb.record.PipelineOperation;
import com.apple.foundationdb.record.RecordCoreException;
import com.apple.foundationdb.record.RecordCoreStorageException;
import com.apple.foundationdb.record.RecordCursor;
import com.apple.foundationdb.record.RecordFunction;
import com.apple.foundationdb.record.RecordIndexUniquenessViolation;
import com.apple.foundationdb.record.RecordMetaDataBuilder;
import com.apple.foundationdb.record.RecordMetaDataProvider;
import com.apple.foundationdb.record.ScanProperties;
import com.apple.foundationdb.record.TupleRange;
import com.apple.foundationdb.record.logging.LogMessageKeys;
import com.apple.foundationdb.record.metadata.Index;
import com.apple.foundationdb.record.metadata.IndexAggregateFunction;
import com.apple.foundationdb.record.metadata.IndexRecordFunction;
import com.apple.foundationdb.record.metadata.Key;
import com.apple.foundationdb.record.metadata.RecordType;
import com.apple.foundationdb.record.metadata.StoreRecordFunction;
import com.apple.foundationdb.record.metadata.expressions.EmptyKeyExpression;
import com.apple.foundationdb.record.metadata.expressions.KeyExpression;
import com.apple.foundationdb.record.provider.common.RecordSerializer;
import com.apple.foundationdb.record.provider.foundationdb.keyspace.KeySpacePath;
import com.apple.foundationdb.record.provider.foundationdb.storestate.FDBRecordStoreStateCache;
import com.apple.foundationdb.record.query.ParameterRelationshipGraph;
import com.apple.foundationdb.record.query.RecordQuery;
import com.apple.foundationdb.record.query.expressions.QueryComponent;
import com.apple.foundationdb.record.query.plan.RecordQueryPlanner;
import com.apple.foundationdb.record.query.plan.plans.RecordQueryPlan;
import com.apple.foundationdb.subspace.Subspace;
import com.apple.foundationdb.tuple.Tuple;
import com.apple.foundationdb.tuple.TupleHelpers;
import com.google.protobuf.Message;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
/**
* Base interface for typed and untyped record stores.
*
* This interface is the main front-end for most operations inserting, modifying, or querying data through
* the Record Layer. A record store combines:
*
* <ul>
* <li>A {@link Subspace} (often specified via a {@link KeySpacePath})</li>
* <li>The {@link com.apple.foundationdb.record.RecordMetaData RecordMetaData} associated with the data stored with the data in that subspace</li>
* <li>An {@link FDBRecordContext} which wraps a FoundationDB {@link com.apple.foundationdb.Transaction Transaction}</li>
* </ul>
*
* <p>
* All of the record store's data—including index data—are stored durably within the given subspace. Note that
* the meta-data is <em>not</em> stored by the record store directly. However, information about the store's current meta-data
* version is persisted with the store to detect when the meta-data have changed and to know if any action needs to be taken
* to begin using the new meta-data. (For example, new indexes might need to be built and removed indexes deleted.) The same
* meta-data may be used for multiple record stores, and separating the meta-data from the data makes updating the shared
* meta-data simpler as it only needs to be updated in one place. The {@link FDBMetaDataStore} may be used if one wishes
* to persist the meta-data into a FoundationDB cluster.
* </p>
*
* <p>
* All operations conducted by a record store are conducted within the lifetime single transaction, and no data is persisted
* to the database until the transaction is committed by calling {@link FDBRecordContext#commit()} or
* {@link FDBRecordContext#commitAsync()}. Record Layer transactions inherit all of the guarantees and limitations of
* the transactions exposed by FoundationDB, including their durability and consistency guarantees as well as size and
* duration limits. See the FoundationDB <a href="https://apple.github.io/foundationdb/known-limitations.html">known limitations</a>
* for more details.
* </p>
*
* <p>
* The record store also allows the user to tweak additional parameters such as what the parallelism of pipelined operations
* should be (through the {@link PipelineSizer}) and what serializer should be used to read and write data to the database.
* See the {@link BaseBuilder} interface for more details.
* </p>
*
* @param <M> type used to represent stored records
* @see FDBRecordStore
* @see FDBTypedRecordStore
*/
@API(API.Status.MAINTAINED)
public interface FDBRecordStoreBase<M extends Message> extends RecordMetaDataProvider {
/**
 * Get the untyped record store associated with this possibly typed store.
 * @return an untyped record store
 */
FDBRecordStore getUntypedRecordStore();
/**
 * Get a typed record store using the given typed serializer.
 * @param <N> the type for the new record store
 * @param serializer typed serializer to use
 * @return a new typed record store
 */
default <N extends Message> FDBTypedRecordStore<N> getTypedRecordStore(@Nonnull RecordSerializer<N> serializer) {
    return new FDBTypedRecordStore<>(getUntypedRecordStore(), serializer);
}
/**
 * Get the record context (transaction) to use for the record store.
 * @return the record context / transaction to use
 */
@Nonnull
FDBRecordContext getContext();
/**
 * Get the executor associated with this store's record context.
 * @return the executor from {@link #getContext()}
 */
@Nonnull
default Executor getExecutor() {
    return getContext().getExecutor();
}
/**
 * Get the timer associated with this store's record context.
 * @return the timer from {@link #getContext()} (may be {@code null})
 */
@Nullable
default FDBStoreTimer getTimer() {
    return getContext().getTimer();
}
/**
 * Get the subspace provider.
 * @return the subspace provider
 */
@Nullable
SubspaceProvider getSubspaceProvider();
/**
 * Get the serializer used to convert records into byte arrays.
 * @return the serializer to use
 */
@Nonnull
RecordSerializer<M> getSerializer();
/**
 * Hook invoked when a record store is opened, allowing the client to check and
 * update its stored user version and to decide how newly added indexes should
 * be initialized.
 */
interface UserVersionChecker {
    /**
     * Check the user version.
     * @param oldUserVersion the old user version or <code>-1</code> if this is a new record store
     * @param oldMetaDataVersion the old meta-data version
     * @param metaData the meta-data provider that will be used to get meta-data
     * @return the user version to store in the record info header
     */
    CompletableFuture<Integer> checkUserVersion(int oldUserVersion, int oldMetaDataVersion,
                                                RecordMetaDataProvider metaData);
    /**
     * Determine what to do about an index needing to be built. When a {@link FDBRecordStore} is opened,
     * this method will be called on any index that has been added to the
     * {@link com.apple.foundationdb.record.RecordMetaData RecordMetaData} since the last time the record store was
     * opened. The index will then be initialized with the {@link IndexState} returned, which in turn determines
     * whether the index must be maintained during record inserts and deletes and also whether the index
     * can be read (for queries, for example). In general, an index is only really useful if it is
     * {@link IndexState#READABLE}, but if this method returns {@link IndexState#READABLE}, then the
     * index must be built in the same transaction that opens the record store, which can lead to errors
     * on large stores if the index cannot be built in the
     * <a href="https://apple.github.io/foundationdb/known-limitations.html#long-running-transactions">five second
     * FoundationDB transaction time limit</a>.
     *
     * <p>
     * By default, this will return {@link IndexState#READABLE} for any indexes on new types (which
     * can be used right away without doing any I/O) or if the number of records in the store is small (below
     * {@link FDBRecordStore#MAX_RECORDS_FOR_REBUILD}). However, if the record store is large, this will return
     * {@link IndexState#DISABLED}, which indicates that the index should not be maintained and that it cannot be
     * used until the index is built by the {@link OnlineIndexer}.
     * </p>
     *
     * <p>
     * For adopters, two utility methods are provided that can be used to make implementing this method easier.
     * The first is {@link FDBRecordStore#disabledIfTooManyRecordsForRebuild(long, boolean)}, which replicates
     * the default behavior. The second is
     * {@link FDBRecordStore#writeOnlyIfTooManyRecordsForRebuild(long, boolean)}, which is similar to the default
     * except that it returns {@link IndexState#WRITE_ONLY} instead of {@link IndexState#DISABLED} and was the
     * default prior to Record Layer 3.0. Note that all indexes must be made {@link IndexState#WRITE_ONLY} before
     * they can be built, but the {@link OnlineIndexer} should generally handle that index state transition, and so
     * most adopters should return {@link IndexState#DISABLED} on indexes that cannot be built in-line.
     * </p>
     *
     * @param index the index that has not been built for this store
     * @param recordCount the number of records already in the store
     * @param indexOnNewRecordTypes <code>true</code> if all record types for the index are new (the number of
     *                              records related to this index is 0), in which case the index is able to be
     *                              "rebuilt" instantly with no cost.
     * @return the desired state of the new index. If this is {@link IndexState#READABLE}, the index will be built right away
     * @see FDBRecordStore#disabledIfTooManyRecordsForRebuild(long, boolean)
     * @see FDBRecordStore#writeOnlyIfTooManyRecordsForRebuild(long, boolean)
     */
    default IndexState needRebuildIndex(Index index, long recordCount, boolean indexOnNewRecordTypes) {
        return FDBRecordStore.disabledIfTooManyRecordsForRebuild(recordCount, indexOnNewRecordTypes);
    }
}
/**
 * Action to take if the record store does / does not already exist.
 * @see FDBRecordStore.Builder#createOrOpenAsync(FDBRecordStoreBase.StoreExistenceCheck)
 */
enum StoreExistenceCheck {
    /**
     * No special action.
     *
     * This should be used with care, since if the record store already has records, there is
     * no guarantee that they were written at the current versions (meta-data and format).
     * It is really only appropriate in development when switching from {@code uncheckedOpen}
     * or {@code build} to a checked open.
     */
    NONE,
    /**
     * Throw if the record store does not have an info header but does have at least one
     * record. This differs from {@link #ERROR_IF_NO_INFO_AND_NOT_EMPTY} in that there is
     * data stored in the record store other than just the records and the indexes, including
     * meta-data about which indexes have been built. A record store that is missing a header
     * but has this other data is in a corrupt state, but as there are no records, it can be
     * recovered when creating the store in a straightforward way.
     */
    ERROR_IF_NO_INFO_AND_HAS_RECORDS_OR_INDEXES,
    /**
     * Throw if the record store does not have an info header but is not empty. Unlike with
     * {@link #ERROR_IF_NO_INFO_AND_HAS_RECORDS_OR_INDEXES}, this existence check will throw an
     * error even if there are no records in the store, only data stored internally by the
     * Record Layer.
     *
     * This corresponds to {@link FDBRecordStore.Builder#createOrOpen}
     */
    ERROR_IF_NO_INFO_AND_NOT_EMPTY,
    /**
     * Throw if the record store already exists.
     *
     * This corresponds to {@link FDBRecordStore.Builder#create}
     * @see RecordStoreAlreadyExistsException
     */
    ERROR_IF_EXISTS,
    /**
     * Throw if the record store does not already exist.
     *
     * This corresponds to {@link FDBRecordStore.Builder#open}
     * @see RecordStoreDoesNotExistException
     */
    ERROR_IF_NOT_EXISTS
}
/**
 * Action to take if the record being saved does / does not already exist.
 * @see FDBRecordStoreBase#saveRecordAsync(Message, RecordExistenceCheck)
 */
enum RecordExistenceCheck {
    /**
     * No special action.
     *
     * This corresponds to {@link FDBRecordStoreBase#saveRecord}
     */
    NONE,
    /**
     * Throw if the record already exists.
     *
     * This corresponds to {@link FDBRecordStoreBase#insertRecord}
     * @see RecordAlreadyExistsException
     */
    ERROR_IF_EXISTS,
    /**
     * Throw if the record does not already exist.
     *
     * @see RecordDoesNotExistException
     */
    ERROR_IF_NOT_EXISTS,
    /**
     * Throw if an existing record has a different record type.
     *
     * @see RecordTypeChangedException
     */
    ERROR_IF_RECORD_TYPE_CHANGED,
    /**
     * Throw if the record does not already exist or has a different record type.
     *
     * This corresponds to {@link FDBRecordStoreBase#updateRecord}
     * @see RecordDoesNotExistException
     * @see RecordTypeChangedException
     */
    ERROR_IF_NOT_EXISTS_OR_RECORD_TYPE_CHANGED;

    /** Whether this check fails the save when a record with the same primary key already exists. */
    public boolean errorIfExists() {
        switch (this) {
            case ERROR_IF_EXISTS:
                return true;
            default:
                return false;
        }
    }

    /** Whether this check fails the save when no record with the same primary key exists yet. */
    public boolean errorIfNotExists() {
        switch (this) {
            case ERROR_IF_NOT_EXISTS:
            case ERROR_IF_NOT_EXISTS_OR_RECORD_TYPE_CHANGED:
                return true;
            default:
                return false;
        }
    }

    /** Whether this check fails the save when an existing record has a different record type. */
    public boolean errorIfTypeChanged() {
        switch (this) {
            case ERROR_IF_RECORD_TYPE_CHANGED:
            case ERROR_IF_NOT_EXISTS_OR_RECORD_TYPE_CHANGED:
                return true;
            default:
                return false;
        }
    }
}
/**
 * Provided during record save (via {@link #saveRecord(Message, FDBRecordVersion, VersionstampSaveBehavior)}),
 * directs the behavior of the save w.r.t. the record's version.
 * In the presence of a version, either {@code DEFAULT} or {@code WITH_VERSION} can be used.
 * For safety, <code>NO_VERSION</code> should only be used with a null version.
 */
enum VersionstampSaveBehavior {
    /**
     * Match the behavior dictated by the meta-data. If {@link com.apple.foundationdb.record.RecordMetaData#isStoreRecordVersions()}
     * returns {@code true}, this will always store the record with a version (like {@link #WITH_VERSION}). Otherwise,
     * it will store the record with the provided version if given or with no version if not.
     */
    DEFAULT,
    /**
     * Do not save the record with a version. If a non-null version is provided to {@link #saveRecord(Message, FDBRecordVersion, VersionstampSaveBehavior)},
     * then an error will be thrown.
     */
    NO_VERSION,
    /**
     * Always save the record with a version. If a null version is provided, then the record store will choose
     * a new version.
     *
     * <p>
     * Note: due to <a href="https://github.com/FoundationDB/fdb-record-layer/issues/964">Issue #964</a>, on some
     * older record stores, namely those that were originally created with a {@linkplain FDBRecordStore#getFormatVersion()
     * format version} below {@link FDBRecordStore#SAVE_VERSION_WITH_RECORD_FORMAT_VERSION}, records written with a
     * version on stores where {@link com.apple.foundationdb.record.RecordMetaData#isStoreRecordVersions()} is
     * {@code false} will not return the version with a record when read, even though the version will be stored.
     * Users can avoid this by either migrating data to a new store or by setting {@code isStoreRecordVersions()}
     * to {@code true} in the meta-data and then supplying the {@link #NO_VERSION} when saving any records that
     * do not need an associated version.
     * </p>
     */
    WITH_VERSION,
    /**
     * Save a record with a version if and only if a non-null version is passed to {@link #saveRecord(Message, FDBRecordVersion, VersionstampSaveBehavior)}.
     * In this mode, the record store will never assign a version to the record, but it will always use the
     * version provided (or store the record with no version if {@code null}). This is useful if one is copying
     * data from one record store to another and one wants to preserve the versions (including non-versions) for each
     * record.
     */
    IF_PRESENT,
}
/**
 * Async version of {@link #saveRecord(Message)}.
 * Delegates with a {@code null} version (and, via that overload chain,
 * {@link VersionstampSaveBehavior#DEFAULT} and {@link RecordExistenceCheck#NONE}).
 * @param record the record to save
 * @return a future that completes with the stored record form of the saved record
 */
@Nonnull
default CompletableFuture<FDBStoredRecord<M>> saveRecordAsync(@Nonnull final M record) {
    return saveRecordAsync(record, (FDBRecordVersion)null);
}
/**
 * Async version of {@link #saveRecord(Message, RecordExistenceCheck)}.
 * Delegates with a {@code null} version and {@link VersionstampSaveBehavior#DEFAULT}.
 * @param record the record to save
 * @param existenceCheck when to throw an exception if a record with the same primary key does or does not already exist
 * @return a future that completes with the stored record form of the saved record
 */
@Nonnull
default CompletableFuture<FDBStoredRecord<M>> saveRecordAsync(@Nonnull final M record, @Nonnull RecordExistenceCheck existenceCheck) {
    return saveRecordAsync(record, existenceCheck, null, VersionstampSaveBehavior.DEFAULT);
}
/**
 * Async version of {@link #saveRecord(Message, FDBRecordVersion)}.
 * Delegates with {@link VersionstampSaveBehavior#DEFAULT}.
 * @param record the record to save
 * @param version the associated record version
 * @return a future that completes with the stored record form of the saved record
 */
@Nonnull
default CompletableFuture<FDBStoredRecord<M>> saveRecordAsync(@Nonnull final M record, @Nullable FDBRecordVersion version) {
    return saveRecordAsync(record, version, VersionstampSaveBehavior.DEFAULT);
}
/**
 * Async version of {@link #saveRecord(Message, FDBRecordVersion, VersionstampSaveBehavior)}.
 * Delegates with {@link RecordExistenceCheck#NONE}.
 * @param record the record to save
 * @param version the associated record version
 * @param behavior the save behavior w.r.t. the given <code>version</code>
 * @return a future that completes with the stored record form of the saved record
 */
@Nonnull
default CompletableFuture<FDBStoredRecord<M>> saveRecordAsync(@Nonnull final M record, @Nullable FDBRecordVersion version, @Nonnull final VersionstampSaveBehavior behavior) {
    return saveRecordAsync(record, RecordExistenceCheck.NONE, version, behavior);
}
/**
 * Async version of {@link #saveRecord(Message, RecordExistenceCheck, FDBRecordVersion, VersionstampSaveBehavior)}.
 * This is the primitive that all other {@code saveRecordAsync} overloads delegate to.
 * @param record the record to save
 * @param existenceCheck when to throw an exception if a record with the same primary key does or does not already exist
 * @param version the associated record version
 * @param behavior the save behavior w.r.t. the given <code>version</code>
 * @return a future that completes with the stored record form of the saved record
 */
@Nonnull
CompletableFuture<FDBStoredRecord<M>> saveRecordAsync(@Nonnull M record, @Nonnull RecordExistenceCheck existenceCheck,
                                                      @Nullable FDBRecordVersion version, @Nonnull VersionstampSaveBehavior behavior);
/**
 * Save the given record.
 * Delegates with a {@code null} version (and, via that overload chain,
 * {@link VersionstampSaveBehavior#DEFAULT} and {@link RecordExistenceCheck#NONE}).
 * @param record the record to be saved
 * @return wrapping object containing saved record and metadata
 */
@Nonnull
default FDBStoredRecord<M> saveRecord(@Nonnull final M record) {
    return saveRecord(record, (FDBRecordVersion)null);
}
/**
 * Save the given record.
 * Delegates with a {@code null} version and {@link VersionstampSaveBehavior#DEFAULT}.
 * @param record the record to be saved
 * @param existenceCheck when to throw an exception if a record with the same primary key does or does not already exist
 * @return wrapping object containing saved record and metadata
 */
@Nonnull
default FDBStoredRecord<M> saveRecord(@Nonnull final M record, @Nonnull RecordExistenceCheck existenceCheck) {
    return saveRecord(record, existenceCheck, null, VersionstampSaveBehavior.DEFAULT);
}
/**
 * Save the given record with a specific version. If <code>null</code>
 * is passed for <code>version</code>, then a new version is
 * created that will be unique for this record.
 * @param record the record to be saved
 * @param version the version to associate with the record when saving
 * @return wrapping object containing saved record and metadata
 */
@Nonnull
default FDBStoredRecord<M> saveRecord(@Nonnull final M record, @Nullable final FDBRecordVersion version) {
    return saveRecord(record, version, VersionstampSaveBehavior.DEFAULT);
}
/**
 * Save the given record with a specific version.
 * The version is handled according to the behavior value. If behavior is <code>DEFAULT</code> then
 * the method acts as {@link #saveRecord(Message, FDBRecordVersion)}. If behavior is <code>NO_VERSION</code> then
 * <code>version</code> is ignored and no version is saved. If behavior is <code>WITH_VERSION</code> then the value
 * of <code>version</code> is stored as given by the caller.
 * Delegates with {@link RecordExistenceCheck#NONE}.
 * @param record the record to be saved
 * @param version the version to associate with the record when saving
 * @param behavior the save behavior w.r.t. the given <code>version</code>
 * @return wrapping object containing saved record and metadata
 */
@Nonnull
default FDBStoredRecord<M> saveRecord(@Nonnull final M record, @Nullable final FDBRecordVersion version, @Nonnull final VersionstampSaveBehavior behavior) {
    return saveRecord(record, RecordExistenceCheck.NONE, version, behavior);
}
/**
* Save the given record with a specific version.
* The version is handled according to the behavior value. If behavior is <code>DEFAULT</code> then
* the method acts as {@link #saveRecord(Message, FDBRecordVersion)}. If behavior is <code>NO_VERSION</code> then
* <code>version</code> is ignored and no version is saved. If behavior is <code>WITH_VERSION</code> then the value
* of <code>version</code> is stored as given by the caller.
* @param record the record to be saved
* @param existenceCheck when to throw an exception if a record with the same primary key does or does not already exist
* @param version the version to associate with the record when saving
* @param behavior the save behavior w.r.t. the given <code>version</code>
* @return wrapping object containing saved record and metadata
*/
@Nonnull
default FDBStoredRecord<M> saveRecord(@Nonnull final M record, @Nonnull RecordExistenceCheck existenceCheck,
@Nullable final FDBRecordVersion version, @Nonnull final VersionstampSaveBehavior behavior) {
return getContext().asyncToSync(FDBStoreTimer.Waits.WAIT_SAVE_RECORD, saveRecordAsync(record, existenceCheck, version, behavior));
}
/**
* Save the given record and throw an exception if a record already exists with the same primary key.
* @param record the record to be saved
* @return a future that completes with the stored record form of the saved record
*/
@Nonnull
default CompletableFuture<FDBStoredRecord<M>> insertRecordAsync(@Nonnull final M record) {
return saveRecordAsync(record, RecordExistenceCheck.ERROR_IF_EXISTS);
}
/**
* Save the given record and throw an exception if a record already exists with the same primary key.
* @param record the record to be saved
* @return wrapping object containing saved record and metadata
*/
@Nonnull
default FDBStoredRecord<M> insertRecord(@Nonnull final M record) {
return saveRecord(record, RecordExistenceCheck.ERROR_IF_EXISTS);
}
/**
* Save the given record and throw an exception if the record does not already exist in the database.
* @param record the record to be saved
* @return a future that completes with the stored record form of the saved record
*/
@Nonnull
default CompletableFuture<FDBStoredRecord<M>> updateRecordAsync(@Nonnull final M record) {
return saveRecordAsync(record, RecordExistenceCheck.ERROR_IF_NOT_EXISTS_OR_RECORD_TYPE_CHANGED);
}
/**
* Save the given record and throw an exception if the record does not already exist in the database.
* @param record the record to be saved
* @return wrapping object containing saved record and metadata
*/
@Nonnull
default FDBStoredRecord<M> updateRecord(@Nonnull final M record) {
return saveRecord(record, RecordExistenceCheck.ERROR_IF_NOT_EXISTS_OR_RECORD_TYPE_CHANGED);
}
/**
* Load the record with the given primary key.
* @param primaryKey the primary key for the record
* @return a {@link FDBStoredRecord} for the record or <code>null</code>.
*/
@Nullable
default FDBStoredRecord<M> loadRecord(@Nonnull final Tuple primaryKey) {
return getContext().asyncToSync(FDBStoreTimer.Waits.WAIT_LOAD_RECORD, loadRecordAsync(primaryKey));
}
/**
* Load the record with the given primary key.
* @param primaryKey the primary key for the record
* @param snapshot whether to load at snapshot isolation
* @return a {@link FDBStoredRecord} for the record or <code>null</code>.
*/
@Nullable
default FDBStoredRecord<M> loadRecord(@Nonnull final Tuple primaryKey, final boolean snapshot) {
return getContext().asyncToSync(FDBStoreTimer.Waits.WAIT_LOAD_RECORD, loadRecordAsync(primaryKey, snapshot));
}
/**
* Asynchronously load a record.
* @param primaryKey the key for the record to be loaded
* @return a CompletableFuture that will return a message or null if there was no record with that key
*/
@Nonnull
default CompletableFuture<FDBStoredRecord<M>> loadRecordAsync(@Nonnull final Tuple primaryKey) {
return loadRecordAsync(primaryKey, false);
}
/**
* Asynchronously load a record.
* @param primaryKey the key for the record to be loaded
* @param snapshot whether to load at snapshot isolation
* @return a CompletableFuture that will return a message or null if there was no record with that key
*/
@Nonnull
default CompletableFuture<FDBStoredRecord<M>> loadRecordAsync(@Nonnull final Tuple primaryKey, final boolean snapshot) {
return loadRecordInternal(primaryKey, ExecuteState.NO_LIMITS, snapshot);
}
    /**
     * Load the record with the given primary key, charging the read against the given
     * {@link ExecuteState} (which is used elsewhere in this interface to enforce limits on
     * query execution). External callers should prefer {@link #loadRecordAsync(Tuple, boolean)}.
     * @param primaryKey the primary key for the record to load
     * @param executeState the execution state to associate with this load
     * @param snapshot whether to load at snapshot isolation
     * @return a future that completes with the stored record, or {@code null} if no record has that key
     */
    @Nonnull
    @API(API.Status.INTERNAL)
    CompletableFuture<FDBStoredRecord<M>> loadRecordInternal(@Nonnull Tuple primaryKey, @Nonnull ExecuteState executeState, boolean snapshot);
    /**
     * Get record into the FDB RYW (read-your-writes) cache.
     * The caller needs to hold on to the returned future until it is ready, or else there is a
     * chance it will get GC'ed and cancelled before the preload completes.
     * @param primaryKey the primary key for the record to retrieve
     * @return a future that will return {@code null} when the record is preloaded
     */
    @Nonnull
    CompletableFuture<Void> preloadRecordAsync(@Nonnull Tuple primaryKey);
/**
* Check if a record exists in the record store with the given primary key.
* This performs its reads at the {@link IsolationLevel#SERIALIZABLE} isolation level.
*
* @param primaryKey the primary key of the record
* @return a future that will complete to <code>true</code> if some record in record store has that primary key and
* <code>false</code> otherwise
* @see #recordExistsAsync(Tuple, IsolationLevel)
*/
@Nonnull
default CompletableFuture<Boolean> recordExistsAsync(@Nonnull final Tuple primaryKey) {
return recordExistsAsync(primaryKey, IsolationLevel.SERIALIZABLE);
}
    /**
     * Check if a record exists in the record store with the given primary key.
     * This is slightly more efficient than loading the record and checking if that record is <code>null</code>
     * as it does not have to deserialize the record, though the record's contents are still read from the
     * database and sent over the network.
     *
     * @param primaryKey the primary key of the record
     * @param isolationLevel the isolation level to use when reading
     * @return a future that will complete to <code>true</code> if some record in record store has that primary key and
     * <code>false</code> otherwise
     * @see #recordExists(Tuple, IsolationLevel)
     */
    @Nonnull
    CompletableFuture<Boolean> recordExistsAsync(@Nonnull final Tuple primaryKey, @Nonnull final IsolationLevel isolationLevel);
/**
* Check if a record exists in the record store with the given primary key.
* This method is blocking. For the non-blocking version of this method, see {@link #recordExistsAsync(Tuple)}.
*
* @param primaryKey the primary key of the record
* @return <code>true</code> if some record in record store has that primary key and <code>false</code> otherwise
* @see #recordExistsAsync(Tuple)
*/
default boolean recordExists(@Nonnull final Tuple primaryKey) {
return getContext().asyncToSync(FDBStoreTimer.Waits.WAIT_RECORD_EXISTS, recordExistsAsync(primaryKey));
}
    /**
     * Check if a record exists in the record store with the given primary key.
     * This method is blocking. For the non-blocking version of this method, see {@link #recordExistsAsync(Tuple, IsolationLevel)}.
     *
     * @param primaryKey the primary key of the record
     * @param isolationLevel the isolation level to use when reading
     * @return <code>true</code> if some record in record store has that primary key and <code>false</code> otherwise
     * @see #recordExistsAsync(Tuple, IsolationLevel)
     */
    default boolean recordExists(@Nonnull final Tuple primaryKey, @Nonnull final IsolationLevel isolationLevel) {
        return getContext().asyncToSync(FDBStoreTimer.Waits.WAIT_RECORD_EXISTS, recordExistsAsync(primaryKey, isolationLevel));
    }
    /**
     * Add a read conflict as if one had read the record with the given primary key. This will cause this transaction
     * to fail (with a {@link com.apple.foundationdb.record.provider.foundationdb.FDBExceptions.FDBStoreTransactionConflictException})
     * if a concurrent transaction modifies the record with the provided primary key. This call however does not require
     * performing any reads against the database, so it is faster and cheaper to perform than a real read. Note also that
     * read-only operations are not checked for conflicts, so if this method is called, but the transaction performs
     * no mutations, the transaction will never be failed with the above exception. Note also that this does not
     * check that a record with this primary key actually exists in the database.
     *
     * <p>
     * One use case is that this can be used to promote a read from {@link IsolationLevel#SNAPSHOT} to
     * {@link IsolationLevel#SERIALIZABLE}. For example, if one performs a query at {@link IsolationLevel#SNAPSHOT} and
     * then uses a subset of the records to determine a few other writes, then one can add conflicts to <em>only</em>
     * the records actually used.
     * </p>
     *
     * <p>
     * This method should be used with care and is advised only for those users who need extra control over conflict
     * ranges.
     * </p>
     *
     * @param primaryKey the primary key of the record to add a read conflict on
     * @see com.apple.foundationdb.Transaction#addReadConflictRange(byte[], byte[])
     * @see #addRecordWriteConflict(Tuple)
     */
    void addRecordReadConflict(@Nonnull final Tuple primaryKey);
    /**
     * Add a write conflict as if one had modified the record with the given primary key. This will cause any concurrent
     * transactions to fail (with a {@link com.apple.foundationdb.record.provider.foundationdb.FDBExceptions.FDBStoreTransactionConflictException})
     * if they read the record with the provided primary key. This call however does not require performing any writes
     * against the database, so it is faster and cheaper to perform than a real write. Note that this does not check
     * if a record with this primary key actually exists in the database, and it does not update any indexes associated
     * with the record. In this way, it is identical (in terms of conflicts) with overwriting the given record with itself,
     * though it will not induce any disk I/O or cause any {@linkplain com.apple.foundationdb.Transaction#watch(byte[]) watches}
     * on the modified keys to fire.
     *
     * <p>
     * This method should be used with care and is advised only for those users who need extra control over conflict
     * ranges.
     * </p>
     *
     * @param primaryKey the primary key of the record to add a write conflict on
     * @see com.apple.foundationdb.Transaction#addWriteConflictRange(byte[], byte[])
     * @see #addRecordReadConflict(Tuple)
     */
    void addRecordWriteConflict(@Nonnull final Tuple primaryKey);
/**
* Scan the records in the database.
*
* @param continuation any continuation from a previous scan
* @param scanProperties skip, limit and other scan properties
*
* @return a cursor that will scan everything in the range, picking up at continuation, and honoring the given scan properties
*/
@Nonnull
default RecordCursor<FDBStoredRecord<M>> scanRecords(@Nullable byte[] continuation, @Nonnull ScanProperties scanProperties) {
return scanRecords(null, null, EndpointType.TREE_START, EndpointType.TREE_END, continuation, scanProperties);
}
/**
* Scan the records in the database in a range.
*
* @param range the range to scan
* @param continuation any continuation from a previous scan
* @param scanProperties skip, limit and other scan properties
*
* @return a cursor that will scan everything in the range, picking up at continuation, and honoring the given scan properties
*/
@Nonnull
default RecordCursor<FDBStoredRecord<M>> scanRecords(@Nonnull TupleRange range, @Nullable byte[] continuation, @Nonnull ScanProperties scanProperties) {
return scanRecords(range.getLow(), range.getHigh(), range.getLowEndpoint(), range.getHighEndpoint(), continuation, scanProperties);
}
    /**
     * Scan the records in the database in a range.
     *
     * @param low low point of scan range
     * @param high high point of scan range
     * @param lowEndpoint whether low point is inclusive or exclusive
     * @param highEndpoint whether high point is inclusive or exclusive
     * @param continuation any continuation from a previous scan
     * @param scanProperties skip, limit and other scan properties
     *
     * @return a cursor that will scan everything in the range, picking up at continuation, and honoring the given scan properties
     */
    @Nonnull
    RecordCursor<FDBStoredRecord<M>> scanRecords(@Nullable Tuple low, @Nullable Tuple high,
                                                 @Nonnull EndpointType lowEndpoint, @Nonnull EndpointType highEndpoint,
                                                 @Nullable byte[] continuation,
                                                 @Nonnull ScanProperties scanProperties);
/**
* Count the number of records in the database in a range.
*
* @param low low point of scan range
* @param high high point of scan point
* @param lowEndpoint whether low point is inclusive or exclusive
* @param highEndpoint whether high point is inclusive or exclusive
*
* @return a future that will complete with the number of records in the range
*/
@Nonnull
default CompletableFuture<Integer> countRecords(@Nullable Tuple low, @Nullable Tuple high,
@Nonnull EndpointType lowEndpoint, @Nonnull EndpointType highEndpoint) {
return countRecords(low, high, lowEndpoint, highEndpoint, null, ScanProperties.FORWARD_SCAN);
}
    /**
     * Count the number of records in the database in a range.
     *
     * @param low low point of scan range
     * @param high high point of scan range
     * @param lowEndpoint whether low point is inclusive or exclusive
     * @param highEndpoint whether high point is inclusive or exclusive
     * @param continuation any continuation from a previous scan
     * @param scanProperties skip, limit and other scan properties
     *
     * @return a future that will complete with the number of records in the range
     */
    @Nonnull
    CompletableFuture<Integer> countRecords(@Nullable Tuple low, @Nullable Tuple high,
                                            @Nonnull EndpointType lowEndpoint, @Nonnull EndpointType highEndpoint,
                                            @Nullable byte[] continuation,
                                            @Nonnull ScanProperties scanProperties);
    /**
     * Scan the entries in an index.
     * @param index the index to scan
     * @param scanType the type of scan to perform
     * @param range range to scan
     * @param continuation any continuation from a previous scan
     * @param scanProperties skip, limit and other scan properties
     * @return a cursor that will scan the index, picking up at continuation, and honoring the given scan properties
     * @see #scanIndexRecords(String, IndexScanType, TupleRange, byte[], ScanProperties)
     */
    @Nonnull
    RecordCursor<IndexEntry> scanIndex(@Nonnull Index index, @Nonnull IndexScanType scanType,
                                       @Nonnull TupleRange range, @Nullable byte[] continuation,
                                       @Nonnull ScanProperties scanProperties);
/**
* Scan the records pointed to by an index.
* @param indexName the name of the index
* @return a cursor that return records pointed to by the index
*/
@Nonnull
default RecordCursor<FDBIndexedRecord<M>> scanIndexRecords(@Nonnull final String indexName) {
return scanIndexRecords(indexName, IsolationLevel.SERIALIZABLE);
}
/**
* Scan the records pointed to by an index.
* @param indexName the name of the index
* @param isolationLevel the isolation level to use when reading
* @return a cursor that return records pointed to by the index
*/
@Nonnull
default RecordCursor<FDBIndexedRecord<M>> scanIndexRecords(@Nonnull final String indexName, IsolationLevel isolationLevel) {
return scanIndexRecords(indexName, IndexScanType.BY_VALUE, TupleRange.ALL, null,
new ScanProperties(ExecuteProperties.newBuilder().setIsolationLevel(isolationLevel).build()));
}
/**
* Scan the records pointed to by an index.
* @param indexName the name of the index
* @param scanType the type of scan to perform
* @param range the range of the index to scan
* @param continuation any continuation from a previous scan
* @param scanProperties skip, limit and other scan properties
* @return a cursor that return records pointed to by the index
*/
@Nonnull
default RecordCursor<FDBIndexedRecord<M>> scanIndexRecords(@Nonnull final String indexName,
@Nonnull final IndexScanType scanType,
@Nonnull final TupleRange range,
@Nullable byte[] continuation,
@Nonnull ScanProperties scanProperties) {
return scanIndexRecords(indexName, scanType, range, continuation, IndexOrphanBehavior.ERROR, scanProperties);
}
/**
* Scan the records pointed to by an index.
* @param indexName the name of the index
* @param scanType the type of scan to perform
* @param range the range of the index to scan
* @param continuation any continuation from a previous scan
* @param orphanBehavior how the iteration process should respond in the face of entries in the index for which
* there is no associated record
* @param scanProperties skip, limit and other scan properties
* @return a cursor that return records pointed to by the index
*/
@Nonnull
default RecordCursor<FDBIndexedRecord<M>> scanIndexRecords(@Nonnull final String indexName,
@Nonnull final IndexScanType scanType,
@Nonnull final TupleRange range,
@Nullable byte[] continuation,
@Nonnull IndexOrphanBehavior orphanBehavior,
@Nonnull ScanProperties scanProperties) {
final Index index = getRecordMetaData().getIndex(indexName);
return fetchIndexRecords(scanIndex(index, scanType, range, continuation, scanProperties), orphanBehavior,
scanProperties.getExecuteProperties().getState());
}
/**
* Given a cursor that iterates over entries in an index, attempts to fetch the associated records for those entries.
*
* @param indexCursor a cursor iterating over entries in the index
* @param orphanBehavior how the iteration process should respond in the face of entries in the index for which
* there is no associated record
* @return a cursor returning indexed record entries
*/
@Nonnull
default RecordCursor<FDBIndexedRecord<M>> fetchIndexRecords(@Nonnull RecordCursor<IndexEntry> indexCursor,
@Nonnull IndexOrphanBehavior orphanBehavior) {
return fetchIndexRecords(indexCursor, orphanBehavior, ExecuteState.NO_LIMITS);
}
/**
* Given a cursor that iterates over entries in an index, attempts to fetch the associated records for those entries.
*
* @param indexCursor A cursor iterating over entries in the index.
* @param orphanBehavior How the iteration process should respond in the face of entries in the index for which
* there is no associated record.
* @param executeState the {@link ExecuteState} associated with this query execution
* @return A cursor returning indexed record entries.
*/
@Nonnull
default RecordCursor<FDBIndexedRecord<M>> fetchIndexRecords(@Nonnull RecordCursor<IndexEntry> indexCursor,
@Nonnull IndexOrphanBehavior orphanBehavior,
@Nonnull ExecuteState executeState) {
RecordCursor<FDBIndexedRecord<M>> recordCursor = indexCursor.mapPipelined(entry ->
loadIndexEntryRecord(entry, orphanBehavior, executeState), getPipelineSize(PipelineOperation.INDEX_TO_RECORD));
if (orphanBehavior == IndexOrphanBehavior.SKIP) {
recordCursor = recordCursor.filter(Objects::nonNull);
}
return recordCursor;
}
/**
* Scan the records pointed to by an index equal to indexed values.
* @param indexName the name of the index
* @param values a left-subset of values of indexed fields
* @return a cursor that return records pointed to by the index
*/
@Nonnull
default RecordCursor<FDBIndexedRecord<M>> scanIndexRecordsEqual(@Nonnull final String indexName, @Nonnull final Object... values) {
final Tuple tuple = Tuple.from(values);
final TupleRange range = TupleRange.allOf(tuple);
return scanIndexRecords(indexName, IndexScanType.BY_VALUE, range, null, ScanProperties.FORWARD_SCAN);
}
/**
* Scan the records pointed to by an index between two indexed values.
* @param indexName the name of the index
* @param low the low value for the first indexed field
* @param high the high value for the first indexed field
* @return a cursor that return records pointed to by the index
*/
@Nonnull
default RecordCursor<FDBIndexedRecord<M>> scanIndexRecordsBetween(@Nonnull final String indexName,
@Nullable final Object low, @Nullable final Object high) {
final Tuple lowTuple = Tuple.from(low);
final Tuple highTuple = Tuple.from(high);
final TupleRange range = new TupleRange(lowTuple, highTuple,
EndpointType.RANGE_INCLUSIVE, EndpointType.RANGE_INCLUSIVE);
return scanIndexRecords(indexName, IndexScanType.BY_VALUE, range, null, ScanProperties.FORWARD_SCAN);
}
/**
* Determine if a given index entry points to a record.
* @param entry the index entry to check
* @param isolationLevel whether to use snapshot read
* @return a future that completes with {@code true} if the given index entry still points to a record
*/
@Nonnull
default CompletableFuture<Boolean> hasIndexEntryRecord(@Nonnull final IndexEntry entry,
@Nonnull final IsolationLevel isolationLevel) {
return recordExistsAsync(entry.getPrimaryKey(), isolationLevel);
}
/**
* Using the given index entry, resolve the primary key and asynchronously return the referenced record.
* @param entry the index entry to be resolved
* @param orphanBehavior the {@link IndexOrphanBehavior} to apply if the record is not found
* @return the record referred to by the given index entry
*/
@Nonnull
default CompletableFuture<FDBIndexedRecord<M>> loadIndexEntryRecord(@Nonnull final IndexEntry entry,
@Nonnull final IndexOrphanBehavior orphanBehavior) {
return loadIndexEntryRecord(entry, orphanBehavior, ExecuteState.NO_LIMITS);
}
/**
* Using the given index entry, resolve the primary key and asynchronously return the referenced record.
* @param entry the index entry to be resolved
* @param orphanBehavior the {@link IndexOrphanBehavior} to apply if the record is not found
* @param executeState an execution state object to be used to enforce limits on query execution
* @return the record referred to by the given index entry
*/
@Nonnull
default CompletableFuture<FDBIndexedRecord<M>> loadIndexEntryRecord(@Nonnull final IndexEntry entry,
@Nonnull final IndexOrphanBehavior orphanBehavior,
@Nonnull final ExecuteState executeState) {
final Tuple primaryKey = entry.getPrimaryKey();
return loadRecordInternal(primaryKey, executeState,false).thenApply(record -> {
if (record == null) {
switch (orphanBehavior) {
case SKIP:
return null;
case RETURN:
break;
case ERROR:
if (getTimer() != null) {
getTimer().increment(FDBStoreTimer.Counts.BAD_INDEX_ENTRY);
}
throw new RecordCoreStorageException("record not found from index entry").addLogInfo(
LogMessageKeys.INDEX_NAME, entry.getIndex().getName(),
LogMessageKeys.PRIMARY_KEY, primaryKey,
LogMessageKeys.INDEX_KEY, entry.getKey(),
getSubspaceProvider().logKey(), getSubspaceProvider().toString(getContext()));
default:
throw new RecordCoreException("Unexpected index orphan behavior: " + orphanBehavior);
}
}
return new FDBIndexedRecord<>(entry, record);
});
}
/**
* Return a tuple to be used as the key for an index entry for the given value and primary key.
* @param index the index for which this will be an entry
* @param valueKey the indexed value(s) for the entry
* @param primaryKey the primary key for the record
* @return the key to use for an index entry, the two tuples appended with redundant parts of the primary key removed
*/
@Nonnull
static Tuple indexEntryKey(@Nonnull Index index, @Nonnull Tuple valueKey, @Nonnull Tuple primaryKey) {
List<Object> primaryKeys = primaryKey.getItems();
index.trimPrimaryKey(primaryKeys);
if (primaryKeys.isEmpty()) {
return valueKey;
} else {
return valueKey.addAll(primaryKeys);
}
}
/**
* Scan the list of uniqueness violations for an index for violations with a specific value.
* This is similar to the version of {@link FDBRecordStoreBase#scanUniquenessViolations(Index, TupleRange, byte[], ScanProperties) scanUniquenessViolations()}
* that takes a {@link TupleRange}, but this version only selects violations that have the
* given key as the uniqueness violation key.
*
* @param index the index to scan the uniqueness violations of
* @param valueKey the key (as a tuple) of the index whose violations to scan
* @param continuation any continuation from a previous scan
* @param scanProperties skip, limit and other scan properties
* @return a cursor that will return uniqueness violations stored for the given index in the given store
*/
@Nonnull
default RecordCursor<RecordIndexUniquenessViolation> scanUniquenessViolations(@Nonnull Index index, @Nonnull Tuple valueKey, @Nullable byte[] continuation, @Nonnull ScanProperties scanProperties) {
TupleRange range = TupleRange.allOf(valueKey);
return scanUniquenessViolations(index, range, continuation, scanProperties);
}
/**
* Scan the list of uniqueness violations for an index for violations with a specific value.
* This is similar to the version of {@link FDBRecordStoreBase#scanUniquenessViolations(Index, TupleRange, byte[], ScanProperties) scanUniquenessViolations()}
* that takes a {@link TupleRange}, but this version only selects violations that have the
* given key as the uniqueness violation key.
*
* @param index the index to scan the uniqueness violations of
* @param indexKey the key of the index whose violations to scan
* @param continuation any continuation from a previous scan
* @param scanProperties skip, limit and other scan properties
* @return a cursor that will return uniqueness violations stored for the given index in the given store
*/
@Nonnull
default RecordCursor<RecordIndexUniquenessViolation> scanUniquenessViolations(@Nonnull Index index, @Nonnull Key.Evaluated indexKey, @Nullable byte[] continuation, @Nonnull ScanProperties scanProperties) {
return scanUniquenessViolations(index, indexKey.toTuple(), continuation, scanProperties);
}
/**
* Scan the list of uniqueness violations for an index for violations with a specific value.
* This is similar to the version of {@link FDBRecordStoreBase#scanUniquenessViolations(Index, TupleRange, byte[], ScanProperties) scanUniquenessViolations()}
* that takes a {@link TupleRange}, but this version only selects violations that have the
* given key as the uniqueness violation key. It does not limit the number of responses it returns.
*
* @param index the index to scan the uniqueness violations of
* @param valueKey the key (as a tuple) of the index whose violations to scan
* @return a cursor that will return uniqueness violations stored for the given index in the given store
*/
@Nonnull
default RecordCursor<RecordIndexUniquenessViolation> scanUniquenessViolations(@Nonnull Index index, @Nonnull Tuple valueKey) {
return scanUniquenessViolations(index, valueKey, null, ScanProperties.FORWARD_SCAN);
}
/**
* Scan the list of uniqueness violations for an index for violations with a specific value.
* This is similar to the version of {@link FDBRecordStoreBase#scanUniquenessViolations(Index, TupleRange, byte[], ScanProperties) scanUniquenessViolations()}
* that takes a {@link TupleRange}, but this version only selects violations that have the
* given key as the uniqueness violation key. It does not limit the number of responses it
* returns.
*
* @param index the index to scan the uniqueness violations of
* @param indexKey the key of the index whose violations to scan
* @return a cursor that will return uniqueness violations stored for the given index in the given store
*/
@Nonnull
default RecordCursor<RecordIndexUniquenessViolation> scanUniquenessViolations(@Nonnull Index index, @Nonnull Key.Evaluated indexKey) {
return scanUniquenessViolations(index, indexKey, null, ScanProperties.FORWARD_SCAN);
}
/**
* Scan the list of uniqueness violations for an index for violations with a specific value.
* This is similar to the version of {@link FDBRecordStoreBase#scanUniquenessViolations(Index, TupleRange, byte[], ScanProperties) scanUniquenessViolations()}
* that takes a {@link TupleRange}, but this version tries to retrieve all of the violations it can
* subject to the limit specified.
*
* @param index the index to scan the uniqueness violations of
* @param continuation any continuation from a previous scan
* @param scanProperties skip, limit and other scan properties
* @return a cursor that will return uniqueness violations stored for the given index in the given store
*/
@Nonnull
default RecordCursor<RecordIndexUniquenessViolation> scanUniquenessViolations(@Nonnull Index index, @Nullable byte[] continuation, @Nonnull ScanProperties scanProperties) {
return scanUniquenessViolations(index, TupleRange.ALL, continuation, scanProperties);
}
/**
* Scan the list of uniqueness violations for an index for violations with a specific value.
* This is similar to the version of {@link FDBRecordStoreBase#scanUniquenessViolations(Index, TupleRange, byte[], ScanProperties) scanUniquenessViolations()}
* that takes a {@link TupleRange}, but this version tries to retrieve all of the violations it can
* subject to the limit specified.
*
* @param index the index to scan the uniqueness violations of
* @param limit the maximum number of uniqueness violations to report
* @return a cursor that will return uniqueness violations stored for the given index in the given store
*/
@Nonnull
default RecordCursor<RecordIndexUniquenessViolation> scanUniquenessViolations(@Nonnull Index index, int limit) {
return scanUniquenessViolations(index, null, new ScanProperties(ExecuteProperties.newBuilder()
.setReturnedRowLimit(limit)
.setIsolationLevel(IsolationLevel.SERIALIZABLE)
.build()));
}
/**
* Scan the list of uniqueness violations for an index for violations with a specific value.
* This is similar to the version of {@link FDBRecordStoreBase#scanUniquenessViolations(Index, TupleRange, byte[], ScanProperties) scanUniquenessViolations()}
* that takes a {@link TupleRange}, but this version tries to retrieve all of the violations it can. It
* does not try to limit its results.
*
* @param index the index to scan the uniqueness violations of
* @return a cursor that will return uniqueness violations stored for the given index in the given store
*/
@Nonnull
default RecordCursor<RecordIndexUniquenessViolation> scanUniquenessViolations(@Nonnull Index index) {
return scanUniquenessViolations(index, Integer.MAX_VALUE);
}
    /**
     * Scan the list of uniqueness violations identified for an index. It looks only for violations
     * within the given range subject to the given limit and (possibly) will go in reverse.
     * They will be returned in an order that is grouped by the index value keys that they have in common
     * and will be ordered within the grouping by the primary key.
     *
     * <p>
     * Because of how the data are stored, each primary key that is part of a uniqueness violation
     * will appear at most once for each index key that is causing a violation. The associated
     * existing key is going to be one of the other keys, but it might not be the only one.
     * This means that the total number of violations per index key is capped at the number of records in the
     * store (rather than the square), but it also means that the existing key data is of limited help.
     *
     * @param index the index to scan the uniqueness violations of
     * @param range the range of tuples to include in the scan
     * @param continuation any continuation from a previous scan
     * @param scanProperties skip, limit and other scan properties
     * @return a cursor that will return uniqueness violations stored for the given index in the given store
     * @see #resolveUniquenessViolation(Index, Tuple, Tuple)
     */
    @Nonnull
    RecordCursor<RecordIndexUniquenessViolation> scanUniquenessViolations(@Nonnull Index index, @Nonnull TupleRange range,
                                                                          @Nullable byte[] continuation,
                                                                          @Nonnull ScanProperties scanProperties);
/**
 * Removes all of the records that have the given value set as their index value (and are thus causing
 * a uniqueness violation) except for the one that has the given primary key (if the key is not <code>null</code>).
 * This is like the version of {@link FDBRecordStoreBase#resolveUniquenessViolation(Index, Tuple, Tuple) resolveUniquenessViolation()}
 * that takes a {@link Tuple}, but this takes the index value as a {@link Key.Evaluated} instead.
 * @param index the index to resolve uniqueness violations for
 * @param indexKey the value of the index that is being removed
 * @param primaryKey the primary key of the record that should remain (or <code>null</code> to remove all of them)
 * @return a future that will complete when all of the records have been removed
 */
@Nonnull
default CompletableFuture<Void> resolveUniquenessViolation(@Nonnull Index index, @Nonnull Key.Evaluated indexKey, @Nullable Tuple primaryKey) {
    return resolveUniquenessViolation(index, indexKey.toTuple(), primaryKey);
}
/**
 * Removes all of the records that have the given value set as their index value (and are thus causing a
 * uniqueness violation) except for the one that has the given primary key (if the key is not <code>null</code>).
 * It also cleans up the set of uniqueness violations so that none of the remaining entries will
 * be associated with the given value key.
 * @param index the index to resolve uniqueness violations for
 * @param valueKey the value of the index that is being removed
 * @param primaryKey the primary key of the record that should remain (or <code>null</code> to remove all of them)
 * @return a future that will complete when all of the records have been removed
 * @see #resolveUniquenessViolation(Index, Key.Evaluated, Tuple)
 */
@Nonnull
CompletableFuture<Void> resolveUniquenessViolation(@Nonnull Index index, @Nonnull Tuple valueKey, @Nullable Tuple primaryKey);
/**
 * Construct the key under which a uniqueness violation is recorded, namely the index value key
 * with the record's primary key appended. This is used to store the index uniqueness violations
 * when building a unique index.
 * @param valueKey the value of the index for a record
 * @param primaryKey the primary key for a record
 * @return a tuple that is the two keys appended together
 */
@Nonnull
static Tuple uniquenessViolationKey(@Nonnull Tuple valueKey, @Nonnull Tuple primaryKey) {
    return valueKey.addAll(primaryKey);
}
/**
 * Async version of {@link #deleteRecord}.
 * @param primaryKey the primary key of the record to delete
 * @return a future that completes {@code true} if the record was present to be deleted
 */
@Nonnull
CompletableFuture<Boolean> deleteRecordAsync(@Nonnull Tuple primaryKey);
/**
 * Delete the record with the given primary key.
 *
 * <p>
 * Blocking form of {@link #deleteRecordAsync}: waits on the store's context for the delete to finish.
 *
 * @param primaryKey the primary key for the record to be deleted
 *
 * @return true if something was there to delete, false if the record didn't exist
 */
default boolean deleteRecord(@Nonnull Tuple primaryKey) {
    final CompletableFuture<Boolean> pendingDelete = deleteRecordAsync(primaryKey);
    return getContext().asyncToSync(FDBStoreTimer.Waits.WAIT_DELETE_RECORD, pendingDelete);
}
/**
 * Delete all the data in the record store.
 * <p>
 * Everything except the store header and index state information is cleared from the database.
 * This is an efficient operation as all data are contiguous.
 * This means that any {@linkplain IndexState#DISABLED disabled} or {@linkplain IndexState#WRITE_ONLY write-only}
 * index will remain in its disabled or write-only state after all of the data are cleared. If one also wants
 * to reset all index states, one can call {@link FDBRecordStore#rebuildAllIndexes()}, which should complete
 * quickly on an empty record store. If one wants to remove the record store entirely (including the store
 * header and all index states), one should call {@link FDBRecordStore#deleteStore(FDBRecordContext, KeySpacePath)}
 * instead of this method.
 *
 * <p>
 * Note that, at the moment, this operation also has the side effect of resetting
 * {@link com.apple.foundationdb.record.metadata.IndexTypes#MAX_EVER_TUPLE MAX_EVER} and
 * {@link com.apple.foundationdb.record.metadata.IndexTypes#MIN_EVER_TUPLE MIN_EVER} indexes.
 * See: <a href="https://github.com/FoundationDB/fdb-record-layer/issues/398">Issue #398</a>.
 * </p>
 *
 * @see FDBRecordStore#deleteStore(FDBRecordContext, KeySpacePath)
 * @see FDBRecordStore#deleteStore(FDBRecordContext, Subspace)
 */
void deleteAllRecords();
/**
 * Delete records and associated index entries matching a query filter.
 *
 * <p>
 * Blocking form of {@link #deleteRecordsWhereAsync(QueryComponent)}. Throws an exception if the
 * operation cannot be done efficiently in a small number of contiguous range clears. In practice,
 * this means that the query filter must constrain a prefix of all record types' primary keys
 * and of all indexes' root expressions.
 *
 * @param component the query filter for records to delete efficiently
 */
default void deleteRecordsWhere(@Nonnull QueryComponent component) {
    final CompletableFuture<Void> pendingDelete = deleteRecordsWhereAsync(component);
    getContext().asyncToSync(FDBStoreTimer.Waits.WAIT_DELETE_RECORD, pendingDelete);
}
/**
 * Delete records and associated index entries matching a query filter.
 *
 * <p>
 * Blocking form of {@link #deleteRecordsWhereAsync(String, QueryComponent)}. Throws an exception if
 * the operation cannot be done efficiently in a small number of contiguous range clears. In practice,
 * this means both that all record types must have a record type key prefix and that the query filter
 * must constrain a prefix of all record types' primary keys and of all indexes' root expressions.
 *
 * @param recordType the type of records to delete
 * @param component the query filter for records to delete efficiently or {@code null} to delete all records of the given type
 */
default void deleteRecordsWhere(@Nonnull String recordType, @Nullable QueryComponent component) {
    final CompletableFuture<Void> pendingDelete = deleteRecordsWhereAsync(recordType, component);
    getContext().asyncToSync(FDBStoreTimer.Waits.WAIT_DELETE_RECORD, pendingDelete);
}
/**
 * Async version of {@link #deleteRecordsWhere(QueryComponent)}.
 *
 * @param component the query filter for records to delete efficiently
 * @return a future that will be complete when the delete is done
 */
@Nonnull
CompletableFuture<Void> deleteRecordsWhereAsync(@Nonnull QueryComponent component);
/**
 * Async version of {@link #deleteRecordsWhere(String, QueryComponent)}.
 * @param recordType the type of records to delete
 * @param component the query filter for records to delete efficiently or {@code null} to delete all records of the given type
 * @return a future that will be complete when the delete is done
 */
@Nonnull
default CompletableFuture<Void> deleteRecordsWhereAsync(@Nonnull String recordType, @Nullable QueryComponent component) {
    // Fold the record type restriction into the filter so the single-argument overload can handle both.
    return deleteRecordsWhereAsync(FDBRecordStore.mergeRecordTypeAndComponent(recordType, component));
}
/**
 * Function for computing the number of elements to allow in the asynchronous pipeline for an operation of the given
 * type.
 */
@FunctionalInterface
interface PipelineSizer {
    /**
     * Get the pipeline depth to use for the given operation type.
     * @param pipelineOperation the kind of pipelined operation being sized
     * @return the number of elements to allow in flight for that operation
     */
    int getPipelineSize(@Nonnull PipelineOperation pipelineOperation);
}
/**
 * Get the function for computing the number of elements to allow in the asynchronous pipeline for an operation of the given
 * type.
 * @return the pipeline sizer used by this store
 */
@Nonnull
PipelineSizer getPipelineSizer();
/**
 * Get the number of elements to allow in the asynchronous pipeline for an operation of the given type.
 * Convenience wrapper around {@link #getPipelineSizer()}.
 * @param pipelineOperation the operation
 * @return the number of elements to pipeline
 */
default int getPipelineSize(@Nonnull PipelineOperation pipelineOperation) {
    final PipelineSizer sizer = getPipelineSizer();
    return sizer.getPipelineSize(pipelineOperation);
}
/**
 * Compute an estimated size of the store in bytes. The estimate will include all data in the store, including
 * all records and indexes.
 *
 * <p>
 * This uses a sample maintained by the database to efficiently compute an estimate for the size of the store
 * without needing to scan all data. Because keys in this data structure are sampled, the value will not be
 * exact. If an exact size is needed, the
 * {@link com.apple.foundationdb.record.provider.foundationdb.cursors.SizeStatisticsCollectorCursor} can
 * be used, though note that that cursor must read the entire store to produce its statistics.
 * </p>
 *
 * @return a future that will contain an estimate for the size of the store
 * @see #estimateRecordsSizeAsync()
 */
@Nonnull
CompletableFuture<Long> estimateStoreSizeAsync();
/**
 * Compute an estimated size of all records in the store in bytes. The estimate will only include the space used
 * by the records and excludes all other data maintained by the store. (For example, index data is <em>not</em>
 * included in the returned estimate.)
 *
 * <p>
 * This uses the same method for computing the estimate as {@link #estimateStoreSizeAsync()}.
 * </p>
 *
 * @return a future that will contain an estimate for the size of all records in the store
 * @see #estimateStoreSizeAsync()
 */
@Nonnull
default CompletableFuture<Long> estimateRecordsSizeAsync() {
    // TupleRange.ALL: estimate over the full primary key space of the store.
    return estimateRecordsSizeAsync(TupleRange.ALL);
}
/**
 * Compute an estimated size in bytes of all records in the store within the given primary key range. The estimate
 * will only include the space used by the records and excludes all other data maintained by the store. (For
 * example, index data is <em>not</em> included in the returned estimate.)
 *
 * <p>
 * This uses the same method for computing the estimate as {@link #estimateStoreSizeAsync()}.
 * </p>
 *
 * @param range range of records to estimate the size of, expressed over record primary keys
 * @return a future that will contain an estimate for the size of all records in the store
 * @see #estimateStoreSizeAsync()
 */
@Nonnull
CompletableFuture<Long> estimateRecordsSizeAsync(@Nonnull TupleRange range);
/**
 * Get the number of records in the record store.
 *
 * There must be a suitable {@code COUNT} type index defined.
 * @return a future that will complete to the number of records in the store
 */
@Nonnull
default CompletableFuture<Long> getSnapshotRecordCount() {
    // Empty grouping key and value: count over the entire (ungrouped) store.
    return getSnapshotRecordCount(EmptyKeyExpression.EMPTY, Key.Evaluated.EMPTY);
}
/**
 * Get the number of records in a portion of the record store determined by a group key expression.
 *
 * There must be a suitably grouped {@code COUNT} type index defined.
 * @param key the grouping key expression
 * @param value the value of {@code key} selecting the group to count
 * @return a future that will complete to the number of records
 */
@Nonnull
CompletableFuture<Long> getSnapshotRecordCount(@Nonnull KeyExpression key, @Nonnull Key.Evaluated value);
/**
 * Get the number of records in the record store of the given record type.
 *
 * The record type must have a {@code COUNT} index defined for it.
 * @param recordTypeName record type for which to count records
 * @return a future that will complete to the number of records of the given type
 */
@Nonnull
CompletableFuture<Long> getSnapshotRecordCountForRecordType(@Nonnull String recordTypeName);
/**
 * Get the number of record updates in the record store, evaluated at {@code SNAPSHOT} isolation.
 *
 * There must be a suitable index from which update counts can be evaluated
 * (see {@link IndexFunctionHelper#countUpdates}).
 * @return a future that will complete to the number of record updates in the store
 */
default CompletableFuture<Long> getSnapshotRecordUpdateCount() {
    // Empty grouping key and value: count updates over the entire (ungrouped) store.
    return getSnapshotRecordUpdateCount(EmptyKeyExpression.EMPTY, Key.Evaluated.EMPTY);
}
/**
 * Get the number of record updates in a portion of the record store determined by a group key expression,
 * evaluated at {@code SNAPSHOT} isolation.
 * @param key the grouping key expression
 * @param value the value of {@code key} selecting the group to count
 * @return a future that will complete to the number of record updates
 */
default CompletableFuture<Long> getSnapshotRecordUpdateCount(@Nonnull KeyExpression key, @Nonnull Key.Evaluated value) {
    return evaluateAggregateFunction(Collections.emptyList(), IndexFunctionHelper.countUpdates(key), value, IsolationLevel.SNAPSHOT)
        .thenApply(tuple -> tuple.getLong(0));
}
/**
 * Evaluate a {@link RecordFunction} against a record, using an empty evaluation context
 * (no parameter bindings).
 * @param function the function to evaluate
 * @param record the record to evaluate against
 * @param <T> the type of the result
 * @return a future that will complete with the result of evaluating the function against the record
 */
@Nonnull
default <T> CompletableFuture<T> evaluateRecordFunction(@Nonnull RecordFunction<T> function,
                                                        @Nonnull FDBRecord<M> record) {
    return evaluateRecordFunction(EvaluationContext.EMPTY, function, record);
}
/**
 * Evaluate a {@link RecordFunction} against a record, dispatching on the function's concrete type.
 * @param evaluationContext evaluation context containing parameter bindings
 * @param function the function to evaluate; must be an {@link IndexRecordFunction} or a {@link StoreRecordFunction}
 * @param record the record to evaluate against
 * @param <T> the type of the result
 * @return a future that will complete with the result of evaluating the function against the record
 * @throws RecordCoreException if the function is of an unsupported type
 */
@Nonnull
default <T> CompletableFuture<T> evaluateRecordFunction(@Nonnull EvaluationContext evaluationContext,
                                                        @Nonnull RecordFunction<T> function,
                                                        @Nonnull FDBRecord<M> record) {
    if (function instanceof IndexRecordFunction<?>) {
        final IndexRecordFunction<T> indexFunction = (IndexRecordFunction<T>)function;
        return evaluateIndexRecordFunction(evaluationContext, indexFunction, record);
    }
    if (function instanceof StoreRecordFunction<?>) {
        final StoreRecordFunction<T> storeFunction = (StoreRecordFunction<T>)function;
        return evaluateStoreFunction(evaluationContext, storeFunction, record);
    }
    throw new RecordCoreException("Cannot evaluate record function " + function);
}
/**
 * Evaluate a {@link IndexRecordFunction} against a record.
 * @param <T> the type of the result
 * @param evaluationContext evaluation context containing parameter bindings
 * @param function the function to evaluate
 * @param record the record to evaluate against
 * @return a future that will complete with the result of evaluating the function against the record
 * @see #evaluateRecordFunction(EvaluationContext, RecordFunction, FDBRecord)
 */
@Nonnull
<T> CompletableFuture<T> evaluateIndexRecordFunction(@Nonnull EvaluationContext evaluationContext,
                                                     @Nonnull IndexRecordFunction<T> function,
                                                     @Nonnull FDBRecord<M> record);
/**
 * Evaluate a {@link StoreRecordFunction} against a record, using an empty evaluation context
 * (no parameter bindings).
 * @param <T> the type of the result
 * @param function the function to evaluate
 * @param record the record to evaluate against
 * @return a future that will complete with the result of evaluating the function against the record
 */
@Nonnull
default <T> CompletableFuture<T> evaluateStoreFunction(@Nonnull StoreRecordFunction<T> function,
                                                       @Nonnull FDBRecord<M> record) {
    return evaluateStoreFunction(EvaluationContext.EMPTY, function, record);
}
/**
 * Evaluate a {@link StoreRecordFunction} against a record.
 * @param <T> the type of the result
 * @param evaluationContext evaluation context containing parameter bindings
 * @param function the function to evaluate
 * @param record the record to evaluate against
 * @return a future that will complete with the result of evaluating the function against the record
 * @see #evaluateRecordFunction(EvaluationContext, RecordFunction, FDBRecord)
 */
@Nonnull
<T> CompletableFuture<T> evaluateStoreFunction(@Nonnull EvaluationContext evaluationContext,
                                               @Nonnull StoreRecordFunction<T> function,
                                               @Nonnull FDBRecord<M> record);
/**
 * Evaluate an {@link IndexAggregateFunction} against a range of the store.
 *
 * Adjusts the given range to include any prefix from the function itself before delegating to
 * {@link #evaluateAggregateFunction(List, IndexAggregateFunction, TupleRange, IsolationLevel)}.
 * @param evaluationContext evaluation context containing parameter bindings
 * @param recordTypeNames record types for which to find a matching index
 * @param aggregateFunction the function to evaluate
 * @param range the range of records (group) for which to evaluate
 * @param isolationLevel whether to use snapshot reads
 * @return a future that will complete with the result of evaluating the aggregate
 */
@Nonnull
default CompletableFuture<Tuple> evaluateAggregateFunction(@Nonnull EvaluationContext evaluationContext,
                                                           @Nonnull List<String> recordTypeNames,
                                                           @Nonnull IndexAggregateFunction aggregateFunction,
                                                           @Nonnull TupleRange range,
                                                           @Nonnull IsolationLevel isolationLevel) {
    final TupleRange adjustedRange = aggregateFunction.adjustRange(evaluationContext, range);
    return evaluateAggregateFunction(recordTypeNames, aggregateFunction, adjustedRange, isolationLevel);
}
/**
 * Evaluate an {@link IndexAggregateFunction} against a group value: the group key is converted to
 * the range of all tuples with that prefix and the range overload is invoked.
 * @param recordTypeNames record types for which to find a matching index
 * @param aggregateFunction the function to evaluate
 * @param value the value for the group key(s)
 * @param isolationLevel whether to use snapshot reads
 * @return a future that will complete with the result of evaluating the aggregate
 */
@Nonnull
default CompletableFuture<Tuple> evaluateAggregateFunction(@Nonnull List<String> recordTypeNames,
                                                           @Nonnull IndexAggregateFunction aggregateFunction,
                                                           @Nonnull Key.Evaluated value,
                                                           @Nonnull IsolationLevel isolationLevel) {
    final TupleRange groupRange = TupleRange.allOf(value.toTuple());
    return evaluateAggregateFunction(recordTypeNames, aggregateFunction, groupRange, isolationLevel);
}
/**
 * Evaluate an {@link IndexAggregateFunction} against a range of the store.
 * @param recordTypeNames record types for which to find a matching index
 * @param aggregateFunction the function to evaluate
 * @param range the range of records (group) for which to evaluate
 * @param isolationLevel whether to use snapshot reads
 * @return a future that will complete with the result of evaluating the aggregate
 * @see #evaluateAggregateFunction(EvaluationContext, List, IndexAggregateFunction, TupleRange, IsolationLevel)
 */
@Nonnull
CompletableFuture<Tuple> evaluateAggregateFunction(@Nonnull List<String> recordTypeNames,
                                                   @Nonnull IndexAggregateFunction aggregateFunction,
                                                   @Nonnull TupleRange range,
                                                   @Nonnull IsolationLevel isolationLevel);
/**
 * Get a query result record from a stored record.
 * This is from a direct record scan / lookup without an associated index, so no index entry is attached.
 * @param storedRecord the stored record to convert to a queried record
 * @return a {@link FDBQueriedRecord} corresponding to {@code storedRecord}
 */
@Nonnull
default FDBQueriedRecord<M> queriedRecord(@Nonnull FDBStoredRecord<M> storedRecord) {
    return FDBQueriedRecord.stored(storedRecord);
}
/**
 * Get a query result record from an indexed record.
 * This is from an index scan and permits access to the underlying index entry.
 * @param indexedRecord the indexed record to convert to a queried record
 * @return a {@link FDBQueriedRecord} corresponding to {@code indexedRecord}
 * @see #queriedRecord(FDBStoredRecord)
 */
@Nonnull
default FDBQueriedRecord<M> queriedRecord(@Nonnull FDBIndexedRecord<M> indexedRecord) {
    return FDBQueriedRecord.indexed(indexedRecord);
}
/**
 * Get a query result from a covering index entry.
 * The entire <code>StoredRecord</code> is not available, and the record only has fields from the index entry.
 * Normal indexes have a primary key in their entries, but aggregate indexes do not.
 * @param index the index from which the entry came
 * @param indexEntry the index entry
 * @param recordType the record type of the indexed record
 * @param partialRecord the partially populated Protobuf record
 * @param hasPrimaryKey whether the index entry has a primary key
 * @return a {@link FDBQueriedRecord} corresponding to {@code indexEntry}
 */
@Nonnull
default FDBQueriedRecord<M> coveredIndexQueriedRecord(@Nonnull Index index, @Nonnull IndexEntry indexEntry, @Nonnull RecordType recordType,
                                                      @Nonnull M partialRecord, boolean hasPrimaryKey) {
    final Tuple primaryKey;
    if (hasPrimaryKey) {
        primaryKey = index.getEntryPrimaryKey(indexEntry.getKey());
    } else {
        // Aggregate index entries carry no primary key.
        primaryKey = TupleHelpers.EMPTY;
    }
    return FDBQueriedRecord.covered(index, indexEntry, primaryKey, recordType, partialRecord);
}
/**
 * Plan and execute a query, using default execute properties and no continuation.
 * @param query the query to plan and execute
 * @return a cursor for query results
 * @see RecordQueryPlan#execute
 */
@Nonnull
default RecordCursor<FDBQueriedRecord<M>> executeQuery(@Nonnull RecordQuery query) {
    return executeQuery(planQuery(query));
}
/**
 * Plan and execute a query: the query is first planned, then the resulting plan is run with the
 * supplied continuation and execute properties.
 * @param query the query to plan and execute
 * @param continuation continuation from a previous execution of this same query
 * @param executeProperties limits on execution
 * @return a cursor for query results
 * @see RecordQueryPlan#execute
 */
@Nonnull
default RecordCursor<FDBQueriedRecord<M>> executeQuery(@Nonnull RecordQuery query,
                                                       @Nullable byte[] continuation,
                                                       @Nonnull ExecuteProperties executeProperties) {
    final RecordQueryPlan plan = planQuery(query);
    return executeQuery(plan, continuation, executeProperties);
}
/**
 * Execute a query.
 * @param plan the plan to execute
 * @return a cursor for query results
 * @see RecordQueryPlan#execute
 */
@Nonnull
default RecordCursor<FDBQueriedRecord<M>> executeQuery(@Nonnull RecordQueryPlan plan) {
    // EvaluationContext.EMPTY: execute with no parameter bindings.
    return plan.execute(this, EvaluationContext.EMPTY);
}
/**
 * Execute a query.
 * @param plan the plan to execute
 * @param continuation continuation from a previous execution of this same plan
 * @param executeProperties limits on execution
 * @return a cursor for query results
 * @see RecordQueryPlan#execute
 */
@Nonnull
default RecordCursor<FDBQueriedRecord<M>> executeQuery(@Nonnull RecordQueryPlan plan,
                                                       @Nullable byte[] continuation,
                                                       @Nonnull ExecuteProperties executeProperties) {
    // EvaluationContext.EMPTY: execute with no parameter bindings.
    return plan.execute(this, EvaluationContext.EMPTY, continuation, executeProperties);
}
/**
 * Plan a query.
 * @param query the query to plan
 * @param parameterRelationshipGraph a set of bindings and their relationships that provide additional information
 *        to the planner that may improve plan quality but may also tighten requirements imposed on the parameter
 *        bindings that are used to execute the query
 * @return a query plan
 * @see RecordQueryPlanner#plan
 */
@Nonnull
RecordQueryPlan planQuery(@Nonnull RecordQuery query, @Nonnull ParameterRelationshipGraph parameterRelationshipGraph);
/**
 * Plan a query, supplying no additional parameter relationship information.
 * @param query the query to plan
 * @return a query plan
 * @see RecordQueryPlanner#plan
 */
@Nonnull
default RecordQueryPlan planQuery(@Nonnull RecordQuery query) {
    return planQuery(query, ParameterRelationshipGraph.empty());
}
/**
 * Builder for {@link FDBRecordStoreBase}.
 * @param <M> type used to represent stored records
 * @param <R> type of built record store
 */
interface BaseBuilder<M extends Message, R extends FDBRecordStoreBase<M>> {
/**
 * Get the serializer used to convert records into byte arrays.
 * @return the serializer to use
 */
@Nullable
RecordSerializer<M> getSerializer();
/**
 * Set the serializer used to convert records into byte arrays.
 * @param serializer the serializer to use
 * @return this builder
 */
@Nonnull
BaseBuilder<M, R> setSerializer(@Nonnull RecordSerializer<M> serializer);
/**
 * Get the storage format version for this store.
 * @return the format version to use
 */
int getFormatVersion();
/**
 * Set the storage format version for this store.
 *
 * Normally, this should be set to the highest format version supported by all code that may access the record
 * store. {@link #open} will set the store's format version to <code>max(max_supported_version, current_version)</code>.
 * This is to support cases where the target cannot be changed everywhere at once and some instances write the new version before others
 * know that they are licensed to do so. It is still <em>critically</em> important that <em>all</em> instances know how to handle
 * the new version before <em>any</em> instance allows it.
 *
 * When installing a new version of the record layer library that includes a format change, first install everywhere having arranged for
 * {@link #setFormatVersion} to be called with the <em>old</em> format version. Then, after that install is complete, change to the newer version.
 * @param formatVersion the format version to use
 * @return this builder
 */
@Nonnull
BaseBuilder<M, R> setFormatVersion(int formatVersion);
/**
 * Get the provider for the record store's meta-data.
 * @return the meta-data source to use
 */
@Nullable
RecordMetaDataProvider getMetaDataProvider();
/**
 * Set the provider for the record store's meta-data.
 * If {@link #setMetaDataStore} is also called, the provider will only be used to initialize the meta-data store when it is empty. The record store will be built using the store as its provider.
 * @param metaDataProvider the meta-data source to use
 * @return this builder
 */
@Nonnull
BaseBuilder<M, R> setMetaDataProvider(@Nullable RecordMetaDataProvider metaDataProvider);
/**
 * Get the {@link FDBMetaDataStore} to use as the source of meta-data.
 * @return the meta-data store to use
 */
@Nullable
FDBMetaDataStore getMetaDataStore();
/**
 * Set the {@link FDBMetaDataStore} to use as the source of meta-data.
 * If {@link #setMetaDataProvider} is also called, it will be used to seed the store.
 * @param metaDataStore the meta-data store to use
 * @return this builder
 */
@Nonnull
BaseBuilder<M, R> setMetaDataStore(@Nullable FDBMetaDataStore metaDataStore);
/**
 * Get the record context (transaction) to use for the record store.
 * @return the record context / transaction to use
 */
@Nullable
FDBRecordContext getContext();
/**
 * Set the record context (transaction) to use for the record store.
 * @param context the record context / transaction to use
 * @return this builder
 */
@Nonnull
BaseBuilder<M, R> setContext(@Nullable FDBRecordContext context);
/**
 * Get the subspace provider.
 * @return the subspace provider
 */
@Nullable
SubspaceProvider getSubspaceProvider();
/**
 * Set the subspace provider from a subspace provider.
 * @param subspaceProvider the subspace provider
 * @return this builder
 */
@Nonnull
BaseBuilder<M, R> setSubspaceProvider(@Nullable SubspaceProvider subspaceProvider);
/**
 * Set the subspace to use for the record store.
 * The record store is allowed to use the entire subspace, so it should not overlap any other record store's subspace.
 * It is preferred to {@link #setKeySpacePath} rather than this because key space path provides more meaningful logs.
 * @param subspace the subspace to use
 * @return this builder
 */
@Nonnull
@API(API.Status.UNSTABLE)
BaseBuilder<M, R> setSubspace(@Nullable Subspace subspace);
/**
 * Set the key space path to use for the record store.
 * The record store is allowed to use the entire subspace, so it should not overlap any other record store's subspace.
 * Note: The context should be set before setting the key space path.
 * @param keySpacePath the key space path to use
 * @return this builder
 */
@Nonnull
BaseBuilder<M, R> setKeySpacePath(@Nullable KeySpacePath keySpacePath);
/**
 * Get the {@link FDBRecordStore.UserVersionChecker function} to be used to check the meta-data version of the record store.
 * @return the checker function to use
 */
@Nullable
UserVersionChecker getUserVersionChecker();
/**
 * Set the {@link FDBRecordStore.UserVersionChecker function} to be used to check the meta-data version of the record store.
 * @param userVersionChecker the checker function to use
 * @return this builder
 */
@Nonnull
BaseBuilder<M, R> setUserVersionChecker(@Nullable UserVersionChecker userVersionChecker);
/**
 * Get the registry of index maintainers to be used by the record store.
 * @return the index registry to use
 */
@Nonnull
IndexMaintainerRegistry getIndexMaintainerRegistry();
/**
 * Set the registry of index maintainers to be used by the record store.
 * @param indexMaintainerRegistry the index registry to use
 * @return this builder
 * @see FDBRecordStore#getIndexMaintainer
 * @see RecordMetaDataBuilder#setIndexMaintainerRegistry
 */
@Nonnull
BaseBuilder<M, R> setIndexMaintainerRegistry(@Nonnull IndexMaintainerRegistry indexMaintainerRegistry);
/**
 * Get the {@link IndexMaintenanceFilter index filter} to be used by the record store.
 * @return the index filter to use
 */
@Nonnull
IndexMaintenanceFilter getIndexMaintenanceFilter();
/**
 * Set the {@link IndexMaintenanceFilter index filter} to be used by the record store.
 * @param indexMaintenanceFilter the index filter to use
 * @return this builder
 */
@Nonnull
BaseBuilder<M, R> setIndexMaintenanceFilter(@Nonnull IndexMaintenanceFilter indexMaintenanceFilter);
/**
 * Get the {@link FDBRecordStoreBase.PipelineSizer object} to be used to determine the depth of pipelines run by the record store.
 * @return the sizer to use
 */
@Nonnull
PipelineSizer getPipelineSizer();
/**
 * Set the {@link PipelineSizer object} to be used to determine the depth of pipelines run by the record store.
 * @param pipelineSizer the sizer to use
 * @return this builder
 * @see FDBRecordStoreBase#getPipelineSize
 */
@Nonnull
BaseBuilder<M, R> setPipelineSizer(@Nonnull PipelineSizer pipelineSizer);
/**
 * Get the store state cache to be used by the record store. If the builder returns {@code null}, the produced
 * record store will use the default store state cache provided by the {@link FDBDatabase} when initializing
 * the record store state.
 *
 * @return the store state cache used by this record store or {@code null} if it uses the database default
 */
@API(API.Status.EXPERIMENTAL)
@Nullable
FDBRecordStoreStateCache getStoreStateCache();
/**
 * Set the store state cache to be used by the record store. If {@code null} is provided or if this method
 * is never called, the produced record store will use the default store state cache provided by the
 * {@link FDBDatabase}.
 *
 * <p>NOTE(review): the parameter is annotated {@code @Nonnull} although this Javadoc says {@code null} is
 * accepted (and the getter is {@code @Nullable}) — confirm which is intended.</p>
 *
 * @param storeStateCache the store state cache to be used by this record store or {@code null} to specify that this should use the database default
 * @return this builder
 */
@API(API.Status.EXPERIMENTAL)
@Nonnull
BaseBuilder<M, R> setStoreStateCache(@Nonnull FDBRecordStoreStateCache storeStateCache);
/**
 * Make a copy of this builder.
 * This can be used to share enough of the state to connect to the same record store several times in different transactions.
 * <pre>
 * builder = FDBRecordStore.newBuilder().setMetaDataProvider(metadata).setSubspace(subspace)
 * store1 = builder.copyBuilder().setContext(context1).build()
 * store2 = builder.copyBuilder().setContext(context2).build()
 * </pre>
 * @return a new builder with the same state as this builder
 */
@Nonnull
BaseBuilder<M, R> copyBuilder();
/**
 * Build the record store.
 * @return a new record store with the desired state.
 */
@Nonnull
R build();
/**
 * Synchronous version of {@link #uncheckedOpenAsync}.
 * @return a store with the appropriate parameters set
 */
@Nonnull
default R uncheckedOpen() {
    return getContext().asyncToSync(FDBStoreTimer.Waits.WAIT_LOAD_RECORD_STORE_STATE, uncheckedOpenAsync());
}
/**
 * Synchronous version of {@link #createAsync}.
 * @return a store with the appropriate parameters set
 */
@Nonnull
default R create() {
    return getContext().asyncToSync(FDBStoreTimer.Waits.WAIT_CHECK_VERSION, createAsync());
}
/**
 * Synchronous version of {@link #openAsync}.
 * @return a store with the appropriate parameters set
 */
@Nonnull
default R open() {
    return getContext().asyncToSync(FDBStoreTimer.Waits.WAIT_CHECK_VERSION, openAsync());
}
/**
 * Synchronous version of {@link #createOrOpenAsync}.
 * @return a store with the appropriate parameters set
 */
@Nonnull
default R createOrOpen() {
    return getContext().asyncToSync(FDBStoreTimer.Waits.WAIT_CHECK_VERSION, createOrOpenAsync());
}
/**
 * Synchronous version of {@link #createOrOpenAsync(FDBRecordStoreBase.StoreExistenceCheck)}.
 * @param existenceCheck whether the store must already exist
 * @return an open record store
 */
@Nonnull
default R createOrOpen(@Nonnull FDBRecordStoreBase.StoreExistenceCheck existenceCheck) {
    return getContext().asyncToSync(FDBStoreTimer.Waits.WAIT_CHECK_VERSION, createOrOpenAsync(existenceCheck));
}
/**
 * Opens a <code>FDBRecordStore</code> instance without calling {@link FDBRecordStore#checkVersion}.
 * @return a future that will contain a store with the appropriate parameters set when ready
 */
@Nonnull
CompletableFuture<R> uncheckedOpenAsync();
/**
 * Opens a new <code>FDBRecordStore</code> instance in the given path with the given meta-data.
 * The store must not have already been written to the specified subspace.
 * @return a future that will contain a store with the appropriate parameters set when ready
 */
@Nonnull
default CompletableFuture<R> createAsync() {
    return createOrOpenAsync(FDBRecordStoreBase.StoreExistenceCheck.ERROR_IF_EXISTS);
}
/**
 * Opens an existing <code>FDBRecordStore</code> instance in the given path with the given meta-data.
 * The store must have already been written to the specified subspace.
 * @return a future that will contain a store with the appropriate parameters set when ready
 */
@Nonnull
default CompletableFuture<R> openAsync() {
    return createOrOpenAsync(FDBRecordStoreBase.StoreExistenceCheck.ERROR_IF_NOT_EXISTS);
}
/**
 * Opens a <code>FDBRecordStore</code> instance in the given path with the given meta-data.
 * @return a future that will contain a store with the appropriate parameters set when ready
 */
@Nonnull
default CompletableFuture<R> createOrOpenAsync() {
    return createOrOpenAsync(FDBRecordStoreBase.StoreExistenceCheck.ERROR_IF_NO_INFO_AND_NOT_EMPTY);
}
/**
 * Opens a <code>FDBRecordStore</code> instance in the given path with the given meta-data.
 * @param existenceCheck whether the store must already exist
 * @return a future that will contain a store with the appropriate parameters set when ready
 */
@Nonnull
CompletableFuture<R> createOrOpenAsync(@Nonnull FDBRecordStoreBase.StoreExistenceCheck existenceCheck);
}
}
|
package com.diozero.util;
/*
* #%L
* Organisation: mattjlewis
* Project: Device I/O Zero - Core
* Filename: LibraryLoader.java
*
* This file is part of the diozero project. More information about this project
* can be found at http://www.diozero.com/
* %%
* Copyright (C) 2016 - 2017 mattjlewis
* %%
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
* #L%
*/
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.HashMap;
import java.util.Map;
import org.pmw.tinylog.Logger;
/**
 * Loads native libraries for diozero, trying the classpath (inside the JAR) first and
 * falling back to {@code System.loadLibrary} (i.e. {@code -Djava.library.path}).
 * Load results are cached per library name so each library is attempted only once.
 */
public class LibraryLoader {
    // Tracks load attempts per library name: TRUE if loaded, FALSE if every attempt failed.
    // A failed library is never retried. Also serves as the lock for load attempts.
    private static final Map<String, Boolean> LOADED_LIBRARIES = new HashMap<>();
    // NOTE(review): only .dll vs .so is handled; macOS (.dylib) is not covered — confirm intended.
    private static final String LIBRARY_EXTENSION = System.getProperty("os.name").startsWith("Windows") ? ".dll"
            : ".so";

    /**
     * Loads the board-specific variant of the given native library.
     *
     * @param clz class whose classloader is used to locate the bundled library resource
     * @param libName library base name (without the "lib" prefix or extension)
     */
    public static void loadLibrary(Class<?> clz, String libName) throws UnsatisfiedLinkError {
        loadLibrary(clz, libName, true);
    }

    /**
     * Loads the given native library, first from the classpath (extracted to a temp file),
     * then from the system library path.
     *
     * @param clz class whose classloader is used to locate the bundled library resource
     * @param libName library base name (without the "lib" prefix or extension)
     * @param boardSpecific true to look under the board-specific /lib/&lt;board&gt;/ resource path
     */
    public static void loadLibrary(Class<?> clz, String libName, boolean boardSpecific) throws UnsatisfiedLinkError {
        synchronized (LOADED_LIBRARIES) {
            if (LOADED_LIBRARIES.get(libName) == null) {
                boolean loaded = false;
                // First try to load the library from within the JAR file
                String lib_file;
                if (boardSpecific) {
                    lib_file = String.format("/lib/%s/lib%s%s",
                            DeviceFactoryHelper.getNativeDeviceFactory().getBoardInfo().getLibraryPath(), libName,
                            LIBRARY_EXTENSION);
                } else {
                    lib_file = String.format("/lib/lib%s%s", libName, LIBRARY_EXTENSION);
                }
                Logger.debug("Looking for lib '" + lib_file + "' on classpath");
                try (InputStream is = clz.getResourceAsStream(lib_file)) {
                    if (is == null) {
                        // Previously this fell through to an NPE inside Files.copy; report it clearly instead.
                        Logger.warn("Library resource '{}' not found on classpath, trying System.loadLibrary",
                                lib_file);
                    } else {
                        // Native libraries cannot be loaded from inside a JAR, so extract to a temp file first.
                        Path path = Files.createTempFile("lib" + libName, LIBRARY_EXTENSION);
                        path.toFile().deleteOnExit();
                        Files.copy(is, path, StandardCopyOption.REPLACE_EXISTING);
                        Runtime.getRuntime().load(path.toString());
                        loaded = true;
                        Logger.debug("Loaded library '{}' from classpath", libName);
                    }
                } catch (Throwable t) {
                    Logger.warn("Error loading library '{}' from classpath, trying System.loadLibrary: {}", libName, t);
                }
                if (!loaded) {
                    // Try to load from the Java system library path (-Djava.library.path)
                    Logger.debug("Looking for lib '" + libName + "' on library path");
                    try {
                        System.loadLibrary(libName);
                        loaded = true;
                        Logger.info("Loaded library '{}' from system library path", libName);
                    } catch (Throwable t) {
                        Logger.error("Error loading library '{}' from system library path: {}", libName, t);
                    }
                }
                LOADED_LIBRARIES.put(libName, Boolean.valueOf(loaded));
            }
        }
    }
}
|
package com.deviceinsight.kafka.health;
import static com.deviceinsight.kafka.health.KafkaConsumingHealthIndicatorTest.TOPIC;
import static org.assertj.core.api.Assertions.assertThat;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import kafka.server.KafkaServer;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.awaitility.Awaitility;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.Status;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.test.EmbeddedKafkaBroker;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.kafka.test.core.BrokerAddress;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.test.context.junit.jupiter.SpringExtension;
import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
 * Integration test for {@code KafkaConsumingHealthIndicator} against an embedded broker:
 * the indicator reports UP while the broker is reachable and DOWN after it is shut down.
 */
@ExtendWith(SpringExtension.class)
@EmbeddedKafka(topics = TOPIC)
public class KafkaConsumingHealthIndicatorTest {

    static final String TOPIC = "health-checks";

    // Observer consumer subscribed to the health-check topic.
    private Consumer<String, String> consumer;

    @Autowired
    private EmbeddedKafkaBroker embeddedKafkaBroker;

    @BeforeEach
    public void setUp() {
        Map<String, Object> consumerConfigs =
                new HashMap<>(KafkaTestUtils.consumerProps("consumer", "false", embeddedKafkaBroker));
        consumer = new DefaultKafkaConsumerFactory<>(consumerConfigs, new StringDeserializer(),
                new StringDeserializer()).createConsumer();
        consumer.subscribe(Collections.singletonList(TOPIC));
        // Initial poll forces partition assignment before the test body runs.
        consumer.poll(Duration.ofSeconds(1));
    }

    @AfterEach
    public void tearDown() {
        consumer.close();
        // Stop the brokers and wait until shutdown completes so later tests start clean.
        embeddedKafkaBroker.getKafkaServers().forEach(KafkaServer::shutdown);
        embeddedKafkaBroker.getKafkaServers().forEach(KafkaServer::awaitShutdown);
    }

    @Test
    public void kafkaIsDown() throws Exception {
        final KafkaHealthProperties kafkaHealthProperties = new KafkaHealthProperties();
        kafkaHealthProperties.setTopic(TOPIC);
        final KafkaProperties kafkaProperties = new KafkaProperties();
        final BrokerAddress[] brokerAddresses = embeddedKafkaBroker.getBrokerAddresses();
        kafkaProperties.setBootstrapServers(Collections.singletonList(brokerAddresses[0].toString()));
        final KafkaConsumingHealthIndicator healthIndicator =
                new KafkaConsumingHealthIndicator(kafkaHealthProperties, kafkaProperties.buildConsumerProperties(),
                        kafkaProperties.buildProducerProperties());
        healthIndicator.subscribeAndSendMessage();
        // Healthy while the broker is up...
        Health health = healthIndicator.health();
        assertThat(health.getStatus()).isEqualTo(Status.UP);
        // ...and eventually DOWN once the broker has been destroyed.
        shutdownKafka();
        Awaitility.await().untilAsserted(() -> assertThat(healthIndicator.health().getStatus()).isEqualTo(Status.DOWN));
    }

    // Destroys the embedded broker to simulate a Kafka outage.
    private void shutdownKafka() {
        this.embeddedKafkaBroker.destroy();
    }
}
|
package com.logicaldoc.gui.frontend.client.folder;
import com.logicaldoc.gui.common.client.Session;
import com.logicaldoc.gui.common.client.i18n.I18N;
import com.logicaldoc.gui.common.client.util.LD;
import com.logicaldoc.gui.common.client.widgets.FolderTree;
import com.smartgwt.client.types.HeaderControls;
import com.smartgwt.client.util.BooleanCallback;
import com.smartgwt.client.widgets.Button;
import com.smartgwt.client.widgets.Dialog;
import com.smartgwt.client.widgets.events.ClickEvent;
import com.smartgwt.client.widgets.events.ClickHandler;
import com.smartgwt.client.widgets.form.DynamicForm;
import com.smartgwt.client.widgets.form.fields.CheckboxItem;
import com.smartgwt.client.widgets.layout.HLayout;
import com.smartgwt.client.widgets.layout.VLayout;
import com.smartgwt.client.widgets.tree.TreeGrid;
/**
* This is the form used to copy a folder into another path
*
* @author Marco Meschieri - Logical Objects
* @since 7.1
*/
public class CopyDialog extends Dialog {
public CopyDialog() {
super();
setHeaderControls(HeaderControls.HEADER_LABEL, HeaderControls.CLOSE_BUTTON);
setTitle(I18N.message("copy"));
setWidth(470);
setHeight(280);
setCanDragResize(true);
setIsModal(true);
setShowModalMask(true);
centerInPage();
setPadding(3);
VLayout content = new VLayout();
content.setTop(10);
content.setWidth100();
content.setHeight100();
content.setMembersMargin(3);
final TreeGrid folders = new FolderTree();
folders.setWidth100();
folders.setHeight100();
HLayout buttons = new HLayout();
buttons.setWidth100();
buttons.setHeight(30);
final boolean inheritOptionEnabled = "true".equals(Session.get().getInfo()
.getConfig("gui.security.inheritoption"));
final DynamicForm form = new DynamicForm();
CheckboxItem inheritSecurity = new CheckboxItem();
inheritSecurity.setName("inheritSecurity");
inheritSecurity.setTitle(I18N.message("inheritparentsec"));
form.setItems(inheritSecurity);
Button copy = new Button(I18N.message("copy"));
copy.setAutoFit(true);
copy.setMargin(1);
copy.addClickHandler(new ClickHandler() {
public void onClick(ClickEvent event) {
long[] selectedIds = FolderNavigator.get().getSelectedIds();
String label = FolderNavigator.get().getSelectedRecord().getAttributeAsString("name");
if (selectedIds.length > 1)
label = selectedIds.length + " " + I18N.message("folders").toLowerCase();
LD.ask(I18N.message("copy"),
I18N.message("copyask",
new String[] { label, folders.getSelectedRecord().getAttributeAsString("name") }),
new BooleanCallback() {
@Override
public void execute(Boolean value) {
if (value) {
FolderNavigator.get()
.copyTo(Long.parseLong(folders.getSelectedRecord().getAttributeAsString(
"folderId")),
false,
!inheritOptionEnabled
|| "true".equals(form.getValueAsString("inheritSecurity")));
}
destroy();
}
});
}
});
Button copyFolders = new Button(I18N.message("copyfoldersonly"));
copyFolders.setAutoFit(true);
copyFolders.setMargin(1);
copyFolders.addClickHandler(new ClickHandler() {
public void onClick(ClickEvent event) {
LD.ask(I18N.message("copy"),
I18N.message("copyask", new String[] {
FolderNavigator.get().getSelectedRecord().getAttributeAsString("name"),
folders.getSelectedRecord().getAttributeAsString("name") }), new BooleanCallback() {
@Override
public void execute(Boolean value) {
if (value) {
FolderNavigator.get()
.copyTo(Long.parseLong(folders.getSelectedRecord().getAttributeAsString(
"folderId")),
true,
!inheritOptionEnabled
|| "true".equals(form.getValueAsString("inheritSecurity")));
}
destroy();
}
});
}
});
if (inheritOptionEnabled)
buttons.setMembers(copy, copyFolders, form);
else
buttons.setMembers(copy, copyFolders);
content.setMembers(folders, buttons);
addItem(content);
}
}
|
/*
* Copyright 1999-2017 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.druid.bvt.pool;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import org.junit.Assert;
import junit.framework.TestCase;
import com.alibaba.druid.mock.MockDriver;
import com.alibaba.druid.mock.MockStatement;
import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.stat.DruidDataSourceStatManager;
/**
 * Verifies that a manual-commit transaction whose query exceeds the configured
 * transaction threshold (1ms here) is processed without errors, and that closing
 * the data source deregisters it from the global stat manager.
 */
public class TestLogLongTimeTransaction extends TestCase {

    private DruidDataSource dataSource;
    private MockDriver driver;

    protected void setUp() throws Exception {
        // Mock driver whose queries sleep ~2ms so they always exceed the 1ms threshold.
        driver = new MockDriver() {
            protected ResultSet executeQuery(MockStatement stmt, String sql) throws SQLException {
                try {
                    Thread.sleep(2);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag instead of swallowing it, so callers can observe it.
                    Thread.currentThread().interrupt();
                }
                return super.executeQuery(stmt, sql);
            }
        };
        dataSource = new DruidDataSource();
        dataSource.setDriver(driver);
        dataSource.setUrl("jdbc:mock:xxx");
        dataSource.setFilters("stat,trace,log4j,encoding");
        // Any transaction longer than 1ms is considered "long".
        dataSource.setTransactionThresholdMillis(1);
    }

    protected void tearDown() throws Exception {
        dataSource.close();
        // Closing must deregister the data source from the global stat manager.
        Assert.assertEquals(0, DruidDataSourceStatManager.getInstance().getDataSourceList().size());
    }

    /** Runs one manual-commit transaction whose single query exceeds the threshold. */
    public void test_0() throws Exception {
        Connection conn = dataSource.getConnection();
        conn.setAutoCommit(false);
        Statement stmt = conn.createStatement();
        ResultSet rs = stmt.executeQuery("SELECT 1");
        rs.next();
        rs.close();
        stmt.close();
        conn.commit();
        conn.close();
    }
}
|
package com.test;
import org.concordion.integration.junit4.ConcordionRunner;
import org.junit.runner.RunWith;
/**
 * Concordion fixture exercising variable references set in a nested scope of the
 * specification.
 */
@RunWith(ConcordionRunner.class)
public class VariableReferenceFromNestedScope {

    /**
     * No-op sink called from the specification; the call itself is what the
     * specification exercises, so the body is intentionally empty.
     */
    public void consumeString(String s) {
    }
}
|
package com.kunyang.android.qq.Data.Restapi;
import com.hyphenate.exceptions.HyphenateException;
/**
 * Exception for live-streaming REST API failures, carrying an optional numeric
 * error code alongside the message.
 *
 * Created by 坤阳 (Kunyang) on 2017/12/30.
 */
public class LiveException extends HyphenateException {

    // Error code supplied by the server; -1 means no code was provided.
    protected int errorCode = -1;

    public LiveException() {
    }

    /**
     * @param errorCode numeric error code reported by the server
     * @param desc human-readable description, stored as the exception message
     */
    public LiveException(int errorCode, String desc) {
        super(desc);
        this.errorCode = errorCode;
    }

    public LiveException(String message) {
        super(message);
    }

    /** @return the server error code, or -1 if none was set */
    public int getErrorCode() {
        return errorCode;
    }
}
|
package org.jesperancinha.java11.crums.crum14;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import static org.jesperancinha.console.consolerizer.common.ConsolerizerColor.BLUE;
import static org.jesperancinha.console.consolerizer.common.ConsolerizerColor.BRIGHT_CYAN;
import static org.jesperancinha.console.consolerizer.common.ConsolerizerColor.MAGENTA;
/**
 * Demonstrates which argument types {@code List.contains} and {@code List.containsAll}
 * accept: both take {@code Object}, so calls with mismatched types compile fine but
 * simply return false.
 */
public class Crum14 {
    public static void main(String[] args) {
        BLUE.printGenericTitleLn("Crum 14 - Type Arrays legal operations");
        final List<Double> list = new ArrayList<>();
        list.add(111d);
        BRIGHT_CYAN.printGenericLn(list);
        BRIGHT_CYAN.printGenericLn(list.contains(111d));
        // A Float argument compiles (contains takes Object) but never equals a Double element.
        BRIGHT_CYAN.printGenericLn(list.contains(111f));
        BRIGHT_CYAN.printGenericLn(list.contains("WOW"));
        BRIGHT_CYAN.printGenericLn(list.containsAll(List.of(111d)));
        BRIGHT_CYAN.printGenericLn(list.containsAll(Collections.singletonList(111d)));
        MAGENTA.printGenericLn("Notice that you can only add elements strictly of the type of the array");
        MAGENTA.printGenericLn("Contains works with all types though");
        // Fixed typo in the printed message: "and Object" -> "an Object".
        MAGENTA.printGenericLn("Contains expects an Object and containsAll expects a list of Objects");
    }
}
|
package lecho.lib.hellocharts.formatter;
import lecho.lib.hellocharts.model.AxisValue;
/**
 * Axis value formatter that delegates all formatting to a {@link ValueFormatterHelper},
 * exposing fluent setters for decimal digits, separator, and prepended/appended text.
 */
public class SimpleAxisValueFormatter implements AxisValueFormatter {

    // Helper performing the actual float-to-characters formatting.
    private ValueFormatterHelper valueFormatterHelper = new ValueFormatterHelper();

    /** Creates a formatter using the locale-determined decimal separator. */
    public SimpleAxisValueFormatter() {
        valueFormatterHelper.determineDecimalSeparator();
    }

    /**
     * Creates a formatter rendering the given number of decimal digits.
     *
     * @param decimalDigitsNumber number of digits after the decimal separator
     */
    public SimpleAxisValueFormatter(int decimalDigitsNumber) {
        this();
        valueFormatterHelper.setDecimalDigitsNumber(decimalDigitsNumber);
    }

    @Override
    public int formatValueForManualAxis(char[] formattedValue, AxisValue axisValue) {
        // Manual axis values carry an optional custom label appended to the number.
        return valueFormatterHelper.formatFloatValueWithPrependedAndAppendedText(formattedValue,
                axisValue.getValue(), axisValue.getLabelAsChars());
    }

    @Override
    public int formatValueForAutoGeneratedAxis(char[] formattedValue, float value, int autoDecimalDigits) {
        return valueFormatterHelper.formatFloatValueWithPrependedAndAppendedText(formattedValue,
                value, autoDecimalDigits);
    }

    public int getDecimalDigitsNumber() {
        return valueFormatterHelper.getDecimalDigitsNumber();
    }

    /** @return this formatter, for fluent chaining */
    public SimpleAxisValueFormatter setDecimalDigitsNumber(int decimalDigitsNumber) {
        valueFormatterHelper.setDecimalDigitsNumber(decimalDigitsNumber);
        return this;
    }

    public char[] getAppendedText() {
        return valueFormatterHelper.getAppendedText();
    }

    /** @return this formatter, for fluent chaining */
    public SimpleAxisValueFormatter setAppendedText(char[] appendedText) {
        valueFormatterHelper.setAppendedText(appendedText);
        return this;
    }

    public char[] getPrependedText() {
        return valueFormatterHelper.getPrependedText();
    }

    /** @return this formatter, for fluent chaining */
    public SimpleAxisValueFormatter setPrependedText(char[] prependedText) {
        valueFormatterHelper.setPrependedText(prependedText);
        return this;
    }

    public char getDecimalSeparator() {
        return valueFormatterHelper.getDecimalSeparator();
    }

    /** @return this formatter, for fluent chaining */
    public SimpleAxisValueFormatter setDecimalSeparator(char decimalSeparator) {
        valueFormatterHelper.setDecimalSeparator(decimalSeparator);
        return this;
    }
}
|
/*******************************************************************************
* Copyright (c) 2013, 2014 Oracle and/or its affiliates. All rights reserved.
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 and Eclipse Distribution License v. 1.0
* which accompanies this distribution.
* The Eclipse Public License is available at http://www.eclipse.org/legal/epl-v10.html
* and the Eclipse Distribution License is available at
* http://www.eclipse.org/org/documents/edl-v10.php.
*
* Contributors:
* Oracle - initial API and implementation
******************************************************************************/
package org.eclipse.persistence.annotations;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
import org.eclipse.persistence.sessions.serializers.JavaSerializer;
import org.eclipse.persistence.sessions.serializers.Serializer;
import static java.lang.annotation.ElementType.FIELD;
import static java.lang.annotation.ElementType.METHOD;
import static java.lang.annotation.ElementType.TYPE;
import static java.lang.annotation.RetentionPolicy.RUNTIME;
/**
 * A SerializedConverter is used to serialize an object's value into a database binary, character, or XML field.
 * This annotation allows a named converter that can be used in mappings.
 *
 * A converter must be uniquely identified by name and can be defined at
 * the class level and can be specified within an Entity,
 * MappedSuperclass and Embeddable class.
 *
 * The usage of a SerializedConverter is always specified via the Converter annotation and
 * is supported on a Basic, or ElementCollection mapping.
 *
 * @see org.eclipse.persistence.annotations.Converter
 * @see org.eclipse.persistence.sessions.serializers.Serializer
 * @author James Sutherland
 * @since EclipseLink 2.6
 */
@Target({TYPE, METHOD, FIELD})
@Retention(RUNTIME)
public @interface SerializedConverter {
    /**
     * (Required) Name of this converter. The name should be unique across the
     * whole persistence unit.
     */
    String name();

    /**
     * Allows a package name to be passed to the serializer.
     * This is used by some serializers such as XML, JSON to initialize the
     * JAXB context from the classes in the package or a jaxb.index file.
     */
    String serializerPackage() default "";

    /**
     * The serializer class to be used. This class must implement the
     * org.eclipse.persistence.sessions.serializers.Serializer interface.
     * Defaults to standard Java serialization.
     */
    Class<? extends Serializer> serializerClass() default JavaSerializer.class;
}
|
package io.kluska.bsc.forms.reply.stats.service.infrastructure;
import org.springframework.boot.autoconfigure.security.SecurityProperties;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.annotation.Order;
import org.springframework.http.HttpMethod;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.config.annotation.web.builders.WebSecurity;
import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;
/**
 * Web security configuration: every request requires authentication except reply
 * submissions (POST /replies/**), which bypass the security filter chain entirely.
 *
 * NOTE(review): WebSecurityConfigurerAdapter is deprecated in newer Spring Security
 * versions; migrating to a SecurityFilterChain bean is a separate change.
 *
 * @author Mateusz Kluska
 */
@Configuration
@Order(SecurityProperties.ACCESS_OVERRIDE_ORDER)
public class WebSecurityConfig extends WebSecurityConfigurerAdapter {

    @Override
    protected void configure(HttpSecurity http) throws Exception {
        // CSRF disabled — presumably because this service exposes a stateless API; confirm.
        http
                .authorizeRequests().anyRequest().authenticated()
                .and()
                .csrf().disable();
    }

    @Override
    public void configure(WebSecurity web) throws Exception {
        // These requests are ignored by Spring Security completely (no filters applied).
        web.ignoring()
                .antMatchers(HttpMethod.POST, "/replies/**");
    }
}
|
/*******************************************************************************
* Copyright (c) 2005, 2008 IBM Corporation and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* IBM Corporation - initial API and implementation
*******************************************************************************/
package org.eclipse.pde.internal.core;
import org.eclipse.pde.core.plugin.IPluginModelBase;
/**
 * Describes a change (addition, removal, modification) to plugin models.
 * The MODELS_* constants are bit flags and may be combined in {@link #getEventTypes()}.
 */
public interface IExtensionDeltaEvent {
    /**
     * Event is sent after the models have been added.
     */
    int MODELS_ADDED = 0x1;

    /**
     * Event is sent before the models will be removed.
     */
    int MODELS_REMOVED = 0x2;

    /**
     * Event is sent after the models have been changed.
     */
    int MODELS_CHANGED = 0x4;

    /** @return the plugin models added in this delta */
    public IPluginModelBase[] getAddedModels();

    /** @return the plugin models changed in this delta */
    public IPluginModelBase[] getChangedModels();

    /** @return the plugin models removed in this delta */
    public IPluginModelBase[] getRemovedModels();

    /** @return a combination of the MODELS_* bit flags describing this event */
    public int getEventTypes();
}
|
package learn.hyperskill.pod.p20210207chocolate;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;
import static org.junit.jupiter.api.Assertions.*;
/**
 * Parameterized tests for {@code Main.possibleToBreak(n, m, k)}.
 * Each CSV row supplies (n, m, k); presumably the method decides whether an
 * n x m chocolate bar can be broken into a piece of exactly k squares — TODO
 * confirm against Main's contract.
 */
class MainTest {
    @ParameterizedTest
    @CsvSource({"4,2,2", "4,2,4", "4,2,6", "7,4,21"})
    void testPossible(int n, int m, int k) {
        assertTrue(Main.possibleToBreak(n, m, k));
    }

    @ParameterizedTest
    @CsvSource({"4,2,3", "4,2,1", "2,10,7"})
    void testImpossible(int n, int m, int k) {
        assertFalse(Main.possibleToBreak(n, m, k));
    }
}
|
/*
* Copyright © 2017 camunda services GmbH (info@camunda.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.zeebe.client;
import static io.zeebe.client.ClientProperties.USE_PLAINTEXT_CONNECTION;
import static io.zeebe.client.impl.ZeebeClientBuilderImpl.CA_CERTIFICATE_VAR;
import static io.zeebe.client.impl.ZeebeClientBuilderImpl.KEEP_ALIVE_VAR;
import static io.zeebe.client.impl.ZeebeClientBuilderImpl.PLAINTEXT_CONNECTION_VAR;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import io.zeebe.client.impl.ZeebeClientBuilderImpl;
import io.zeebe.client.util.ClientTest;
import io.zeebe.client.util.Environment;
import io.zeebe.client.util.EnvironmentRule;
import java.io.FileNotFoundException;
import java.time.Duration;
import java.util.Properties;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
/**
 * Tests for the Zeebe client builder: default configuration values, TLS/plaintext
 * selection, CA certificate handling, keep-alive settings, and how environment
 * variables interact with programmatic/property configuration.
 */
public final class ZeebeClientTest extends ClientTest {

    // Isolates environment-variable changes made by individual tests.
    @Rule public final EnvironmentRule environmentRule = new EnvironmentRule();
    @Rule public ExpectedException thrown = ExpectedException.none();

    @Test
    public void shouldNotFailIfClosedTwice() {
        // close() must be idempotent.
        client.close();
        client.close();
    }

    @Test
    public void shouldHaveDefaultValues() {
        // given
        try (final ZeebeClient client = ZeebeClient.newClient()) {
            // when
            final ZeebeClientConfiguration configuration = client.getConfiguration();
            // then
            assertThat(configuration.getBrokerContactPoint()).isEqualTo("0.0.0.0:26500");
            assertThat(configuration.getDefaultJobWorkerMaxJobsActive()).isEqualTo(32);
            assertThat(configuration.getNumJobWorkerExecutionThreads()).isEqualTo(1);
            assertThat(configuration.getDefaultJobWorkerName()).isEqualTo("default");
            assertThat(configuration.getDefaultJobTimeout()).isEqualTo(Duration.ofMinutes(5));
            assertThat(configuration.getDefaultJobPollInterval()).isEqualTo(Duration.ofMillis(100));
            assertThat(configuration.getDefaultMessageTimeToLive()).isEqualTo(Duration.ofHours(1));
            assertThat(configuration.getDefaultRequestTimeout()).isEqualTo(Duration.ofSeconds(20));
        }
    }

    @Test
    public void shouldFailIfCertificateDoesNotExist() {
        assertThatThrownBy(
                () -> ZeebeClient.newClientBuilder().caCertificatePath("/wrong/path").build())
            .hasCauseInstanceOf(FileNotFoundException.class);
    }

    @Test
    public void shouldFailWithEmptyCertificatePath() {
        assertThatThrownBy(() -> ZeebeClient.newClientBuilder().caCertificatePath("").build())
            .isInstanceOf(IllegalArgumentException.class);
    }

    @Test
    public void shouldHaveTlsEnabledByDefault() {
        assertThat(new ZeebeClientBuilderImpl().isPlaintextConnectionEnabled()).isFalse();
    }

    @Test
    public void shouldUseInsecureWithEnvVar() {
        // given
        Environment.system().put(PLAINTEXT_CONNECTION_VAR, "true");
        final ZeebeClientBuilderImpl builder = new ZeebeClientBuilderImpl();
        // when
        builder.build();
        // then
        assertThat(builder.isPlaintextConnectionEnabled()).isTrue();
    }

    @Test
    public void shouldOverridePropertyWithEnvVariable() {
        // given: the env var says plaintext off, the property would turn it on.
        Environment.system().put(PLAINTEXT_CONNECTION_VAR, "false");
        final Properties properties = new Properties();
        properties.putIfAbsent(USE_PLAINTEXT_CONNECTION, "");
        final ZeebeClientBuilderImpl builder = new ZeebeClientBuilderImpl();
        builder.withProperties(properties);
        // when
        builder.build();
        // then: the environment variable wins over the property.
        assertThat(builder.isPlaintextConnectionEnabled()).isFalse();
    }

    @Test
    public void shouldCaCertificateWithEnvVar() {
        // given
        final String certPath = this.getClass().getClassLoader().getResource("ca.cert.pem").getPath();
        Environment.system().put(CA_CERTIFICATE_VAR, certPath);
        final ZeebeClientBuilderImpl builder = new ZeebeClientBuilderImpl();
        // when
        builder.build();
        // then
        assertThat(builder.getCaCertificatePath()).isEqualTo(certPath);
    }

    @Test
    public void shouldSetKeepAlive() {
        // given
        final ZeebeClientBuilderImpl builder = new ZeebeClientBuilderImpl();
        builder.keepAlive(Duration.ofMinutes(2));
        // when
        builder.build();
        // then
        assertThat(builder.getKeepAlive()).isEqualTo(Duration.ofMinutes(2));
    }

    @Test
    public void shouldOverrideKeepAliveWithEnvVar() {
        // given: env var value is in milliseconds and must beat the programmatic setting.
        final ZeebeClientBuilderImpl builder = new ZeebeClientBuilderImpl();
        builder.keepAlive(Duration.ofMinutes(2));
        Environment.system().put(KEEP_ALIVE_VAR, "15000");
        // when
        builder.build();
        // then
        assertThat(builder.getKeepAlive()).isEqualTo(Duration.ofSeconds(15));
    }

    @Test
    public void shouldRejectUnsupportedTimeUnitWithEnvVar() {
        // when/then: "d" (days) is not an accepted unit.
        Environment.system().put(KEEP_ALIVE_VAR, "30d");
        assertThatThrownBy(() -> new ZeebeClientBuilderImpl().build())
            .isInstanceOf(IllegalArgumentException.class);
    }

    @Test
    public void shouldRejectNegativeTime() {
        // when/then
        assertThatThrownBy(() -> new ZeebeClientBuilderImpl().keepAlive(Duration.ofSeconds(-2)).build())
            .isInstanceOf(IllegalArgumentException.class);
    }

    @Test
    public void shouldRejectNegativeTimeAsEnvVar() {
        // when/then
        Environment.system().put(KEEP_ALIVE_VAR, "-2s");
        assertThatThrownBy(() -> new ZeebeClientBuilderImpl().build())
            .isInstanceOf(IllegalArgumentException.class);
    }
}
|
package uk.ac.ebi.embl.template.reader;
/**
 * Types of tokens that may appear in a submission template.
 * NOTE(review): start_location and end_location break the UPPER_SNAKE_CASE convention
 * of the other constants; renaming would break callers and any persisted enum names,
 * so they are deliberately left as-is.
 */
public enum TemplateTokenType {
    TAXON_FIELD, DATE_FIELD, TEXT_FIELD, TEXT_CHOICE_FIELD, INTEGER_FIELD, TEXT_AREA_FIELD, BOOLEAN_FIELD, start_location, end_location;
}
|
package com.project.space;
import android.content.Context;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ImageView;
import android.widget.TextView;
import androidx.annotation.NonNull;
import androidx.recyclerview.widget.RecyclerView;
import com.google.firebase.auth.FirebaseAuth;
import com.google.firebase.auth.FirebaseUser;
import com.google.firebase.database.FirebaseDatabase;
import java.util.ArrayList;
/**
 * RecyclerView adapter showing one card per group member with the member's name
 * and a sharing-status icon.
 */
public class MembersAdapter extends RecyclerView.Adapter<MembersAdapter.MembersViewHolder> {

    // Users to display, one card per entry.
    ArrayList<CreateUser> nameList;
    Context c;

    MembersAdapter(ArrayList<CreateUser> nameList, Context c) {
        this.nameList = nameList;
        this.c = c;
    }

    @Override
    public int getItemCount() {
        return nameList.size();
    }

    @NonNull
    @Override
    public MembersAdapter.MembersViewHolder onCreateViewHolder(@NonNull ViewGroup parent, int viewType) {
        View v = LayoutInflater.from(parent.getContext()).inflate(R.layout.card_layout, parent, false);
        return new MembersViewHolder(v, c, nameList);
    }

    @Override
    public void onBindViewHolder(@NonNull MembersAdapter.MembersViewHolder holder, int position) {
        CreateUser currentUserObj = nameList.get(position);
        holder.name_txt.setText(currentUserObj.name);
        // Yoda comparison avoids an NPE when isSharing has not been populated yet.
        // NOTE(review): a null isSharing now falls through to the "ok" icon instead of
        // crashing — confirm whether null should instead be treated as not sharing.
        if ("false".equals(currentUserObj.isSharing)) {
            holder.status.setImageResource(R.drawable.notok);
        } else {
            holder.status.setImageResource(R.drawable.ok);
        }
    }

    /** Holds the views of a single member card. */
    public static class MembersViewHolder extends RecyclerView.ViewHolder implements View.OnClickListener {
        TextView name_txt;
        View v;
        ImageView status;
        Context c;
        ArrayList<CreateUser> nameArrayList;
        FirebaseAuth mAuth;
        FirebaseUser user;

        public MembersViewHolder(@NonNull View itemView, Context c, ArrayList<CreateUser> nameArrayList) {
            super(itemView);
            this.c = c;
            this.nameArrayList = nameArrayList;
            itemView.setOnClickListener(this);
            mAuth = FirebaseAuth.getInstance();
            user = mAuth.getCurrentUser();
            name_txt = itemView.findViewById(R.id.item_title);
            status = itemView.findViewById(R.id.status_img);
        }

        @Override
        public void onClick(View v) {
            // Intentionally empty: no click behavior implemented yet.
        }
    }
}
|
package net.npg.abattle.client.view.boardscene;
import org.junit.Assert;
import org.junit.Test;
import com.badlogic.gdx.graphics.Color;
public class RenderUtilTest {

    /**
     * Prints the animation frame number sampled every 100ms, then asserts the final frame.
     * NOTE(review): the assertion depends on wall-clock timing (25 x 100ms of sleeping
     * beforehand), so this test looks timing-sensitive — confirm that
     * getAnimationNumber(0.5f, 10) is deterministic at that point.
     */
    @Test
    public final void testUtils() throws Exception {
        for (int i = 0; i < 25; i++) {
            final int x = RendererUtils.getAnimationNumber(0.5f, 10);
            System.out.println(x);
            Thread.sleep(100);
        }
        Assert.assertEquals(1, RendererUtils.getAnimationNumber(0.5f, 10));
    }

    /**
     * Exercises the alternating arrow-colour pattern for each animation position and
     * prints the colours alongside their offsets; purely visual, no assertions.
     */
    @Test
    public void testColor() {
        for (int animPos = 0; animPos < 10; animPos++) {
            System.out.println(">" + animPos);
            final float vx = 20;
            // Horizontal offset cycles through 5 animation positions.
            final float offx = vx * (animPos % 5) / 5.0f;
            System.out.println("->" + offx);
            int i = -1;
            // Second half of the cycle starts on the opposite colour.
            int c = animPos >= 5 ? 0 : -1;
            System.out.println(colorArrow(Color.BLACK, Color.WHITE, c) + " - " + 0);
            c++;
            for (i++; i <= 4; i++) {
                System.out.println(colorArrow(Color.BLACK, Color.WHITE, c) + " - " + (vx * i + offx));
                c++;
            }
            System.out.println(colorArrow(Color.BLACK, Color.WHITE, c) + " - " + 100);
            System.out.println("----------------------------------------------");
        }
    }

    // Alternates between the two colours: even i -> one, odd (including negative) -> two.
    private Color colorArrow(final Color one, final Color two, final int i) {
        return i % 2 == 0 ? one : two;
    }
}
|
package com.thinkenterprise.domain.route.graphql.resolver.query;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Pageable;
import org.springframework.stereotype.Component;
import org.springframework.web.bind.annotation.ExceptionHandler;
import com.thinkenterprise.domain.route.exceptions.RouteNotFoundException;
import com.thinkenterprise.domain.route.model.jpa.Route;
import com.thinkenterprise.domain.route.model.jpa.RouteRepository;
import com.thinkenterprise.graphql.context.CustomGraphQLServletContext;
import graphql.GraphQLError;
import graphql.GraphqlErrorBuilder;
import graphql.kickstart.spring.error.ErrorContext;
import graphql.kickstart.tools.GraphQLQueryResolver;
import graphql.schema.DataFetchingEnvironment;
/**
 * Root GraphQL query resolver for routes.
 *
 * GraphQL Spring Boot Training Design and Development by Michael Schäfer
 * Copyright (c) 2020 All Rights Reserved.
 *
 * @author Michael Schäfer
 */
@Component
public class RootQueryResolver implements GraphQLQueryResolver {

    protected static Logger log = LoggerFactory.getLogger(RootQueryResolver.class);

    private final RouteRepository routeRepository;

    @Autowired
    public RootQueryResolver(RouteRepository routeRepository) {
        this.routeRepository = routeRepository;
    }

    /**
     * Resolves the {@code route} query.
     *
     * @param flightNumber flight number identifying the route
     * @return the matching route
     * @throws RouteNotFoundException if no route exists for the given flight number
     */
    public Route route(String flightNumber) {
        // Previously an unchecked Optional.get() threw a bare NoSuchElementException for
        // unknown flights; throw the domain exception (imported but unused — evidently
        // the original intent) instead.
        // NOTE(review): assumes RouteNotFoundException has a String constructor — confirm.
        return routeRepository.findByFlightNumber(flightNumber)
                .orElseThrow(() -> new RouteNotFoundException(flightNumber));
    }

    /**
     * Resolves the {@code routes} query with offset pagination.
     *
     * @param page zero-based page index
     * @param size number of routes per page
     * @return the routes on the requested page
     */
    public List<Route> routes(int page, int size) {
        Pageable pageable = PageRequest.of(page, size);
        Page<Route> pageResult = routeRepository.findAll(pageable);
        return pageResult.toList();
    }
}
|
/*
* Copyright 2000-2018 Vaadin Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.vaadin.cdi.itest.uicontext;
import com.vaadin.cdi.itest.uicontext.UIScopedLabel.SetTextEvent;
import com.vaadin.flow.component.UI;
import com.vaadin.flow.component.html.Div;
import com.vaadin.flow.component.html.Label;
import com.vaadin.flow.component.html.NativeButton;
import com.vaadin.flow.router.Route;
import com.vaadin.flow.router.RouterLink;
import javax.annotation.PostConstruct;
import javax.enterprise.event.Event;
import javax.inject.Inject;
@Route("")
public class UIContextRootView extends Div {

    public static final String CLOSE_UI_BTN = "CLOSE_UI_BTN";
    public static final String CLOSE_SESSION_BTN = "CLOSE_SESSION_BTN";
    public static final String TRIGGER_EVENT_BTN = "TRIGGER_EVENT_BTN";
    public static final String INJECTER_LINK = "injecter view";
    public static final String UISCOPED_LINK = "uiscoped view";
    public static final String UIID_LABEL = "UIID_LABEL";
    public static final String NORMALSCOPED_LINK = "normalscoped bean view";
    public static final String EVENT_PAYLOAD = "EVENT_PAYLOAD";

    /** UI-scoped CDI bean shown on this view; also updated via CDI events. */
    @Inject
    private UIScopedLabel label;

    /** CDI event channel used to push a new text into the UI-scoped label. */
    @Inject
    private Event<SetTextEvent> setTextEventTrigger;

    /**
     * Builds the view after injection: shows the current UI id, wires the
     * close/session/event buttons, and links to the sibling test views.
     */
    @PostConstruct
    private void init() {
        // Publish the current UI id both through the injected UI-scoped label
        // and a plain label so tests can compare the two.
        final String currentUiId = String.valueOf(UI.getCurrent().getUIId());
        label.setText(currentUiId);

        final Label uiIdLabel = new Label(currentUiId);
        uiIdLabel.setId(UIID_LABEL);

        final NativeButton closeUiButton = new NativeButton("close UI",
                click -> getUI().ifPresent(UI::close));
        closeUiButton.setId(CLOSE_UI_BTN);

        final NativeButton closeSessionButton = new NativeButton("close session",
                click -> getUI().ifPresent(ui -> ui.getSession().close()));
        closeSessionButton.setId(CLOSE_SESSION_BTN);

        final NativeButton eventButton = new NativeButton("event trigger",
                click -> setTextEventTrigger.fire(new SetTextEvent(EVENT_PAYLOAD)));
        eventButton.setId(TRIGGER_EVENT_BTN);

        add(
                new Div(uiIdLabel),
                new Div(closeUiButton, closeSessionButton),
                new Div(eventButton),
                new Div(this.label),
                new Div(
                        new RouterLink(INJECTER_LINK, UIScopeInjecterView.class),
                        new RouterLink(UISCOPED_LINK, UIScopedView.class),
                        new RouterLink(NORMALSCOPED_LINK, UINormalScopedBeanView.class)));
    }
}
|
/*
* Copyright (C) 2016 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okhttp3.internal.tls;
import java.security.GeneralSecurityException;
import java.security.SecureRandom;
import java.security.cert.X509Certificate;
import java.util.Collections;
import javax.net.ssl.KeyManager;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLHandshakeException;
import javax.net.ssl.SSLPeerUnverifiedException;
import javax.net.ssl.SSLSocketFactory;
import javax.net.ssl.TrustManager;
import javax.net.ssl.X509KeyManager;
import javax.net.ssl.X509TrustManager;
import okhttp3.Call;
import okhttp3.CertificatePinner;
import okhttp3.OkHttpClient;
import okhttp3.RecordingHostnameVerifier;
import okhttp3.Request;
import okhttp3.Response;
import okhttp3.internal.platform.Platform;
import okhttp3.mockwebserver.MockResponse;
import okhttp3.mockwebserver.MockWebServer;
import okhttp3.mockwebserver.SocketPolicy;
import okhttp3.tls.HandshakeCertificates;
import okhttp3.tls.HeldCertificate;
import org.junit.Rule;
import org.junit.Test;
import static okhttp3.TestUtil.defaultClient;
import static okhttp3.internal.platform.PlatformTest.getJvmSpecVersion;
import static okhttp3.internal.platform.PlatformTest.getPlatform;
import static okhttp3.tls.internal.TlsUtil.newKeyManager;
import static okhttp3.tls.internal.TlsUtil.newTrustManager;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeFalse;
/**
 * Verifies that certificate pinning is evaluated against the chain the TLS
 * layer actually trusts, not merely the chain the server presents — including
 * attacks that splice an unrelated trusted certificate into the served chain.
 */
public final class CertificatePinnerChainValidationTest {
  @Rule public final MockWebServer server = new MockWebServer();

  /** The pinner should pull the root certificate from the trust manager. */
  @Test public void pinRootNotPresentInChain() throws Exception {
    // TODO https://github.com/square/okhttp/issues/4703
    assumeFalse(getJvmSpecVersion().equals("11"));
    // Chain: root CA -> intermediate CA -> server certificate.
    HeldCertificate rootCa = new HeldCertificate.Builder()
        .serialNumber(1L)
        .certificateAuthority(1)
        .commonName("root")
        .build();
    HeldCertificate intermediateCa = new HeldCertificate.Builder()
        .signedBy(rootCa)
        .certificateAuthority(0)
        .serialNumber(2L)
        .commonName("intermediate_ca")
        .build();
    HeldCertificate certificate = new HeldCertificate.Builder()
        .signedBy(intermediateCa)
        .serialNumber(3L)
        .commonName(server.getHostName())
        .build();
    // Pin the ROOT, which the server's served chain below does NOT include;
    // the pinner must find it via the trust manager's clean chain.
    CertificatePinner certificatePinner = new CertificatePinner.Builder()
        .add(server.getHostName(), CertificatePinner.pin(rootCa.certificate()))
        .build();
    HandshakeCertificates handshakeCertificates = new HandshakeCertificates.Builder()
        .addTrustedCertificate(rootCa.certificate())
        .build();
    OkHttpClient client = defaultClient().newBuilder()
        .sslSocketFactory(
            handshakeCertificates.sslSocketFactory(), handshakeCertificates.trustManager())
        .hostnameVerifier(new RecordingHostnameVerifier())
        .certificatePinner(certificatePinner)
        .build();
    // The server serves only the leaf and the intermediate — no root.
    HandshakeCertificates serverHandshakeCertificates = new HandshakeCertificates.Builder()
        .heldCertificate(certificate, intermediateCa.certificate())
        .build();
    server.useHttps(serverHandshakeCertificates.sslSocketFactory(), false);
    // The request should complete successfully.
    server.enqueue(new MockResponse()
        .setBody("abc")
        .setSocketPolicy(SocketPolicy.DISCONNECT_AT_END));
    Call call1 = client.newCall(new Request.Builder()
        .url(server.url("/"))
        .build());
    Response response1 = call1.execute();
    assertThat(response1.body().string()).isEqualTo("abc");
    // Confirm that a second request also succeeds. This should detect caching problems.
    server.enqueue(new MockResponse()
        .setBody("def")
        .setSocketPolicy(SocketPolicy.DISCONNECT_AT_END));
    Call call2 = client.newCall(new Request.Builder()
        .url(server.url("/"))
        .build());
    Response response2 = call2.execute();
    assertThat(response2.body().string()).isEqualTo("def");
  }

  /** The pinner should accept an intermediate from the server's chain. */
  @Test public void pinIntermediatePresentInChain() throws Exception {
    // TODO https://github.com/square/okhttp/issues/4703
    assumeFalse(getJvmSpecVersion().equals("11"));
    // Chain: root CA -> intermediate CA -> server certificate.
    HeldCertificate rootCa = new HeldCertificate.Builder()
        .serialNumber(1L)
        .certificateAuthority(1)
        .commonName("root")
        .build();
    HeldCertificate intermediateCa = new HeldCertificate.Builder()
        .signedBy(rootCa)
        .certificateAuthority(0)
        .serialNumber(2L)
        .commonName("intermediate_ca")
        .build();
    HeldCertificate certificate = new HeldCertificate.Builder()
        .signedBy(intermediateCa)
        .serialNumber(3L)
        .commonName(server.getHostName())
        .build();
    // Pin the INTERMEDIATE, which the server's served chain does include.
    CertificatePinner certificatePinner = new CertificatePinner.Builder()
        .add(server.getHostName(), CertificatePinner.pin(intermediateCa.certificate()))
        .build();
    HandshakeCertificates handshakeCertificates = new HandshakeCertificates.Builder()
        .addTrustedCertificate(rootCa.certificate())
        .build();
    OkHttpClient client = defaultClient().newBuilder()
        .sslSocketFactory(
            handshakeCertificates.sslSocketFactory(), handshakeCertificates.trustManager())
        .hostnameVerifier(new RecordingHostnameVerifier())
        .certificatePinner(certificatePinner)
        .build();
    HandshakeCertificates serverHandshakeCertificates = new HandshakeCertificates.Builder()
        .heldCertificate(certificate, intermediateCa.certificate())
        .build();
    server.useHttps(serverHandshakeCertificates.sslSocketFactory(), false);
    // The request should complete successfully.
    server.enqueue(new MockResponse()
        .setBody("abc")
        .setSocketPolicy(SocketPolicy.DISCONNECT_AT_END));
    Call call1 = client.newCall(new Request.Builder()
        .url(server.url("/"))
        .build());
    Response response1 = call1.execute();
    assertThat(response1.body().string()).isEqualTo("abc");
    response1.close();
    // Force a fresh connection for the next request.
    client.connectionPool().evictAll();
    // Confirm that a second request also succeeds. This should detect caching problems.
    server.enqueue(new MockResponse()
        .setBody("def")
        .setSocketPolicy(SocketPolicy.DISCONNECT_AT_END));
    Call call2 = client.newCall(new Request.Builder()
        .url(server.url("/"))
        .build());
    Response response2 = call2.execute();
    assertThat(response2.body().string()).isEqualTo("def");
    response2.close();
  }

  /**
   * A leaf certificate the client pins must not be accepted when the server's
   * actual chain is rooted elsewhere and merely appends the pinned leaf.
   */
  @Test public void unrelatedPinnedLeafCertificateInChain() throws Exception {
    // Start with a trusted root CA certificate.
    HeldCertificate rootCa = new HeldCertificate.Builder()
        .serialNumber(1L)
        .certificateAuthority(1)
        .commonName("root")
        .build();
    // Add a good intermediate CA, and have that issue a good certificate to localhost. Prepare an
    // SSL context for an HTTP client under attack. It includes the trusted CA and a pinned
    // certificate.
    HeldCertificate goodIntermediateCa = new HeldCertificate.Builder()
        .signedBy(rootCa)
        .certificateAuthority(0)
        .serialNumber(2L)
        .commonName("good_intermediate_ca")
        .build();
    HeldCertificate goodCertificate = new HeldCertificate.Builder()
        .signedBy(goodIntermediateCa)
        .serialNumber(3L)
        .commonName(server.getHostName())
        .build();
    CertificatePinner certificatePinner = new CertificatePinner.Builder()
        .add(server.getHostName(), CertificatePinner.pin(goodCertificate.certificate()))
        .build();
    HandshakeCertificates handshakeCertificates = new HandshakeCertificates.Builder()
        .addTrustedCertificate(rootCa.certificate())
        .build();
    OkHttpClient client = defaultClient().newBuilder()
        .sslSocketFactory(
            handshakeCertificates.sslSocketFactory(), handshakeCertificates.trustManager())
        .hostnameVerifier(new RecordingHostnameVerifier())
        .certificatePinner(certificatePinner)
        .build();
    // Add a bad intermediate CA and have that issue a rogue certificate for localhost. Prepare
    // an SSL context for an attacking webserver. It includes both these rogue certificates plus the
    // trusted good certificate above. The attack is that by including the good certificate in the
    // chain, we may trick the certificate pinner into accepting the rogue certificate.
    HeldCertificate compromisedIntermediateCa = new HeldCertificate.Builder()
        .signedBy(rootCa)
        .certificateAuthority(0)
        .serialNumber(4L)
        .commonName("bad_intermediate_ca")
        .build();
    HeldCertificate rogueCertificate = new HeldCertificate.Builder()
        .serialNumber(5L)
        .signedBy(compromisedIntermediateCa)
        .commonName(server.getHostName())
        .build();
    SSLSocketFactory socketFactory = newServerSocketFactory(rogueCertificate,
        compromisedIntermediateCa.certificate(), goodCertificate.certificate());
    server.useHttps(socketFactory, false);
    server.enqueue(new MockResponse()
        .setBody("abc")
        .addHeader("Content-Type: text/plain"));
    // Make a request from client to server. It should succeed certificate checks (unfortunately the
    // rogue CA is trusted) but it should fail certificate pinning.
    Request request = new Request.Builder()
        .url(server.url("/"))
        .build();
    Call call = client.newCall(request);
    try {
      call.execute();
      fail();
    } catch (SSLPeerUnverifiedException expected) {
      // Certificate pinning fails!
      String message = expected.getMessage();
      assertThat(message).startsWith("Certificate pinning failure!");
    }
  }

  /**
   * A pinned intermediate must not be accepted when an attacker serves the
   * good intermediate's certificate while actually chaining through a
   * compromised CA that reuses the same common name.
   */
  @Test public void unrelatedPinnedIntermediateCertificateInChain() throws Exception {
    // Start with two root CA certificates, one is good and the other is compromised.
    HeldCertificate rootCa = new HeldCertificate.Builder()
        .serialNumber(1L)
        .certificateAuthority(1)
        .commonName("root")
        .build();
    HeldCertificate compromisedRootCa = new HeldCertificate.Builder()
        .serialNumber(2L)
        .certificateAuthority(1)
        .commonName("compromised_root")
        .build();
    // Add a good intermediate CA, and have that issue a good certificate to localhost. Prepare an
    // SSL context for an HTTP client under attack. It includes the trusted CA and a pinned
    // certificate.
    HeldCertificate goodIntermediateCa = new HeldCertificate.Builder()
        .signedBy(rootCa)
        .certificateAuthority(0)
        .serialNumber(3L)
        .commonName("intermediate_ca")
        .build();
    CertificatePinner certificatePinner = new CertificatePinner.Builder()
        .add(server.getHostName(), CertificatePinner.pin(goodIntermediateCa.certificate()))
        .build();
    HandshakeCertificates handshakeCertificates = new HandshakeCertificates.Builder()
        .addTrustedCertificate(rootCa.certificate())
        .addTrustedCertificate(compromisedRootCa.certificate())
        .build();
    OkHttpClient client = defaultClient().newBuilder()
        .sslSocketFactory(
            handshakeCertificates.sslSocketFactory(), handshakeCertificates.trustManager())
        .hostnameVerifier(new RecordingHostnameVerifier())
        .certificatePinner(certificatePinner)
        .build();
    // The attacker compromises the root CA, issues an intermediate with the same common name
    // "intermediate_ca" as the good CA. This signs a rogue certificate for localhost. The server
    // serves the good CAs certificate in the chain, which means the certificate pinner sees a
    // different set of certificates than the SSL verifier.
    HeldCertificate compromisedIntermediateCa = new HeldCertificate.Builder()
        .signedBy(compromisedRootCa)
        .certificateAuthority(0)
        .serialNumber(4L)
        .commonName("intermediate_ca")
        .build();
    HeldCertificate rogueCertificate = new HeldCertificate.Builder()
        .serialNumber(5L)
        .signedBy(compromisedIntermediateCa)
        .commonName(server.getHostName())
        .build();
    SSLSocketFactory socketFactory = newServerSocketFactory(rogueCertificate,
        goodIntermediateCa.certificate(), compromisedIntermediateCa.certificate());
    server.useHttps(socketFactory, false);
    server.enqueue(new MockResponse()
        .setBody("abc")
        .addHeader("Content-Type: text/plain"));
    // Make a request from client to server. It should succeed certificate checks (unfortunately the
    // rogue CA is trusted) but it should fail certificate pinning.
    Request request = new Request.Builder()
        .url(server.url("/"))
        .build();
    Call call = client.newCall(request);
    try {
      call.execute();
      fail();
    } catch (SSLHandshakeException expected) {
      // On Android, the handshake fails before the certificate pinner runs.
      String message = expected.getMessage();
      assertThat(message).contains("Could not validate certificate");
    } catch (SSLPeerUnverifiedException expected) {
      // On OpenJDK, the handshake succeeds but the certificate pinner fails.
      String message = expected.getMessage();
      assertThat(message).startsWith("Certificate pinning failure!");
    }
  }

  /**
   * Builds a server-side socket factory that serves {@code heldCertificate}
   * followed by the given {@code intermediates} — in exactly that order, which
   * is what lets the tests above craft misleading chains.
   */
  private SSLSocketFactory newServerSocketFactory(HeldCertificate heldCertificate,
      X509Certificate... intermediates) throws GeneralSecurityException {
    // Test setup fails on JDK9
    // java.security.KeyStoreException: Certificate chain is not valid
    // at sun.security.pkcs12.PKCS12KeyStore.setKeyEntry
    // http://openjdk.java.net/jeps/229
    // http://hg.openjdk.java.net/jdk9/jdk9/jdk/file/2c1c21d11e58/src/share/classes/sun/security/pkcs12/PKCS12KeyStore.java#l596
    String keystoreType = getPlatform().equals("jdk9") ? "JKS" : null;
    X509KeyManager x509KeyManager = newKeyManager(keystoreType, heldCertificate, intermediates);
    // An empty trusted-certificate list: this server does not verify clients.
    X509TrustManager trustManager = newTrustManager(keystoreType, Collections.emptyList());
    SSLContext sslContext = Platform.get().getSSLContext();
    sslContext.init(new KeyManager[] { x509KeyManager }, new TrustManager[] { trustManager },
        new SecureRandom());
    return sslContext.getSocketFactory();
  }
}
|
/*
Copyright (C) GridGain Systems. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/* _________ _____ __________________ _____
* __ ____/___________(_)______ /__ ____/______ ____(_)_______
* _ / __ __ ___/__ / _ __ / _ / __ _ __ `/__ / __ __ \
* / /_/ / _ / _ / / /_/ / / /_/ / / /_/ / _ / _ / / /
* \____/ /_/ /_/ \_,__/ \____/ \__,_/ /_/ /_/ /_/
*/
package org.gridgain.grid.kernal.processors.cache.distributed.near;
import org.gridgain.grid.cache.*;
import org.gridgain.grid.kernal.processors.cache.*;
import static org.gridgain.grid.cache.GridCacheMode.*;
import static org.gridgain.grid.cache.GridCachePreloadMode.*;
import static org.gridgain.grid.cache.GridCacheWriteSynchronizationMode.*;
/**
 * Partitioned cache metrics test: runs the transactional metrics suite from
 * the abstract parent against a PARTITIONED cache spread over two grids.
 */
public class GridCachePartitionedMetricsSelfTest extends GridCacheTransactionalAbstractMetricsSelfTest {
    /** Number of grid nodes started for this test. */
    private static final int GRID_CNT = 2;

    /** {@inheritDoc} */
    @Override protected GridCacheConfiguration cacheConfiguration(String gridName) throws Exception {
        GridCacheConfiguration cfg = super.cacheConfiguration(gridName);

        // Every node other than the primary keeps a backup copy.
        cfg.setCacheMode(PARTITIONED);
        cfg.setBackups(gridCount() - 1);
        // SYNC preloading and FULL_SYNC writes — presumably to make metrics
        // deterministic across nodes; TODO confirm against the parent suite.
        cfg.setPreloadMode(SYNC);
        cfg.setTxSerializableEnabled(true);
        cfg.setWriteSynchronizationMode(FULL_SYNC);

        return cfg;
    }

    /** {@inheritDoc} */
    @Override protected int gridCount() {
        return GRID_CNT;
    }
}
|
/*
* Author: Benton Li '19
* Version: 1.0
*
* */
package org.firstinspires.ftc.teamcode;
import com.qualcomm.robotcore.eventloop.opmode.Autonomous;
import com.qualcomm.robotcore.eventloop.opmode.TeleOp;
import com.qualcomm.robotcore.hardware.DcMotorSimple;
import com.qualcomm.robotcore.util.ElapsedTime;
import com.qualcomm.robotcore.eventloop.opmode.OpMode;
import com.qualcomm.robotcore.hardware.DcMotor;
@TeleOp(name="Beta-Driving")
public class BDriving extends OpMode {

    /** Drive motors, one per side (tank chassis). */
    private DcMotor left = null;
    private DcMotor right = null;
    /** Lift / climbing motor (hardware name "mot2"). */
    private DcMotor lift = null;

    // Encoder conversion constants.
    static final double COUNTS_Per_REV = 1140;   // encoder ticks per output-shaft revolution
    static final double WHEEL_DIAMETER = 4;      // in inches
    static final double COUNTS_Per_INCH = COUNTS_Per_REV / (WHEEL_DIAMETER * Math.PI);
    // NOTE(review): (130 / diameter) is not an obvious degrees conversion --
    // confirm this formula against the robot's turning geometry.
    static final double COUNTS_Per_DEGREE = COUNTS_Per_REV / ((130) / WHEEL_DIAMETER);

    /** Power scale applied to the drive motors, kept within [0, 1]. */
    private static final double SPEED_STEP = 0.1;
    private static final double MIN_SPEED = 0.0;
    private static final double MAX_SPEED = 1.0;
    private double speed = .5;

    /** Maps the three motors and sets drive directions. */
    @Override
    public void init() {
        left = hardwareMap.get(DcMotor.class, "mot0");
        right = hardwareMap.get(DcMotor.class, "mot1");
        lift = hardwareMap.get(DcMotor.class, "mot2");
        left.setDirection(DcMotor.Direction.FORWARD);
        right.setDirection(DcMotor.Direction.REVERSE);
        lift.setDirection(DcMotor.Direction.REVERSE);
        telemetry.addData("Status", "Initialized");
    }

    @Override
    public void init_loop() {
    }

    @Override
    public void start() {
    }

    public ElapsedTime runTime = new ElapsedTime();

    /**
     * Polls gamepad buttons: X raises the speed scale, Y lowers it, and the
     * left bumper runs the automated lift sequence.
     *
     * Bug fixed: the original handled X and Y identically (both increased the
     * speed), leaving no way to slow down; Y now decreases it. The scale is
     * also clamped so repeated presses cannot push power outside [0, 1].
     * NOTE(review): there is no edge detection, so holding a button steps the
     * speed once per loop() iteration.
     */
    public void checkKeys() {
        if (gamepad1.x) {
            speed = Math.min(MAX_SPEED, speed + SPEED_STEP);
        }
        if (gamepad1.y) {
            speed = Math.max(MIN_SPEED, speed - SPEED_STEP);
        }
        if (gamepad1.left_bumper) {
            liftUp();
        }
    }

    /**
     * Drives the lift motor 10 inches up using RUN_TO_POSITION, reporting
     * progress over telemetry, then cuts power.
     *
     * NOTE(review): the while-loop blocks the TeleOp loop() until the move
     * finishes (no driving in the meantime), and the motor's run mode is
     * never restored afterwards -- confirm both are intended.
     */
    public void liftUp() {
        lift.setMode(DcMotor.RunMode.RUN_TO_POSITION);
        lift.setTargetPosition((int) (10 * COUNTS_Per_INCH));
        lift.setPower(speed);
        while (lift.isBusy()) {
            telemetry.addData("Climbing", 100 * lift.getCurrentPosition() / (10 * COUNTS_Per_INCH) + "%");
            telemetry.update();
        }
        lift.setPower(0);
    }

    /** Main TeleOp loop: arcade drive on the sticks, lift on the triggers. */
    @Override
    public void loop() {
        checkKeys();
        // Arcade mix: left stick X steers, right stick Y provides throttle.
        left.setPower(speed * (-gamepad1.left_stick_x + gamepad1.right_stick_y));
        right.setPower(speed * (-gamepad1.left_stick_x - gamepad1.right_stick_y));
        // Manual lift control: left trigger up, right trigger down.
        lift.setPower(gamepad1.left_trigger - gamepad1.right_trigger);
        telemetry.addData("Speed:", speed);
        telemetry.update();
    }
}
|
/**
* Copyright (c) 2018-2028, Chill Zhuang 庄骞 (smallchill@163.com).
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springblade.platform.baobiaowenjian.mapper;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import org.springblade.platform.baobiaowenjian.entity.BaobiaoMulu;
import org.springblade.platform.baobiaowenjian.page.BaobiaoWenjianPage;
import org.springblade.platform.baobiaowenjian.vo.BaobiaoMuluVO;
import java.util.List;
/**
 * MyBatis-Plus mapper interface for report-directory ({@code BaobiaoMulu})
 * records; inherits standard CRUD from {@link BaseMapper}.
 *
 * @author hyp
 * @since 2019-05-16
 */
public interface BaobiaoMuluMapper extends BaseMapper<BaobiaoMulu> {

    /**
     * Custom paged query for report entries.
     *
     * @param baobiaoWenjianPage page request carrying paging state and query
     *        criteria -- exact filter fields are defined in the XML mapper
     * @return the report rows for the requested page
     */
    List<BaobiaoMuluVO> selectBaogaoPage(BaobiaoWenjianPage baobiaoWenjianPage);

    /**
     * Counts the rows matching the same criteria as
     * {@link #selectBaogaoPage(BaobiaoWenjianPage)}.
     *
     * @param baobiaoWenjianPage page request carrying the query criteria
     * @return total number of matching rows
     */
    int selectBaogaoTotal(BaobiaoWenjianPage baobiaoWenjianPage);
}
|
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.codeInspection.java18StreamApi;
import com.intellij.codeInspection.AbstractBaseJavaLocalInspectionTool;
import com.intellij.codeInspection.LocalQuickFix;
import com.intellij.codeInspection.ProblemDescriptor;
import com.intellij.codeInspection.ProblemsHolder;
import com.intellij.java.JavaBundle;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.InvalidDataException;
import com.intellij.openapi.util.WriteExternalException;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.pom.java.JavaFeature;
import com.intellij.psi.*;
import org.jdom.Element;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import java.util.Collection;
/**
 * Inspection that flags static pseudo-functional calls (as configured in
 * {@link StaticPseudoFunctionalStyleMethodOptions}) and offers to convert
 * them to Java Stream API pipelines.
 *
 * @author Dmitry Batkovich
 */
public class StaticPseudoFunctionalStyleMethodInspection extends AbstractBaseJavaLocalInspectionTool {
  private final static Logger LOG = Logger.getInstance(StaticPseudoFunctionalStyleMethodInspection.class);
  private final StaticPseudoFunctionalStyleMethodOptions myOptions = new StaticPseudoFunctionalStyleMethodOptions();

  @Override
  public void readSettings(@NotNull Element node) throws InvalidDataException {
    myOptions.readExternal(node);
  }

  @Override
  public void writeSettings(@NotNull Element node) throws WriteExternalException {
    myOptions.writeExternal(node);
  }

  @Nullable
  @Override
  public JComponent createOptionsPanel() {
    return myOptions.createPanel();
  }

  @NotNull
  @Override
  public PsiElementVisitor buildVisitor(@NotNull final ProblemsHolder holder, boolean isOnTheFly) {
    // Only suggest streams when the file's language level supports them.
    if (!JavaFeature.STREAMS.isFeatureSupported(holder.getFile())) {
      return PsiElementVisitor.EMPTY_VISITOR;
    }
    return new JavaElementVisitor() {
      @Override
      public void visitMethodCallExpression(PsiMethodCallExpression call) {
        final String qualifiedName = call.getMethodExpression().getQualifiedName();
        if (qualifiedName == null) {
          return;
        }
        // Candidate handlers are looked up by the call's short method name.
        final Collection<StaticPseudoFunctionalStyleMethodOptions.PipelineElement> candidates =
          myOptions.findElementsByMethodName(StringUtil.getShortName(qualifiedName));
        if (candidates.isEmpty()) {
          return;
        }
        // Resolve the called method's declaring class; bail out on any gap.
        final PsiMethod method = call.resolveMethod();
        final PsiClass containingClass = method == null ? null : method.getContainingClass();
        final String classQualifiedName = containingClass == null ? null : containingClass.getQualifiedName();
        if (classQualifiedName == null) {
          return;
        }
        final StaticPseudoFunctionalStyleMethodOptions.PipelineElement handler = candidates.stream()
          .filter(candidate -> candidate.getHandlerClass().equals(classQualifiedName))
          .findFirst()
          .orElse(null);
        if (handler == null) {
          return;
        }
        // A non-null validation result means the call matches the template
        // shape, so a stream replacement can be offered.
        if (handler.getTemplate().validate(call) != null) {
          holder.registerProblem(call.getMethodExpression(),
                                 JavaBundle.message("inspection.message.pseudo.functional.style.code"),
                                 new ReplacePseudoLambdaWithLambda(handler));
        }
      }
    };
  }

  /** Quick-fix that rewrites the flagged call into a stream pipeline. */
  public static final class ReplacePseudoLambdaWithLambda implements LocalQuickFix {
    private final StaticPseudoFunctionalStyleMethodOptions.PipelineElement myHandler;

    private ReplacePseudoLambdaWithLambda(StaticPseudoFunctionalStyleMethodOptions.PipelineElement handler) {
      myHandler = handler;
    }

    @NotNull
    @Override
    public String getFamilyName() {
      return JavaBundle.message("quickfix.family.replace.with.java.stream.api.pipeline");
    }

    @Override
    public void applyFix(@NotNull Project project, @NotNull ProblemDescriptor descriptor) {
      // The problem is registered on the method reference; convert its
      // enclosing call expression.
      final PsiElement element = descriptor.getPsiElement();
      final PsiElement parent = element instanceof PsiReferenceExpression ? element.getParent() : null;
      if (parent instanceof PsiMethodCallExpression) {
        myHandler.getTemplate().convertToStream((PsiMethodCallExpression)parent, null, false);
      }
    }
  }
}
|
/*******************************************************************************
* MIT License
*
* Copyright (c) 2017 CNES
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*******************************************************************************/
package fr.cnes.mal.broker;
import java.util.Map;
import java.util.Vector;
import org.ccsds.moims.mo.mal.MALException;
import org.ccsds.moims.mo.mal.structures.Blob;
import org.ccsds.moims.mo.mal.structures.EntityKey;
import org.ccsds.moims.mo.mal.structures.EntityKeyList;
import org.ccsds.moims.mo.mal.structures.EntityRequest;
import org.ccsds.moims.mo.mal.structures.EntityRequestList;
import org.ccsds.moims.mo.mal.structures.IdentifierList;
import org.ccsds.moims.mo.mal.structures.Subscription;
import org.ccsds.moims.mo.mal.transport.MALMessageHeader;
import org.objectweb.util.monolog.api.BasicLevel;
import org.objectweb.util.monolog.api.Logger;
import fr.cnes.mal.broker.internal.Broker;
import fr.cnes.mal.broker.internal.BrokerEntityRequest;
import fr.cnes.mal.broker.internal.BrokerNotification;
import fr.cnes.mal.broker.internal.BrokerPublication;
import fr.cnes.mal.broker.internal.PublisherContext;
import fr.cnes.mal.broker.internal.UnknownEntityException;
import fr.cnes.mal.broker.internal.UnknownPublisherException;
public class BrokerAdapter {
public final static Logger logger = fr.dyade.aaa.common.Debug
.getLogger(BrokerAdapter.class.getName());
private Broker broker;
private Blob authenticationId;
public BrokerAdapter(Broker broker, Blob authenticationId) {
super();
this.broker = broker;
this.authenticationId = authenticationId;
}
public Blob getAuthenticationId() {
return authenticationId;
}
public synchronized void handleRegister(MALMessageHeader header,
Subscription subscription, Map qosProperties) throws MALException {
if (logger.isLoggable(BasicLevel.DEBUG))
logger.log(BasicLevel.DEBUG, "TopicHandler.handleRegister("
+ header + ',' + subscription + ')');
try {
EntityRequestList entityList = subscription.getEntities();
if (logger.isLoggable(BasicLevel.DEBUG))
logger.log(BasicLevel.DEBUG, "entityList=" + entityList);
Vector brokerEntityRequestList = new Vector();
for (int i = 0; i < entityList.size(); i++) {
EntityRequest entityRequest = (EntityRequest) entityList.get(i);
EntityKeyList entityKeys = entityRequest.getEntityKeys();
for (int j = 0; j < entityKeys.size(); j++) {
EntityKey key = entityKeys.get(j);
brokerEntityRequestList.addElement(new BrokerEntityRequest(
entityRequest.getSubDomain(),
entityRequest.getAllAreas(),
entityRequest.getAllServices(),
entityRequest.getAllOperations(),
key,
entityRequest.getOnlyOnChange().booleanValue()));
}
}
BrokerEntityRequest[] brokerEntityRequests = new BrokerEntityRequest[brokerEntityRequestList
.size()];
brokerEntityRequestList.copyInto(brokerEntityRequests);
broker.register(header.getURIFrom(),
header.getTransactionId(),
header.getDomain(),
header.getNetworkZone(),
header.getSession(),
header.getSessionName(),
header.getQoSlevel(),
qosProperties,
header.getPriority(),
subscription.getSubscriptionId(),
brokerEntityRequests,
header.getServiceArea(),
header.getService(),
header.getOperation(),
header.getAreaVersion());
} catch (Exception exc) {
if (logger.isLoggable(BasicLevel.DEBUG))
logger.log(BasicLevel.DEBUG, "", exc);
throw new MALException(exc.toString(), exc);
}
}
public synchronized void handlePublishRegister(MALMessageHeader header,
EntityKeyList entityKeys) throws MALException {
if (logger.isLoggable(BasicLevel.DEBUG))
logger.log(BasicLevel.DEBUG, "TopicHandler.handlePublishRegister(" +
header + ',' + entityKeys + ')');
PublisherContext publisherContext = broker.registerPublisher(
header.getURIFrom(),
header.getTransactionId(),
header.getDomain(),
header.getNetworkZone(),
header.getSession(),
header.getSessionName(),
header.getQoSlevel(),
header.getPriority(),
entityKeys,
header.getServiceArea(),
header.getService(),
header.getOperation(),
header.getAreaVersion());
// Update the QoS and priority levels so that the ack
// QoS and priority header fields are assigned with the FIRST
// PUBLISH REGISTER values.
if (logger.isLoggable(BasicLevel.DEBUG))
logger.log(BasicLevel.DEBUG, "publisherContext=" + publisherContext);
header.setQoSlevel(publisherContext.getQos());
header.setPriority(publisherContext.getPriority());
}
public synchronized void handlePublishDeregister(MALMessageHeader header) throws MALException {
PublisherContext publisherContext = broker.deregisterPublisher(
header.getURIFrom(),
header.getDomain(),
header.getNetworkZone(),
header.getSession(),
header.getSessionName(),
header.getServiceArea(),
header.getService(),
header.getOperation(),
header.getAreaVersion());
if (publisherContext != null) {
// Update the QoS and priority levels so that the ack
// QoS and priority header fields are assigned with the FIRST
// PUBLISH REGISTER values.
if (logger.isLoggable(BasicLevel.DEBUG))
logger.log(BasicLevel.DEBUG, "publisherContext=" + publisherContext);
header.setQoSlevel(publisherContext.getQos());
header.setPriority(publisherContext.getPriority());
}
}
public synchronized void handleDeregister(MALMessageHeader header,
IdentifierList subscriptionIds) throws MALException {
try {
broker.deregister(header.getURIFrom(),
header.getDomain(),
header.getNetworkZone(),
header.getSession(),
header.getSessionName(),
subscriptionIds,
header.getServiceArea(),
header.getService(),
header.getOperation(),
header.getAreaVersion());
} catch (MALException e) {
if (logger.isLoggable(BasicLevel.DEBUG))
logger.log(BasicLevel.DEBUG, "", e);
throw e;
} catch (Exception exc) {
if (logger.isLoggable(BasicLevel.DEBUG))
logger.log(BasicLevel.DEBUG, "", exc);
throw new MALException(exc.toString(), exc);
}
}
/**
 * Deregisters the consumer addressed by {@code header.getURITo()} from the
 * broker (all subscriptions, as opposed to {@code handleDeregister} which
 * removes an explicit subscription list). Failures are logged at DEBUG level;
 * {@link MALException}s are rethrown as-is, anything else is wrapped.
 *
 * @param header the MAL message header whose destination URI identifies the consumer
 * @throws MALException if the broker fails to deregister the consumer
 */
public synchronized void deregister(MALMessageHeader header) throws MALException {
    try {
        broker.deregister(
                header.getURITo(),
                header.getDomain(),
                header.getNetworkZone(),
                header.getSession(),
                header.getSessionName(),
                header.getServiceArea(),
                header.getService(),
                header.getOperation(),
                header.getAreaVersion());
    } catch (MALException malError) {
        if (logger.isLoggable(BasicLevel.DEBUG)) {
            logger.log(BasicLevel.DEBUG, "", malError);
        }
        throw malError;
    } catch (Exception unexpected) {
        if (logger.isLoggable(BasicLevel.DEBUG)) {
            logger.log(BasicLevel.DEBUG, "", unexpected);
        }
        throw new MALException(unexpected.toString(), unexpected);
    }
}
/**
 * Unregisters this broker's JMX MBeans; simply delegates to the wrapped broker.
 */
public synchronized void unregisterMBeans() {
broker.unregisterMBeans();
}
/**
 * Forwards the publication to the wrapped broker and returns the resulting
 * notifications.
 *
 * @param publication the publication to distribute
 * @return the notifications produced by the broker for this publication
 * @throws UnknownEntityException    propagated from the broker
 * @throws UnknownPublisherException propagated from the broker
 */
public synchronized BrokerNotification[] publish(BrokerPublication publication)
throws UnknownEntityException, UnknownPublisherException {
return broker.publish(publication);
}
}
|
package org.occidere.githubnotifier.configuration;
import org.elasticsearch.client.RestHighLevelClient;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.elasticsearch.client.ClientConfiguration;
import org.springframework.data.elasticsearch.client.RestClients;
import org.springframework.data.elasticsearch.config.AbstractElasticsearchConfiguration;
import org.springframework.data.elasticsearch.repository.config.EnableElasticsearchRepositories;
/**
* @author occidere
* @Blog: https://blog.naver.com/occidere
* @Github: https://github.com/occidere
* @since 2019. 12. 17.
*/
@Configuration
@EnableElasticsearchRepositories
public class ElasticsearchConfiguration extends AbstractElasticsearchConfiguration {

    // Elasticsearch endpoint as "host:port"; defaults to localhost:9200 when
    // the "elasticsearch.endpoint" property is not set.
    @Value("${elasticsearch.endpoint:localhost:9200}")
    private String elasticsearchEndpoint;

    /**
     * Builds the high-level REST client that Spring Data Elasticsearch
     * repositories use, pointed at the configured endpoint.
     */
    @Override
    public RestHighLevelClient elasticsearchClient() {
        final ClientConfiguration clientConfiguration =
                ClientConfiguration.create(elasticsearchEndpoint);
        return RestClients.create(clientConfiguration).rest();
    }
}
|
package com.example.dmitry.kinopoiskparser;
import android.support.v4.app.Fragment;
/**
 * Activity hosting the search screen; supplies the fragment that the abstract
 * host activity displays.
 */
public class SearchActivity extends CommonAbstractActivityHost {

    /** Returns a fresh {@link SearchFragment} for the host to display. */
    @Override
    protected Fragment createFragment() {
        final Fragment searchFragment = new SearchFragment();
        return searchFragment;
    }
}
|
/*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.amqp;
import com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueProperties;
import com.netflix.conductor.contribs.queue.amqp.util.AMQPConstants;
import com.netflix.conductor.contribs.queue.amqp.util.AMQPSettings;
import com.netflix.conductor.core.events.queue.Message;
import com.rabbitmq.client.AMQP;
import com.rabbitmq.client.AMQP.PROTOCOL;
import com.rabbitmq.client.AMQP.Queue.DeclareOk;
import com.rabbitmq.client.Address;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;
import com.rabbitmq.client.Consumer;
import com.rabbitmq.client.Envelope;
import com.rabbitmq.client.GetResponse;
import com.rabbitmq.client.impl.AMQImpl;
import java.time.Duration;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.StringUtils;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentMatchers;
import org.mockito.Mockito;
import org.mockito.internal.stubbing.answers.DoesNothing;
import org.mockito.internal.stubbing.answers.ReturnsElementsOf;
import org.mockito.stubbing.OngoingStubbing;
import rx.Observable;
import rx.observers.Subscribers;
import rx.observers.TestSubscriber;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.anyMap;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.atLeast;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@code AMQPObservableQueue} using fully mocked RabbitMQ
 * {@code Connection}/{@code Channel} objects — no live broker is required.
 *
 * Fixes applied in this revision:
 * - mock stubs that simulate a MISSING queue/exchange now throw
 *   "... does not exist" instead of the misleading "... exists";
 * - removed a duplicated {@code basicAck} stubbing in {@link #runObserve};
 * - removed commented-out dead code and an unused local in {@link #testAck};
 * - replaced a raw {@code LinkedList} with the diamond form.
 */
@SuppressWarnings({"rawtypes", "unchecked"})
public class AMQPObservableQueueTest {

    final int batchSize = 10;
    final int pollTimeMs = 500;

    Address[] addresses;
    AMQPEventQueueProperties properties;

    /** Stubs queue properties with RabbitMQ client defaults and resets the shared connection. */
    @Before
    public void setUp() {
        properties = mock(AMQPEventQueueProperties.class);
        when(properties.getBatchSize()).thenReturn(1);
        when(properties.getPollTimeDuration()).thenReturn(Duration.ofMillis(100));
        when(properties.getHosts()).thenReturn(ConnectionFactory.DEFAULT_HOST);
        when(properties.getUsername()).thenReturn(ConnectionFactory.DEFAULT_USER);
        when(properties.getPassword()).thenReturn(ConnectionFactory.DEFAULT_PASS);
        when(properties.getVirtualHost()).thenReturn(ConnectionFactory.DEFAULT_VHOST);
        when(properties.getPort()).thenReturn(PROTOCOL.PORT);
        when(properties.getConnectionTimeout())
                .thenReturn(Duration.ofMillis(ConnectionFactory.DEFAULT_CONNECTION_TIMEOUT));
        when(properties.isUseNio()).thenReturn(false);
        when(properties.isDurable()).thenReturn(true);
        when(properties.isExclusive()).thenReturn(false);
        when(properties.isAutoDelete()).thenReturn(false);
        when(properties.getContentType()).thenReturn("application/json");
        when(properties.getContentEncoding()).thenReturn("UTF-8");
        when(properties.getExchangeType()).thenReturn("topic");
        when(properties.getDeliveryMode()).thenReturn(2);
        when(properties.isUseExchange()).thenReturn(true);
        addresses = new Address[]{new Address("localhost", PROTOCOL.PORT)};
        // Reset the cached singleton connection so each test starts clean.
        AMQPConnection.setAMQPConnection(null);
    }

    /**
     * Builds {@code bound} mocked {@link GetResponse}s with random delivery tags,
     * JSON-ish bodies and a decreasing remaining-message count.
     */
    List<GetResponse> buildQueue(final Random random, final int bound) {
        final LinkedList<GetResponse> queue = new LinkedList<>();
        for (int i = 0; i < bound; i++) {
            AMQP.BasicProperties props = mock(AMQP.BasicProperties.class);
            when(props.getMessageId()).thenReturn(UUID.randomUUID().toString());
            Envelope envelope = mock(Envelope.class);
            when(envelope.getDeliveryTag()).thenReturn(random.nextLong());
            GetResponse response = mock(GetResponse.class);
            when(response.getProps()).thenReturn(props);
            when(response.getEnvelope()).thenReturn(envelope);
            when(response.getBody()).thenReturn("{}".getBytes());
            when(response.getMessageCount()).thenReturn(bound - i);
            queue.add(response);
        }
        return queue;
    }

    /** Builds a mocked, open channel with no queue/exchange behaviour stubbed yet. */
    Channel mockBaseChannel() throws IOException, TimeoutException {
        Channel channel = mock(Channel.class);
        when(channel.isOpen()).thenReturn(Boolean.TRUE);
        return channel;
    }

    /**
     * Stubs queue-level operations on {@code channel}.
     *
     * @param isWorking when false, declare/consume/publish fail after the first call
     * @param exists    whether {@code queueDeclarePassive} should succeed
     */
    Channel mockChannelForQueue(Channel channel, boolean isWorking, boolean exists, String name,
            List<GetResponse> queue) throws IOException {
        // queueDeclarePassive
        final AMQImpl.Queue.DeclareOk queueDeclareOK = new AMQImpl.Queue.DeclareOk(name, queue.size(), 1);
        if (exists) {
            when(channel.queueDeclarePassive(eq(name))).thenReturn(queueDeclareOK);
        } else {
            // Passive declare of a missing queue fails.
            when(channel.queueDeclarePassive(eq(name))).thenThrow(new IOException("Queue " + name + " does not exist"));
        }
        // queueDeclare
        OngoingStubbing<DeclareOk> declareOkOngoingStubbing = when(channel.queueDeclare(eq(name),
                anyBoolean(), anyBoolean(), anyBoolean(), anyMap()))
                .thenReturn(queueDeclareOK);
        if (!isWorking) {
            declareOkOngoingStubbing.thenThrow(new IOException("Cannot declare queue " + name),
                    new RuntimeException("Not working"));
        }
        // messageCount
        when(channel.messageCount(eq(name))).thenReturn((long) queue.size());
        // basicConsume
        OngoingStubbing<String> getResponseOngoingStubbing = Mockito
                .when(channel.basicConsume(eq(name), anyBoolean(), any(Consumer.class)))
                .thenReturn(name);
        if (!isWorking) {
            getResponseOngoingStubbing.thenThrow(new IOException("Not working"), new RuntimeException("Not working"));
        }
        // basicPublish
        if (isWorking) {
            doNothing().when(channel).basicPublish(eq(StringUtils.EMPTY), eq(name),
                    any(AMQP.BasicProperties.class), any(byte[].class));
        } else {
            doThrow(new IOException("Not working")).when(channel).basicPublish(eq(StringUtils.EMPTY), eq(name),
                    any(AMQP.BasicProperties.class), any(byte[].class));
        }
        return channel;
    }

    /**
     * Stubs exchange-level operations on {@code channel}: exchange declare,
     * bound-queue declare/bind, consume and publish.
     *
     * @param isWorking when false, declare/consume/publish fail after the first call
     * @param exists    whether passive declares should succeed
     */
    Channel mockChannelForExchange(Channel channel, boolean isWorking, boolean exists, String queueName, String name,
            String type, String routingKey, List<GetResponse> queue) throws IOException {
        // exchangeDeclarePassive
        final AMQImpl.Exchange.DeclareOk exchangeDeclareOK = new AMQImpl.Exchange.DeclareOk();
        if (exists) {
            when(channel.exchangeDeclarePassive(eq(name))).thenReturn(exchangeDeclareOK);
        } else {
            // Passive declare of a missing exchange fails.
            when(channel.exchangeDeclarePassive(eq(name)))
                    .thenThrow(new IOException("Exchange " + name + " does not exist"));
        }
        // exchangeDeclare
        OngoingStubbing<AMQP.Exchange.DeclareOk> declareOkOngoingStubbing = when(channel
                .exchangeDeclare(eq(name), eq(type), anyBoolean(), anyBoolean(), anyMap()))
                .thenReturn(exchangeDeclareOK);
        if (!isWorking) {
            declareOkOngoingStubbing.thenThrow(new IOException("Cannot declare exchange " + name + " of type " + type),
                    new RuntimeException("Not working"));
        }
        // queueDeclarePassive
        final AMQImpl.Queue.DeclareOk queueDeclareOK = new AMQImpl.Queue.DeclareOk(queueName, queue.size(), 1);
        if (exists) {
            when(channel.queueDeclarePassive(eq(queueName))).thenReturn(queueDeclareOK);
        } else {
            when(channel.queueDeclarePassive(eq(queueName)))
                    .thenThrow(new IOException("Queue " + queueName + " does not exist"));
        }
        // queueDeclare
        when(channel.queueDeclare(eq(queueName), anyBoolean(), anyBoolean(),
                anyBoolean(), anyMap())).thenReturn(queueDeclareOK);
        // queueBind
        when(channel.queueBind(eq(queueName), eq(name), eq(routingKey))).thenReturn(new AMQImpl.Queue.BindOk());
        // messageCount
        when(channel.messageCount(eq(name))).thenReturn((long) queue.size());
        // basicConsume
        OngoingStubbing<String> getResponseOngoingStubbing = Mockito
                .when(channel.basicConsume(eq(queueName), anyBoolean(), any(Consumer.class)))
                .thenReturn(queueName);
        if (!isWorking) {
            getResponseOngoingStubbing.thenThrow(new IOException("Not working"), new RuntimeException("Not working"));
        }
        // basicPublish
        if (isWorking) {
            doNothing().when(channel).basicPublish(eq(name), eq(routingKey),
                    any(AMQP.BasicProperties.class), any(byte[].class));
        } else {
            doThrow(new IOException("Not working")).when(channel).basicPublish(eq(name), eq(routingKey),
                    any(AMQP.BasicProperties.class), any(byte[].class));
        }
        return channel;
    }

    /** Mocks an open connection whose {@code createChannel()} returns {@code channel}. */
    Connection mockGoodConnection(Channel channel) throws IOException {
        Connection connection = mock(Connection.class);
        when(connection.createChannel()).thenReturn(channel);
        when(connection.isOpen()).thenReturn(Boolean.TRUE);
        return connection;
    }

    /** Mocks an open connection that can neither create channels nor close cleanly. */
    Connection mockBadConnection() throws IOException {
        Connection connection = mock(Connection.class);
        when(connection.createChannel()).thenThrow(new IOException("Can't create channel"));
        when(connection.isOpen()).thenReturn(Boolean.TRUE);
        doThrow(new IOException("Can't close connection")).when(connection).close();
        return connection;
    }

    /** Mocks a factory that always hands out {@code connection} for this test's addresses. */
    ConnectionFactory mockConnectionFactory(Connection connection) throws IOException, TimeoutException {
        ConnectionFactory connectionFactory = mock(ConnectionFactory.class);
        when(connectionFactory.newConnection(eq(addresses), Mockito.anyString())).thenReturn(connection);
        return connectionFactory;
    }

    /**
     * Subscribes to the queue's observable for two poll periods, then either acks
     * what was received (working channel) or asserts nothing arrived (broken one).
     */
    void runObserve(Channel channel, AMQPObservableQueue observableQueue, String queueName, boolean useWorkingChannel,
            int batchSize) throws IOException {
        final List<Message> found = new ArrayList<>(batchSize);
        TestSubscriber<Message> subscriber = TestSubscriber.create(Subscribers.create(found::add));
        rx.Observable<Message> observable = observableQueue.observe().take(pollTimeMs * 2, TimeUnit.MILLISECONDS);
        assertNotNull(observable);
        observable.subscribe(subscriber);
        subscriber.awaitTerminalEvent();
        subscriber.assertNoErrors();
        subscriber.assertCompleted();
        if (useWorkingChannel) {
            verify(channel, atLeast(1)).basicConsume(eq(queueName), anyBoolean(), any(Consumer.class));
            // Single stubbing is enough; the previous duplicate doAnswer call was redundant.
            doNothing().when(channel).basicAck(anyLong(), eq(false));
            observableQueue.ack(Collections.synchronizedList(found));
        } else {
            assertNotNull(found);
            assertTrue(found.isEmpty());
        }
        observableQueue.close();
    }

    // Tests

    @Test
    public void testGetMessagesFromExistingExchangeAndDefaultConfiguration() throws IOException, TimeoutException {
        // Mock channel and connection
        Channel channel = mockBaseChannel();
        Connection connection = mockGoodConnection(channel);
        testGetMessagesFromExchangeAndDefaultConfiguration(channel, connection, true, true);
    }

    @Test
    public void testGetMessagesFromNotExistingExchangeAndDefaultConfiguration() throws IOException, TimeoutException {
        // Mock channel and connection
        Channel channel = mockBaseChannel();
        Connection connection = mockGoodConnection(channel);
        testGetMessagesFromExchangeAndDefaultConfiguration(channel, connection, false, true);
    }

    @Test
    public void testGetMessagesFromExistingExchangeWithDurableExclusiveAutoDeleteQueueConfiguration()
            throws IOException, TimeoutException {
        // Mock channel and connection
        Channel channel = mockBaseChannel();
        Connection connection = mockGoodConnection(channel);
        testGetMessagesFromExchangeAndCustomConfigurationFromURI(channel, connection, true, true, true, true, true);
    }

    @Test
    public void testGetMessagesFromNotExistingExchangeWithNonDurableNonExclusiveNonAutoDeleteQueueConfiguration()
            throws IOException, TimeoutException {
        // Mock channel and connection
        Channel channel = mockBaseChannel();
        Connection connection = mockGoodConnection(channel);
        testGetMessagesFromExchangeAndCustomConfigurationFromURI(channel, connection, false, true, false, false, false);
    }

    @Test
    public void testGetMessagesFromNotExistingExchangeWithDurableExclusiveNonAutoDeleteQueueConfiguration()
            throws IOException, TimeoutException {
        // Mock channel and connection
        Channel channel = mockBaseChannel();
        Connection connection = mockGoodConnection(channel);
        testGetMessagesFromExchangeAndCustomConfigurationFromURI(channel, connection, false, true, true, true, false);
    }

    @Test
    public void testPublishMessagesToNotExistingExchangeAndDefaultConfiguration() throws IOException, TimeoutException {
        // Mock channel and connection
        Channel channel = mockBaseChannel();
        Connection connection = mockGoodConnection(channel);
        testPublishMessagesToExchangeAndDefaultConfiguration(channel, connection, false, true);
    }

    @Test(expected = RuntimeException.class)
    public void testGetMessagesFromExchangeWithBadConnection() throws IOException, TimeoutException {
        // Mock channel and connection
        Channel channel = mockBaseChannel();
        Connection connection = mockBadConnection();
        testGetMessagesFromExchangeAndDefaultConfiguration(channel, connection, true, true);
    }

    @Test(expected = RuntimeException.class)
    public void testPublishMessagesToExchangeWithBadConnection() throws IOException, TimeoutException {
        // Mock channel and connection
        Channel channel = mockBaseChannel();
        Connection connection = mockBadConnection();
        testPublishMessagesToExchangeAndDefaultConfiguration(channel, connection, true, true);
    }

    @Test
    public void testGetMessagesFromExchangeWithBadChannel() throws IOException, TimeoutException {
        // Mock channel and connection
        Channel channel = mockBaseChannel();
        Connection connection = mockGoodConnection(channel);
        testGetMessagesFromExchangeAndDefaultConfiguration(channel, connection, true, false);
    }

    @Test(expected = RuntimeException.class)
    public void testPublishMessagesToExchangeWithBadChannel() throws IOException, TimeoutException {
        // Mock channel and connection
        Channel channel = mockBaseChannel();
        Connection connection = mockGoodConnection(channel);
        testPublishMessagesToExchangeAndDefaultConfiguration(channel, connection, true, false);
    }

    @Test
    public void testAck() throws IOException, TimeoutException {
        // Mock channel and connection
        Channel channel = mockBaseChannel();
        Connection connection = mockGoodConnection(channel);
        final String name = RandomStringUtils.randomAlphabetic(30), type = "topic",
                routingKey = RandomStringUtils.randomAlphabetic(30);
        final AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_exchange:" + name + "?exchangeType="
                + type + "&routingKey=" + routingKey);
        AMQPObservableQueue observableQueue = new AMQPObservableQueue(mockConnectionFactory(connection), addresses,
                true, settings, batchSize, pollTimeMs);
        List<Message> messages = new LinkedList<>();
        Message msg = new Message();
        msg.setId("0e3eef8f-ebb1-4244-9665-759ab5bdf433");
        msg.setPayload("Payload");
        msg.setReceipt("1");
        messages.add(msg);
        List<String> deliveredTags = observableQueue.ack(messages);
        assertNotNull(deliveredTags);
    }

    /** Exercises observe() against an exchange configured only via URI defaults. */
    private void testGetMessagesFromExchangeAndDefaultConfiguration(Channel channel, Connection connection,
            boolean exists, boolean useWorkingChannel) throws IOException, TimeoutException {
        final Random random = new Random();
        final String name = RandomStringUtils.randomAlphabetic(30), type = "topic",
                routingKey = RandomStringUtils.randomAlphabetic(30);
        final String queueName = String.format("bound_to_%s", name);
        final AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_exchange:" + name + "?exchangeType="
                + type + "&routingKey=" + routingKey);
        assertTrue(settings.isDurable());
        assertFalse(settings.isExclusive());
        assertFalse(settings.autoDelete());
        assertEquals(2, settings.getDeliveryMode());
        assertEquals(name, settings.getQueueOrExchangeName());
        assertEquals(type, settings.getExchangeType());
        assertEquals(routingKey, settings.getRoutingKey());
        List<GetResponse> queue = buildQueue(random, batchSize);
        channel = mockChannelForExchange(channel, useWorkingChannel, exists, queueName, name, type, routingKey, queue);
        AMQPObservableQueue observableQueue = new AMQPObservableQueue(mockConnectionFactory(connection), addresses,
                true, settings, batchSize, pollTimeMs);
        assertArrayEquals(addresses, observableQueue.getAddresses());
        assertEquals(AMQPConstants.AMQP_EXCHANGE_TYPE, observableQueue.getType());
        assertEquals(AMQPConstants.AMQP_EXCHANGE_TYPE + ":" + name + "?exchangeType="
                + type + "&routingKey=" + routingKey, observableQueue.getName());
        assertEquals(name, observableQueue.getURI());
        assertEquals(batchSize, observableQueue.getBatchSize());
        assertEquals(pollTimeMs, observableQueue.getPollTimeInMS());
        assertEquals(queue.size(), observableQueue.size());
        runObserve(channel, observableQueue, queueName, useWorkingChannel, batchSize);
        if (useWorkingChannel) {
            verify(channel, atLeastOnce()).exchangeDeclare(eq(name), eq(type), eq(settings.isDurable()),
                    eq(settings.autoDelete()), eq(Collections.emptyMap()));
            verify(channel, atLeastOnce())
                    .queueDeclare(eq(queueName), eq(settings.isDurable()), eq(settings.isExclusive()),
                            eq(settings.autoDelete()),
                            anyMap());
            verify(channel, atLeastOnce()).queueBind(eq(queueName), eq(name), eq(routingKey));
        }
    }

    /** Exercises observe() against an exchange with durable/exclusive/autoDelete set via URI. */
    private void testGetMessagesFromExchangeAndCustomConfigurationFromURI(Channel channel, Connection connection,
            boolean exists, boolean useWorkingChannel, boolean durable, boolean exclusive, boolean autoDelete)
            throws IOException, TimeoutException {
        final Random random = new Random();
        final String name = RandomStringUtils.randomAlphabetic(30), type = "topic",
                routingKey = RandomStringUtils.randomAlphabetic(30);
        final String queueName = String.format("bound_to_%s", name);
        final AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_exchange:" + name + "?exchangeType="
                + type + "&routingKey=" + routingKey + "&deliveryMode=2"
                + "&durable=" + durable + "&exclusive=" + exclusive + "&autoDelete=" + autoDelete);
        assertEquals(durable, settings.isDurable());
        assertEquals(exclusive, settings.isExclusive());
        assertEquals(autoDelete, settings.autoDelete());
        assertEquals(2, settings.getDeliveryMode());
        assertEquals(name, settings.getQueueOrExchangeName());
        assertEquals(type, settings.getExchangeType());
        assertEquals(routingKey, settings.getRoutingKey());
        List<GetResponse> queue = buildQueue(random, batchSize);
        channel = mockChannelForExchange(channel, useWorkingChannel, exists, queueName, name, type, routingKey, queue);
        AMQPObservableQueue observableQueue = new AMQPObservableQueue(mockConnectionFactory(connection), addresses,
                true, settings, batchSize, pollTimeMs);
        assertArrayEquals(addresses, observableQueue.getAddresses());
        assertEquals(AMQPConstants.AMQP_EXCHANGE_TYPE, observableQueue.getType());
        assertEquals(AMQPConstants.AMQP_EXCHANGE_TYPE + ":" + name + "?exchangeType="
                + type + "&routingKey=" + routingKey + "&deliveryMode=2"
                + "&durable=" + durable + "&exclusive=" + exclusive + "&autoDelete=" + autoDelete,
                observableQueue.getName());
        assertEquals(name, observableQueue.getURI());
        assertEquals(batchSize, observableQueue.getBatchSize());
        assertEquals(pollTimeMs, observableQueue.getPollTimeInMS());
        assertEquals(queue.size(), observableQueue.size());
        runObserve(channel, observableQueue, queueName, useWorkingChannel, batchSize);
        if (useWorkingChannel) {
            verify(channel, atLeastOnce()).exchangeDeclare(eq(name), eq(type), eq(settings.isDurable()),
                    eq(settings.autoDelete()), eq(Collections.emptyMap()));
            verify(channel, atLeastOnce())
                    .queueDeclare(eq(queueName), eq(settings.isDurable()), eq(settings.isExclusive()),
                            eq(settings.autoDelete()),
                            anyMap());
            verify(channel, atLeastOnce()).queueBind(eq(queueName), eq(name), eq(routingKey));
        }
    }

    /** Publishes a batch to an exchange and verifies each message hit basicPublish. */
    private void testPublishMessagesToExchangeAndDefaultConfiguration(Channel channel, Connection connection,
            boolean exists, boolean useWorkingChannel) throws IOException, TimeoutException {
        final Random random = new Random();
        final String name = RandomStringUtils.randomAlphabetic(30), type = "topic",
                queueName = RandomStringUtils.randomAlphabetic(30), routingKey = RandomStringUtils.randomAlphabetic(30);
        final AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_exchange:" + name + "?exchangeType="
                + type + "&routingKey=" + routingKey + "&deliveryMode=2&durable=true&exclusive=false&autoDelete=true");
        assertTrue(settings.isDurable());
        assertFalse(settings.isExclusive());
        assertTrue(settings.autoDelete());
        assertEquals(2, settings.getDeliveryMode());
        assertEquals(name, settings.getQueueOrExchangeName());
        assertEquals(type, settings.getExchangeType());
        assertEquals(routingKey, settings.getRoutingKey());
        List<GetResponse> queue = buildQueue(random, batchSize);
        channel = mockChannelForExchange(channel, useWorkingChannel, exists, queueName, name, type, routingKey, queue);
        AMQPObservableQueue observableQueue = new AMQPObservableQueue(mockConnectionFactory(connection), addresses,
                true, settings, batchSize, pollTimeMs);
        assertArrayEquals(addresses, observableQueue.getAddresses());
        assertEquals(AMQPConstants.AMQP_EXCHANGE_TYPE, observableQueue.getType());
        assertEquals(
                AMQPConstants.AMQP_EXCHANGE_TYPE + ":" + name + "?exchangeType=" + type + "&routingKey=" + routingKey
                        + "&deliveryMode=2&durable=true&exclusive=false&autoDelete=true", observableQueue.getName());
        assertEquals(name, observableQueue.getURI());
        assertEquals(batchSize, observableQueue.getBatchSize());
        assertEquals(pollTimeMs, observableQueue.getPollTimeInMS());
        assertEquals(queue.size(), observableQueue.size());
        List<Message> messages = new LinkedList<>();
        Observable.range(0, batchSize).forEach((Integer x) -> messages.add(new Message("" + x, "payload: " + x, null)));
        assertEquals(batchSize, messages.size());
        observableQueue.publish(messages);
        if (useWorkingChannel) {
            verify(channel, times(batchSize)).basicPublish(eq(name), eq(routingKey), any(AMQP.BasicProperties.class),
                    any(byte[].class));
        }
    }

    @Test
    public void testGetMessagesFromExistingQueueAndDefaultConfiguration()
            throws IOException, TimeoutException {
        // Mock channel and connection
        Channel channel = mockBaseChannel();
        Connection connection = mockGoodConnection(channel);
        testGetMessagesFromQueueAndDefaultConfiguration(channel, connection, true, true);
    }

    @Test
    public void testGetMessagesFromNotExistingQueueAndDefaultConfiguration()
            throws IOException, TimeoutException {
        // Mock channel and connection
        Channel channel = mockBaseChannel();
        Connection connection = mockGoodConnection(channel);
        testGetMessagesFromQueueAndDefaultConfiguration(channel, connection, false, true);
    }

    @Test
    public void testPublishMessagesToNotExistingQueueAndDefaultConfiguration()
            throws IOException, TimeoutException {
        // Mock channel and connection
        Channel channel = mockBaseChannel();
        Connection connection = mockGoodConnection(channel);
        testPublishMessagesToQueueAndDefaultConfiguration(channel, connection, false, true);
    }

    @Test(expected = RuntimeException.class)
    public void testGetMessagesFromQueueWithBadConnection()
            throws IOException, TimeoutException {
        // Mock channel and connection
        Channel channel = mockBaseChannel();
        Connection connection = mockBadConnection();
        testGetMessagesFromQueueAndDefaultConfiguration(channel, connection, true, true);
    }

    @Test(expected = RuntimeException.class)
    public void testPublishMessagesToQueueWithBadConnection()
            throws IOException, TimeoutException {
        // Mock channel and connection
        Channel channel = mockBaseChannel();
        Connection connection = mockBadConnection();
        testPublishMessagesToQueueAndDefaultConfiguration(channel, connection, true, true);
    }

    @Test
    public void testGetMessagesFromQueueWithBadChannel()
            throws IOException, TimeoutException {
        // Mock channel and connection
        Channel channel = mockBaseChannel();
        Connection connection = mockGoodConnection(channel);
        testGetMessagesFromQueueAndDefaultConfiguration(channel, connection, true, false);
    }

    @Test(expected = RuntimeException.class)
    public void testPublishMessagesToQueueWithBadChannel()
            throws IOException, TimeoutException {
        // Mock channel and connection
        Channel channel = mockBaseChannel();
        Connection connection = mockGoodConnection(channel);
        testPublishMessagesToQueueAndDefaultConfiguration(channel, connection, true, false);
    }

    @Test(expected = IllegalArgumentException.class)
    public void testAMQPObservalbleQueue_empty()
            throws IOException, TimeoutException {
        AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:test");
        AMQPObservableQueue observableQueue = new AMQPObservableQueue(null,
                addresses, false, settings, batchSize, pollTimeMs);
    }

    @Test(expected = IllegalArgumentException.class)
    public void testAMQPObservalbleQueue_addressEmpty()
            throws IOException, TimeoutException {
        AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:test");
        AMQPObservableQueue observableQueue = new AMQPObservableQueue(
                mockConnectionFactory(mockGoodConnection(mockBaseChannel())),
                null, false, settings, batchSize, pollTimeMs);
    }

    @Test(expected = IllegalArgumentException.class)
    public void testAMQPObservalbleQueue_settingsEmpty()
            throws IOException, TimeoutException {
        AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:test");
        AMQPObservableQueue observableQueue = new AMQPObservableQueue(
                mockConnectionFactory(mockGoodConnection(mockBaseChannel())),
                addresses, false, null, batchSize, pollTimeMs);
    }

    @Test(expected = IllegalArgumentException.class)
    public void testAMQPObservalbleQueue_batchsizezero()
            throws IOException, TimeoutException {
        AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:test");
        AMQPObservableQueue observableQueue = new AMQPObservableQueue(
                mockConnectionFactory(mockGoodConnection(mockBaseChannel())),
                addresses, false, settings, 0, pollTimeMs);
    }

    @Test(expected = IllegalArgumentException.class)
    public void testAMQPObservalbleQueue_polltimezero()
            throws IOException, TimeoutException {
        AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:test");
        AMQPObservableQueue observableQueue = new AMQPObservableQueue(
                mockConnectionFactory(mockGoodConnection(mockBaseChannel())),
                addresses, false, settings, batchSize, 0);
    }

    @Test
    public void testclosetExistingQueueAndDefaultConfiguration()
            throws IOException, TimeoutException {
        // Mock channel and connection
        Channel channel = mockBaseChannel();
        Connection connection = mockGoodConnection(channel);
        testGetMessagesFromQueueAndDefaultConfiguration_close(channel, connection, false, true);
    }

    /** Exercises observe() against a plain queue configured via URI defaults. */
    private void testGetMessagesFromQueueAndDefaultConfiguration(Channel channel, Connection connection,
            boolean queueExists, boolean useWorkingChannel)
            throws IOException, TimeoutException {
        final Random random = new Random();
        final String queueName = RandomStringUtils.randomAlphabetic(30);
        AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:" + queueName);
        List<GetResponse> queue = buildQueue(random, batchSize);
        channel = mockChannelForQueue(channel, useWorkingChannel, queueExists, queueName, queue);
        AMQPObservableQueue observableQueue = new AMQPObservableQueue(
                mockConnectionFactory(connection),
                addresses, false, settings, batchSize, pollTimeMs);
        assertArrayEquals(addresses, observableQueue.getAddresses());
        assertEquals(AMQPConstants.AMQP_QUEUE_TYPE, observableQueue.getType());
        assertEquals(AMQPConstants.AMQP_QUEUE_TYPE + ":" + queueName, observableQueue.getName());
        assertEquals(queueName, observableQueue.getURI());
        assertEquals(batchSize, observableQueue.getBatchSize());
        assertEquals(pollTimeMs, observableQueue.getPollTimeInMS());
        assertEquals(queue.size(), observableQueue.size());
        runObserve(channel, observableQueue, queueName, useWorkingChannel, batchSize);
    }

    /** Verifies the queue's accessors still answer after close(). */
    private void testGetMessagesFromQueueAndDefaultConfiguration_close(Channel channel, Connection connection,
            boolean queueExists, boolean useWorkingChannel)
            throws IOException, TimeoutException {
        final Random random = new Random();
        final String queueName = RandomStringUtils.randomAlphabetic(30);
        AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:" + queueName);
        List<GetResponse> queue = buildQueue(random, batchSize);
        channel = mockChannelForQueue(channel, useWorkingChannel, queueExists, queueName, queue);
        AMQPObservableQueue observableQueue = new AMQPObservableQueue(
                mockConnectionFactory(connection),
                addresses, false, settings, batchSize, pollTimeMs);
        observableQueue.close();
        assertArrayEquals(addresses, observableQueue.getAddresses());
        assertEquals(AMQPConstants.AMQP_QUEUE_TYPE, observableQueue.getType());
        assertEquals(AMQPConstants.AMQP_QUEUE_TYPE + ":" + queueName, observableQueue.getName());
        assertEquals(queueName, observableQueue.getURI());
        assertEquals(batchSize, observableQueue.getBatchSize());
        assertEquals(pollTimeMs, observableQueue.getPollTimeInMS());
        assertEquals(queue.size(), observableQueue.size());
    }

    /** Publishes a batch to a plain queue and verifies each message hit basicPublish. */
    private void testPublishMessagesToQueueAndDefaultConfiguration(Channel channel, Connection connection,
            boolean queueExists, boolean useWorkingChannel)
            throws IOException, TimeoutException {
        final Random random = new Random();
        final String queueName = RandomStringUtils.randomAlphabetic(30);
        final AMQPSettings settings = new AMQPSettings(properties)
                .fromURI("amqp_queue:" + queueName + "?deliveryMode=2&durable=true&exclusive=false&autoDelete=true");
        assertTrue(settings.isDurable());
        assertFalse(settings.isExclusive());
        assertTrue(settings.autoDelete());
        assertEquals(2, settings.getDeliveryMode());
        List<GetResponse> queue = buildQueue(random, batchSize);
        channel = mockChannelForQueue(channel, useWorkingChannel, queueExists, queueName, queue);
        AMQPObservableQueue observableQueue = new AMQPObservableQueue(
                mockConnectionFactory(connection),
                addresses, false, settings, batchSize, pollTimeMs);
        assertArrayEquals(addresses, observableQueue.getAddresses());
        assertEquals(AMQPConstants.AMQP_QUEUE_TYPE, observableQueue.getType());
        assertEquals(AMQPConstants.AMQP_QUEUE_TYPE + ":" + queueName
                + "?deliveryMode=2&durable=true&exclusive=false&autoDelete=true", observableQueue.getName());
        assertEquals(queueName, observableQueue.getURI());
        assertEquals(batchSize, observableQueue.getBatchSize());
        assertEquals(pollTimeMs, observableQueue.getPollTimeInMS());
        assertEquals(queue.size(), observableQueue.size());
        List<Message> messages = new LinkedList<>();
        Observable.range(0, batchSize).forEach((Integer x) -> messages.add(new Message("" + x, "payload: " + x, null)));
        assertEquals(batchSize, messages.size());
        observableQueue.publish(messages);
        if (useWorkingChannel) {
            verify(channel, times(batchSize)).basicPublish(eq(StringUtils.EMPTY), eq(queueName),
                    any(AMQP.BasicProperties.class), any(byte[].class));
        }
    }
}
|
package io.easeci.core.workspace.cache;
import java.nio.file.Path;
/**
 * Interface exposing methods for removing resources
 * from the cache of EaseCI's workspace.
 * @author Karol Meksuła
 * 2020-11-07
 * */
public interface CacheGarbageCollector {
    /**
     * Removes a concrete file or directory placed in .cache/ in the workspace.
     * @param path is a path to a file or directory.
     *             The path must be secured so that only resources inside
     *             the workspace can be removed, never anything outside of it
     * @return long value that informs about the deleted resources' size in bytes
     * */
    long cleanup(Path path);
    /**
     * Removes all files and directories placed in .cache/ in the workspace.
     * @return long value that informs about the deleted resources' size in bytes
     * */
    long cleanup();
}
|
package uva.tomo113;
/*uva 11388*/
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Locale;
import java.util.StringTokenizer;
public class GCD_LCM {
    static BufferedReader input;
    static StringTokenizer _stk;

    /** Reads one line and primes the tokenizer; returns null at end of input. */
    static String readln() throws IOException {
        String l = input.readLine();
        if (l != null) _stk = new StringTokenizer(l, " ");
        return l;
    }

    static String next() { return _stk.nextToken(); }

    static int nextInt() { return Integer.parseInt(next()); }

    /**
     * Core of UVa 11388: a pair with GCD g and LCM l exists iff g divides l,
     * in which case (g, l) itself is the lexicographically smallest answer.
     *
     * @param g the required greatest common divisor
     * @param l the required least common multiple
     * @return "g l" when such a pair exists, otherwise "-1"
     */
    static String solve(int g, int l) {
        return (l % g == 0) ? (g + " " + l) : "-1";
    }

    public static void main(String[] args) throws IOException {
        Locale.setDefault(Locale.US);
        input = new BufferedReader(new InputStreamReader(System.in));
        // input=new BufferedReader(new FileReader("GCD_LCM"));
        readln();
        int numCases = nextInt();
        for (int casesId = 1; casesId <= numCases; casesId++) {
            readln();
            int g = nextInt(), l = nextInt();
            // println appends '\n', matching the original printf("... \n") output
            System.out.println(solve(g, l));
        }
    }
}
|
/*
* $Header: /home/cvs/jakarta-tomcat-4.0/webapps/admin/WEB-INF/classes/org/apache/webapp/admin/users/GroupsForm.java,v 1.1 2002/02/10 05:48:57 craigmcc Exp $
* $Revision: 1.1 $
* $Date: 2002/02/10 05:48:57 $
*
* ====================================================================
*
* The Apache Software License, Version 1.1
*
* Copyright (c) 2002 The Apache Software Foundation. All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. The end-user documentation included with the redistribution, if
* any, must include the following acknowlegement:
* "This product includes software developed by the
* Apache Software Foundation (http://www.apache.org/)."
* Alternately, this acknowlegement may appear in the software itself,
* if and wherever such third-party acknowlegements normally appear.
*
* 4. The names "The Jakarta Project", "Struts", and "Apache Software
* Foundation" must not be used to endorse or promote products derived
* from this software without prior written permission. For written
* permission, please contact apache@apache.org.
*
* 5. Products derived from this software may not be called "Apache"
* nor may "Apache" appear in their names without prior written
* permission of the Apache Group.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*
*/
package org.apache.webapp.admin.users;
import javax.servlet.http.HttpServletRequest;
import org.apache.struts.action.ActionError;
import org.apache.struts.action.ActionErrors;
import org.apache.struts.action.ActionForm;
import org.apache.struts.action.ActionMapping;
/**
 * Form bean for the delete groups page.
 *
 * @author Craig R. McClanahan
 * @version $Revision: 1.1 $ $Date: 2002/02/10 05:48:57 $
 * @since 4.1
 */
public final class GroupsForm extends BaseForm {

    // ----------------------------------------------------- Instance Variables

    // ------------------------------------------------------------- Properties

    /**
     * The object names of the specified groups.
     */
    private String[] groups = null;

    public String[] getGroups() {
        return this.groups;
    }

    public void setGroups(String[] groups) {
        this.groups = groups;
    }

    // --------------------------------------------------------- Public Methods

    /**
     * Reset all properties to their default values.
     *
     * @param mapping The mapping used to select this instance
     * @param request The servlet request we are processing
     */
    public void reset(ActionMapping mapping, HttpServletRequest request) {
        super.reset(mapping, request);
        this.groups = null;
    }
}
|
package improvements;
public class Lumbermill {
    // All improvements would be built by the worker onto a tile, if said
    // worker were implemented in the game. A lumbermill must be built on a
    // forest tile and increases the production of a city.
    protected int goldMaintenance = 2;
    protected int timeToBuild = 4;
    protected int productionBase = 2;
    protected int productionPotential = 3;

    public Lumbermill() {
        // All state is established by the field initializers above.
    }
}
|
package uk.gov.hmcts.reform.iacaseapi.domain.handlers.postsubmit;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.when;
import static uk.gov.hmcts.reform.iacaseapi.domain.entities.AsylumCaseFieldDefinition.*;
import java.util.Optional;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import uk.gov.hmcts.reform.iacaseapi.domain.entities.AsylumCase;
import uk.gov.hmcts.reform.iacaseapi.domain.entities.ccd.field.PaymentStatus;
@ExtendWith(MockitoExtension.class)
@SuppressWarnings("unchecked")
class AppealPaymentConfirmationProviderTest {
    // Mocked case; each test stubs only the fields it reads.
    @Mock
    private AsylumCase asylumCase;
    // System under test; stateless, so a single shared instance is fine.
    private AppealPaymentConfirmationProvider appealPaymentConfirmationProvider =
        new AppealPaymentConfirmationProvider();
    // Payment reference is read straight from PAYMENT_REFERENCE.
    @Test
    void should_return_correct_value_for_payment_reference() {
        when(asylumCase.read(PAYMENT_REFERENCE, String.class)).thenReturn(Optional.of("Some account reference"));
        assertThat(appealPaymentConfirmationProvider.getPaymentReferenceNumber(asylumCase))
            .isEqualTo("Some account reference");
    }
    // Account number comes from the PBA_NUMBER field.
    @Test
    void should_return_correct_value_for_account_number() {
        when(asylumCase.read(PBA_NUMBER, String.class)).thenReturn(Optional.of("Some account number"));
        assertThat(appealPaymentConfirmationProvider.getPaymentAccountNumber(asylumCase))
            .isEqualTo("Some account number");
    }
    // With a "decisionWithHearing" option the fee is taken from FEE_WITH_HEARING.
    @Test
    void should_return_correct_value_for_fee_amount_for_display() {
        when(asylumCase.read(DECISION_HEARING_FEE_OPTION, String.class)).thenReturn(Optional.of("decisionWithHearing"));
        when(asylumCase.read(FEE_WITH_HEARING, String.class)).thenReturn(Optional.of("140"));
        assertThat(appealPaymentConfirmationProvider.getFee(asylumCase)).isEqualTo("140");
    }
    // With a "decisionWithoutHearing" option the fee is taken from FEE_WITHOUT_HEARING.
    @Test
    void should_return_correct_fee_for_fee_without_hearing() {
        when(asylumCase.read(DECISION_HEARING_FEE_OPTION, String.class)).thenReturn(Optional.of("decisionWithoutHearing"));
        when(asylumCase.read(FEE_WITHOUT_HEARING, String.class)).thenReturn(Optional.of("80"));
        assertThat(appealPaymentConfirmationProvider.getFee(asylumCase)).isEqualTo("80");
    }
    // Payment status is returned as the Optional read from the case, not unwrapped.
    @Test
    void should_return_correct_value_for_payment_status() {
        when(asylumCase.read(PAYMENT_STATUS, PaymentStatus.class)).thenReturn(Optional.of(PaymentStatus.PAID));
        assertThat(appealPaymentConfirmationProvider.getPaymentStatus(asylumCase))
            .isEqualTo(Optional.of(PaymentStatus.PAID));
    }
}
|
package example.service;
import example.repo.Customer1082Repository;
import org.springframework.stereotype.Service;
@Service
public class Customer1082Service {
    // Constructor injection of the repository. The reference is not kept;
    // presumably this class exists only to exercise Spring wiring — TODO confirm.
    public Customer1082Service(Customer1082Repository repo) {
    }
}
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.adoptopenjdk.test;
import org.testng.annotations.Test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Logger;
import static net.adoptopenjdk.test.JdkPlatform.Architecture;
import static net.adoptopenjdk.test.JdkPlatform.OperatingSystem;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertTrue;
/**
 * Tests the availability of various features like garbage collectors, flight recorder, that need to be enabled via
 * command line flags.
 */
@Test(groups = {"level.extended"})
public class FeatureTests {

    private static final Logger LOGGER = Logger.getLogger(FeatureTests.class.getName());

    private final JdkVersion jdkVersion = new JdkVersion();

    private final JdkPlatform jdkPlatform = new JdkPlatform();

    /**
     * Tests whether Shenandoah GC is available.
     * <p/>
     * Shenandoah GC was enabled by default with JDK 15 (JEP 379) and backported to 11.0.9.
     *
     * @see <a href="https://openjdk.java.net/jeps/379">JEP 379: Shenandoah: A Low-Pause-Time Garbage
     * Collector (Production)</a>
     * @see <a href="https://bugs.openjdk.java.net/browse/JDK-8250784">JDK-8250784 (Backport)</a>
     * @see <a href="https://wiki.openjdk.java.net/display/shenandoah/Main#Main-SupportOverview">Shenandoah Support
     * Overview</a>
     */
    @Test
    public void testShenandoahAvailable() {
        boolean shouldBePresent = false;
        if ((jdkVersion.isNewerOrEqual(15) || jdkVersion.isNewerOrEqualSameFeature(11, 0, 9))) {
            if (jdkPlatform.runsOn(OperatingSystem.LINUX, Architecture.AARCH64)
                    || jdkPlatform.runsOn(OperatingSystem.LINUX, Architecture.X86)
                    || jdkPlatform.runsOn(OperatingSystem.LINUX, Architecture.X64)
                    || jdkPlatform.runsOn(OperatingSystem.MACOS, Architecture.X64)
                    || jdkPlatform.runsOn(OperatingSystem.MACOS, Architecture.AARCH64)
                    || jdkPlatform.runsOn(OperatingSystem.WINDOWS, Architecture.X64)
                    || jdkPlatform.runsOn(OperatingSystem.WINDOWS, Architecture.AARCH64)
            ) {
                shouldBePresent = true;
            }
        }
        verifyFeatureAvailability("Shenandoah", "-XX:+UseShenandoahGC", shouldBePresent);
    }

    /**
     * Tests whether Z Garbage Collector is available.
     * <p/>
     * Z Garbage Collector was enabled by default with JDK 15 (JEP 377).
     *
     * @see <a href="https://openjdk.java.net/jeps/377">JEP 377: ZGC: A Scalable Low-Latency Garbage Collector
     * (Production)</a>
     */
    @Test
    public void testZGCAvailable() {
        boolean shouldBePresent = false;
        if (jdkVersion.isNewerOrEqual(15)) {
            if (jdkPlatform.runsOn(OperatingSystem.LINUX, Architecture.AARCH64)
                    || jdkPlatform.runsOn(OperatingSystem.LINUX, Architecture.X64)
                    || jdkPlatform.runsOn(OperatingSystem.MACOS, Architecture.X64)
                    || jdkPlatform.runsOn(OperatingSystem.WINDOWS, Architecture.X64)
            ) {
                shouldBePresent = true;
            }
        }
        verifyFeatureAvailability("ZGC", "-XX:+UseZGC", shouldBePresent);
    }

    /**
     * Tests whether JDK Flight Recorder is available.
     * <p/>
     * JDK Flight recorder was added to JDK 11 (JEP 328) and backported to JDK 8u262.
     *
     * @see <a href="https://openjdk.java.net/jeps/328">JEP 328: Flight Recorder</a>
     * @see <a href="https://bugs.openjdk.java.net/browse/JDK-8223147>JDK-8223147 (backport to 8u262)</a>
     */
    @Test
    public void testJFRAvailable() {
        boolean shouldBePresent = false;
        if (jdkVersion.isNewerOrEqual(11) || jdkVersion.isNewerOrEqualSameFeature(8, 0, 262)) {
            shouldBePresent = true;
        }
        verifyFeatureAvailability("JFR", "-XX:StartFlightRecording", shouldBePresent);
    }

    /**
     * Shared driver for the feature-availability tests above: launches
     * {@code $TEST_JDK_HOME/bin/java <vmOption> -version} and asserts on the exit code.
     * A zero exit code means the JVM accepted the flag, i.e. the feature is present.
     *
     * @param featureName     human-readable feature name used in log/assertion messages
     * @param vmOption        the command-line flag that enables the feature
     * @param shouldBePresent whether the current JDK version/platform combination
     *                        is expected to support the feature
     */
    private void verifyFeatureAvailability(String featureName, String vmOption, boolean shouldBePresent) {
        String testJdkHome = System.getenv("TEST_JDK_HOME");
        if (testJdkHome == null) {
            throw new AssertionError("TEST_JDK_HOME is not set");
        }
        LOGGER.info(String.format("Detected %s on %s, expect %s to be present: %s",
                jdkVersion, jdkPlatform, featureName, shouldBePresent));
        List<String> command = new ArrayList<>();
        command.add(String.format("%s/bin/java", testJdkHome));
        command.add(vmOption);
        command.add("-version");
        try {
            ProcessBuilder processBuilder = new ProcessBuilder(command);
            processBuilder.inheritIO();
            int retCode = processBuilder.start().waitFor();
            if (shouldBePresent) {
                assertEquals(retCode, 0,
                        String.format("Expected %s to be present but it is absent.", featureName));
            } else {
                assertTrue(retCode > 0,
                        String.format("Expected %s to be absent but it is present.", featureName));
            }
        } catch (InterruptedException e) {
            // Restore the interrupt status before translating to unchecked.
            Thread.currentThread().interrupt();
            throw new RuntimeException("Failed to launch JVM", e);
        } catch (IOException e) {
            throw new RuntimeException("Failed to launch JVM", e);
        }
    }
}
|
/*
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.cloud.spanner.connection;
import static org.mockito.Matchers.anyListOf;
import static org.mockito.Matchers.eq;
import static org.mockito.Matchers.isNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.spanner.DatabaseAdminClient;
import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutionException;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
@RunWith(JUnit4.class)
public class DdlClientTest {
  // Fixed identifiers expected to be passed through to the admin client verbatim.
  private final String instanceId = "test-instance";
  private final String databaseId = "test-database";
  // Builds the system under test wired to the given (mocked) admin client.
  private DdlClient createSubject(DatabaseAdminClient client) {
    return DdlClient.newBuilder()
        .setInstanceId(instanceId)
        .setDatabaseName(databaseId)
        .setDatabaseAdminClient(client)
        .build();
  }
  // Verifies that both the single-statement and multi-statement executeDdl
  // overloads delegate to DatabaseAdminClient.updateDatabaseDdl with the
  // instance/database ids, the statement list, and a null operation id.
  @Test
  public void testExecuteDdl() throws InterruptedException, ExecutionException {
    DatabaseAdminClient client = mock(DatabaseAdminClient.class);
    @SuppressWarnings("unchecked")
    OperationFuture<Void, UpdateDatabaseDdlMetadata> operation = mock(OperationFuture.class);
    when(operation.get()).thenReturn(null);
    when(client.updateDatabaseDdl(
            eq(instanceId), eq(databaseId), anyListOf(String.class), isNull(String.class)))
        .thenReturn(operation);
    DdlClient subject = createSubject(client);
    String ddl = "CREATE TABLE FOO";
    subject.executeDdl(ddl);
    verify(client).updateDatabaseDdl(instanceId, databaseId, Collections.singletonList(ddl), null);
    subject = createSubject(client);
    List<String> ddlList = Arrays.asList("CREATE TABLE FOO", "DROP TABLE FOO");
    subject.executeDdl(ddlList);
    verify(client).updateDatabaseDdl(instanceId, databaseId, ddlList, null);
  }
}
|
/*
* Copyright 2018 tweerlei Wruck + Buchmeier GbR - http://www.tweerlei.de/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.tweerlei.dbgrazer.query.model.impl;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import de.tweerlei.dbgrazer.query.model.ColumnDef;
import de.tweerlei.dbgrazer.query.model.Query;
import de.tweerlei.dbgrazer.query.model.ResultRow;
import de.tweerlei.dbgrazer.query.model.ResultVisitor;
import de.tweerlei.dbgrazer.query.model.RowIterator;
import de.tweerlei.dbgrazer.query.model.RowSet;
/**
 * Default in-memory implementation of {@link RowSet}.
 *
 * @author Robert Wruck
 */
public class RowSetImpl implements RowSet {
    private final Query query;
    private final int subQueryIndex;
    private final List<String> parameters;
    private final List<ColumnDef> columns;
    private final List<ResultRow> rows;
    private final Map<String, Object> attributes;
    private long time;
    private int affectedRows;
    private boolean moreAvailable;

    /**
     * Constructor
     * @param query Query
     * @param subQueryIndex Subquery index
     * @param columns Column definitions; null is treated as "no columns"
     */
    public RowSetImpl(Query query, int subQueryIndex, List<ColumnDef> columns) {
        this.query = query;
        this.subQueryIndex = subQueryIndex;
        this.columns = (columns == null) ? new ArrayList<ColumnDef>(0) : columns;
        this.rows = new LinkedList<ResultRow>();
        this.attributes = new HashMap<String, Object>();
        this.parameters = new ArrayList<String>();
        // -1 means "not explicitly set"; getAffectedRows() then reports rows.size()
        this.affectedRows = -1;
    }

    @Override
    public Query getQuery() {
        return query;
    }

    @Override
    public List<String> getParameterValues() {
        return parameters;
    }

    @Override
    public int getSubQueryIndex() {
        return subQueryIndex;
    }

    @Override
    public List<ColumnDef> getColumns() {
        return columns;
    }

    @Override
    public List<ResultRow> getRows() {
        return rows;
    }

    @Override
    public RowIterator iterator() {
        return new RowSetIterator(this);
    }

    @Override
    public ResultRow getFirstRow() {
        return rows.isEmpty() ? EmptyResultRow.getInstance() : rows.get(0);
    }

    @Override
    public List<Object> getFirstColumn() {
        final List<Object> firstValues = new ArrayList<Object>(rows.size());
        for (ResultRow row : rows) {
            firstValues.add(row.getValues().get(0));
        }
        return firstValues;
    }

    @Override
    public Object getFirstValue() {
        return rows.isEmpty() ? null : rows.get(0).getValues().get(0);
    }

    @Override
    public int getAffectedRows() {
        return (affectedRows < 0) ? rows.size() : affectedRows;
    }

    /**
     * Set the affected rows
     * @param affectedRows affected row count
     */
    public void setAffectedRows(int affectedRows) {
        this.affectedRows = affectedRows;
    }

    @Override
    public boolean isMoreAvailable() {
        return moreAvailable;
    }

    /**
     * Set the moreAvailable flag
     * @param moreAvailable the moreAvailable to set
     */
    public void setMoreAvailable(boolean moreAvailable) {
        this.moreAvailable = moreAvailable;
    }

    @Override
    public long getQueryTime() {
        return time;
    }

    /**
     * Set the time taken
     * @param time Milliseconds
     */
    public void setQueryTime(long time) {
        this.time = time;
    }

    @Override
    public Map<String, Object> getAttributes() {
        return attributes;
    }

    @Override
    public void accept(ResultVisitor v, int level) {
        // Only descend into rows if the visitor accepts this row set at all
        if (v.startRowSet(this)) {
            for (ResultRow row : rows) {
                row.accept(v, level);
            }
            v.endRowSet(this);
        }
    }

    @Override
    public RowSetImpl clone() {
        // Deep copy: the column list and every row are duplicated,
        // parameters and attributes are copied element-wise.
        final RowSetImpl copy = new RowSetImpl(query, subQueryIndex, new ArrayList<ColumnDef>(columns));
        copy.setAffectedRows(affectedRows);
        copy.setMoreAvailable(moreAvailable);
        copy.setQueryTime(time);
        copy.getParameterValues().addAll(parameters);
        copy.getAttributes().putAll(attributes);
        for (ResultRow row : rows) {
            copy.getRows().add(row.clone());
        }
        return copy;
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tomcat.util.descriptor.web;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
/**
 * Base class for those elements that need to track the encoding used in the
 * source XML.
 */
public abstract class XmlEncodingBase {

    // Encoding of the source XML; stays UTF-8 until explicitly overwritten.
    private Charset encoding = StandardCharsets.UTF_8;

    /**
     * Record the character encoding of the XML source that was used to
     * populate this object.
     *
     * @param charset The character encoding of the associated XML source
     */
    public void setCharset(Charset charset) {
        this.encoding = charset;
    }

    /**
     * Obtain the character encoding of the XML source that was used to
     * populated this object.
     *
     * @return The character encoding of the associated XML source or
     *         <code>UTF-8</code> if the encoding could not be determined
     */
    public Charset getCharset() {
        return encoding;
    }
}
|
/*
* PermissionsEx
* Copyright (C) zml and PermissionsEx contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ca.stellardrift.permissionsex.fabric.impl.bridge;
import ca.stellardrift.permissionsex.subject.SubjectRef;
import net.minecraft.server.command.ServerCommandSource;
import org.checkerframework.checker.nullness.qual.Nullable;
public interface ServerCommandSourceBridge {
    /**
     * Apply a permission subject override for a {@link ServerCommandSource}.
     *
     * @param override the subject to act as, or {@code null} to clear the override
     * @return a command source carrying the override
     */
    ServerCommandSource withSubjectOverride(@Nullable SubjectRef<?> override);
    /**
     * Set a new override on an existing {@link ServerCommandSource}.
     *
     * Internal use only.
     *
     * {@link #withSubjectOverride(SubjectRef)} should probably be used instead.
     *
     * @param override the subject to act as, or {@code null} to clear the override
     */
    void subjectOverride(@Nullable SubjectRef<?> override);
    /**
     * Get the subject override for a {@link ServerCommandSource}.
     *
     * @return the current override, or {@code null} if none is set
     */
    @Nullable SubjectRef<?> subjectOverride();
}
|
import service.Engine;
import java.io.IOException;
public class HospitalDemo {
    /**
     * Application entry point: constructs the service {@code Engine} and runs it.
     *
     * @param args command-line arguments (unused)
     * @throws IOException if the engine's underlying I/O fails
     */
    public static void main(String[] args) throws IOException {
        Engine engine = new Engine();
        engine.run();
    }
}
|
package com.sequenceiq.freeipa.service.image.userdata;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import javax.inject.Inject;
import org.apache.commons.codec.binary.Base64;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.core.task.AsyncTaskExecutor;
import org.springframework.stereotype.Service;
import com.sequenceiq.cloudbreak.ccm.cloudinit.CcmConnectivityParameters;
import com.sequenceiq.cloudbreak.certificate.PkiUtil;
import com.sequenceiq.cloudbreak.cloud.PlatformParameters;
import com.sequenceiq.cloudbreak.cloud.model.Platform;
import com.sequenceiq.cloudbreak.cloud.service.GetCloudParameterException;
import com.sequenceiq.cloudbreak.dto.ProxyConfig;
import com.sequenceiq.cloudbreak.logger.MDCUtils;
import com.sequenceiq.cloudbreak.service.proxy.ProxyConfigDtoService;
import com.sequenceiq.freeipa.dto.Credential;
import com.sequenceiq.freeipa.entity.SaltSecurityConfig;
import com.sequenceiq.freeipa.entity.SecurityConfig;
import com.sequenceiq.freeipa.entity.Stack;
import com.sequenceiq.freeipa.service.CredentialService;
import com.sequenceiq.freeipa.service.cloud.PlatformParameterService;
import com.sequenceiq.freeipa.service.image.ImageService;
import com.sequenceiq.freeipa.service.stack.StackService;
@Service
public class UserDataService {

    private static final Logger LOGGER = LoggerFactory.getLogger(UserDataService.class);

    @Inject
    private UserDataBuilder userDataBuilder;

    @Inject
    private PlatformParameterService platformParameterService;

    @Inject
    private AsyncTaskExecutor intermediateBuilderExecutor;

    @Inject
    private CredentialService credentialService;

    @Inject
    private ProxyConfigDtoService proxyConfigDtoService;

    @Inject
    private StackService stackService;

    @Inject
    private ImageService imageService;

    @Inject
    private CcmUserDataService ccmUserDataService;

    /**
     * Builds the cloud-init user data for the given stack and attaches it to the
     * stack's image. Platform parameters are fetched asynchronously while the
     * security/CCM/proxy inputs are gathered.
     *
     * @param stackId id of the stack to generate user data for
     * @throws GetCloudParameterException if fetching the platform parameters fails
     *         or the current thread is interrupted while waiting for them
     */
    public void createUserData(Long stackId) {
        Stack stack = stackService.getStackById(stackId);
        Credential credential = credentialService.getCredentialByEnvCrn(stack.getEnvironmentCrn());
        Optional<String> requestId = MDCUtils.getRequestId();
        // Kick off the platform-parameter lookup in the background; it is joined below.
        Future<PlatformParameters> platformParametersFuture =
                intermediateBuilderExecutor.submit(() -> platformParameterService.getPlatformParameters(requestId, stack, credential));
        SecurityConfig securityConfig = stack.getSecurityConfig();
        SaltSecurityConfig saltSecurityConfig = securityConfig.getSaltSecurityConfig();
        String cbPrivKey = saltSecurityConfig.getSaltBootSignPrivateKey();
        // The salt-boot signing key is stored base64 encoded; derive its public key in DER form.
        byte[] cbSshKeyDer = PkiUtil.getPublicKeyDer(new String(Base64.decodeBase64(cbPrivKey)));
        String sshUser = stack.getStackAuthentication().getLoginUserName();
        String cbCert = securityConfig.getClientCert();
        String saltBootPassword = saltSecurityConfig.getSaltBootPassword();
        try {
            PlatformParameters platformParameters = platformParametersFuture.get();
            CcmConnectivityParameters ccmParameters = ccmUserDataService.fetchAndSaveCcmParameters(stack);
            Optional<ProxyConfig> proxyConfig = proxyConfigDtoService.getByEnvironmentCrn(stack.getEnvironmentCrn());
            String userData = userDataBuilder.buildUserData(Platform.platform(stack.getCloudPlatform()), cbSshKeyDer, sshUser, platformParameters,
                    saltBootPassword, cbCert, ccmParameters, proxyConfig.orElse(null));
            imageService.decorateImageWithUserDataForStack(stack, userData);
        } catch (InterruptedException e) {
            // Restore the interrupt status before translating to a domain exception.
            Thread.currentThread().interrupt();
            LOGGER.error("Failed to get Platform parameters", e);
            throw new GetCloudParameterException("Failed to get Platform parameters", e);
        } catch (ExecutionException e) {
            LOGGER.error("Failed to get Platform parameters", e);
            throw new GetCloudParameterException("Failed to get Platform parameters", e);
        }
    }
}
|
package org.kie.services.client.api.command;
import org.drools.core.command.CommandService;
import org.drools.core.command.impl.CommandBasedStatefulKnowledgeSession;
import org.jbpm.process.audit.AuditLogService;
import org.jbpm.process.audit.CommandBasedAuditLogService;
import org.jbpm.services.task.events.TaskEventSupport;
import org.jbpm.services.task.impl.command.CommandBasedTaskService;
import org.kie.api.runtime.CommandExecutor;
import org.kie.api.runtime.KieSession;
import org.kie.api.runtime.manager.RuntimeEngine;
import org.kie.api.task.TaskService;
public class RemoteRuntimeEngine implements RuntimeEngine {

    // Connection settings shared by every service this engine hands out.
    private final RemoteConfiguration config;

    public RemoteRuntimeEngine(RemoteConfiguration configuration) {
        this.config = configuration;
    }

    /** @return a KieSession whose commands are dispatched to the remote session service */
    public KieSession getKieSession() {
        return new CommandBasedStatefulKnowledgeSession(new RemoteSessionCommandService(config));
    }

    /** @return a TaskService backed by a remote task command executor */
    public TaskService getTaskService() {
        CommandExecutor executor = new RemoteTaskCommandExecutor(config);
        return new CommandBasedTaskService((CommandService) executor, new TaskEventSupport());
    }

    /** @return an AuditLogService whose queries run through the remote session service */
    public AuditLogService getAuditLogService() {
        return new CommandBasedAuditLogService(new RemoteSessionCommandService(config));
    }
}
|
package TableMapping.Fields;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * Class that represents a SQL field that holds an Enum - it can hold 1 value from a given list of values
 */
@Setter
@Getter
@NoArgsConstructor
//Only 1 of the elements
public class EnumField extends Field {
    /**
     * List of values the Field can hold
     */
    private List<String> elements;

    /**
     * {@inheritDoc}
     *
     * Renders e.g. {@code Column Type:enum('a','b')}; an empty element list
     * renders as {@code Column Type:enum()}.
     */
    @Override
    public String writeFieldInfo() {
        StringBuilder enumField = new StringBuilder();
        enumField.append("Column Type:").append(this.getSqlType()).append("(");
        elements.forEach(element -> enumField.append("'").append(element).append("',"));
        // Strip the trailing comma only when at least one element was appended;
        // previously an empty list caused the opening "(" to be deleted instead.
        if (!elements.isEmpty()) {
            enumField.deleteCharAt(enumField.length() - 1);
        }
        enumField.append(")");
        return enumField.toString();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void setFieldInfo(String[] info) {
        if (this.isInfoNullOrEmpty(info)) {
            this.setSqlType("Not Given");
            return;
        }
        elements = new ArrayList<>();
        this.setSqlType(info[0]);
        // assumes every element after the type is quoted, e.g. 'value' — TODO confirm
        Arrays.stream(info).skip(1).forEach(element -> {
            elements.add(element == null ? "NotGiven" : element.substring(1, element.length() - 1));
        });
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        EnumField enumField = (EnumField) o;
        // Equality is defined solely by the element list.
        return new EqualsBuilder().append(elements, enumField.elements).isEquals();
    }

    @Override
    public int hashCode() {
        return new HashCodeBuilder(17, 37).append(elements).toHashCode();
    }
}
|
/*
* Copyright (c) 2015-present, Parse, LLC.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
package com.parse;
import android.content.Context;
import android.database.sqlite.SQLiteDatabase;
import android.database.sqlite.SQLiteOpenHelper;
import bolts.Task;
abstract class ParseSQLiteOpenHelper {

  // Underlying Android helper; its lifecycle callbacks are forwarded to this class.
  private final SQLiteOpenHelper delegate;

  public ParseSQLiteOpenHelper(Context context, String name, SQLiteDatabase.CursorFactory factory,
      int version) {
    delegate = new SQLiteOpenHelper(context, name, factory, version) {
      @Override
      public void onOpen(SQLiteDatabase db) {
        super.onOpen(db);
        ParseSQLiteOpenHelper.this.onOpen(db);
      }

      @Override
      public void onCreate(SQLiteDatabase db) {
        ParseSQLiteOpenHelper.this.onCreate(db);
      }

      @Override
      public void onUpgrade(SQLiteDatabase db, int oldVersion, int newVersion) {
        ParseSQLiteOpenHelper.this.onUpgrade(db, oldVersion, newVersion);
      }
    };
  }

  /** Opens the database read-only, asynchronously. */
  public Task<ParseSQLiteDatabase> getReadableDatabaseAsync() {
    return getDatabaseAsync(false);
  }

  /** Opens the database read-write, asynchronously. */
  public Task<ParseSQLiteDatabase> getWritableDatabaseAsync() {
    return getDatabaseAsync(true);
  }

  private Task<ParseSQLiteDatabase> getDatabaseAsync(final boolean writable) {
    int openFlags = writable ? SQLiteDatabase.OPEN_READWRITE : SQLiteDatabase.OPEN_READONLY;
    return ParseSQLiteDatabase.openDatabaseAsync(delegate, openFlags);
  }

  /** Called when the database has been opened; no-op by default, subclasses may override. */
  public void onOpen(SQLiteDatabase db) {
    // do nothing
  }

  public abstract void onCreate(SQLiteDatabase db);

  public abstract void onUpgrade(SQLiteDatabase db, int oldVersion, int newVersion);
}
|
package com.gulimall.order;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
// Smoke test: verifies the Spring application context for the order service starts cleanly.
@RunWith(SpringRunner.class)
@SpringBootTest
public class GulimallOrderApplicationTests {
    // Passes as long as the application context loads without throwing.
    @Test
    public void contextLoads() {
    }
}
|
package com.howoo.myrestaurant.naver.dto;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.Getter;
import lombok.NoArgsConstructor;
import java.util.List;
/**
 * Response DTO for the Naver image search API; field names mirror the JSON
 * payload returned by the service.
 */
@Data
@AllArgsConstructor
@NoArgsConstructor
public class SearchImageRes {
    // timestamp string from the API — presumably when the result set was built; TODO confirm against Naver docs
    private String lastBuildDate;
    // individual search hits
    private List<SearchImageResItem> items;
    // total number of matches reported by the API
    private int total;
    /**
     * One image hit: its title, page link and thumbnail URL.
     */
    @Data
    @AllArgsConstructor
    @NoArgsConstructor
    public static class SearchImageResItem{
        private String title;
        private String link;
        private String thumbnail;
    }
}
|
package com.xiepuhuan.reptile.exception;
/**
 * Unchecked exception indicating that an object is not supported by the caller.
 * Mirrors the four standard {@link RuntimeException} constructors.
 *
 * @author xiepuhuan
 */
public class UnsupportedObjectException extends RuntimeException {
    /** Creates the exception without a detail message. */
    public UnsupportedObjectException() {
    }
    /** @param message detail message describing the unsupported object */
    public UnsupportedObjectException(String message) {
        super(message);
    }
    /**
     * @param message detail message describing the unsupported object
     * @param cause underlying cause
     */
    public UnsupportedObjectException(String message, Throwable cause) {
        super(message, cause);
    }
    /** @param cause underlying cause */
    public UnsupportedObjectException(Throwable cause) {
        super(cause);
    }
}
|
package com.jeterlee.sample.basis;
import java.lang.annotation.Documented;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import javax.inject.Qualifier;
/**
 * Dependency-injection {@link Qualifier} distinguishing bindings by leather color.
 * When no value is given, the color defaults to {@code "white"}.
 */
@Qualifier
@Documented
@Retention(RetentionPolicy.RUNTIME)
public @interface LeatherColor {
    String color() default "white";
}
|
/**
* Copyright 2017 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @author Yuriy Dmitriev (ydmitriev@productengine.com)
*/
package com.comcast.apps.e2e.managers;
import com.comcast.apps.e2e.helpers.FilesPathHelper;
import com.comcast.apps.e2e.helpers.ServiceHelper;
import com.comcast.apps.e2e.utils.FileUtil;
import com.comcast.redirector.api.model.testsuite.*;
import com.comcast.redirector.common.RedirectorConstants;
import com.comcast.redirector.common.serializers.SerializerException;
import com.comcast.redirector.endpoint.http.Constants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
 * Builds an end-to-end test report for one application: pulls recorded test
 * sessions from the redirector's report endpoint, matches them against the
 * expected test-case definitions, and writes the result as JSON.
 */
public class ReportManager {
    private static final Logger log = LoggerFactory.getLogger(ReportManager.class);
    /** REST endpoint exposing recorded end-to-end sessions (GET) and clearing them (DELETE). */
    public static final String END_TO_END_REPORT_ENDPOINT = Constants.SERVICE_URL_PREFIX + RedirectorConstants.END_TO_END_REPORT_PATH;
    private final String appName;
    private final FileUtil fileUtil;
    private final FilesPathHelper filesPathHelper;
    private final ServiceHelper serviceHelper;
    public ReportManager(String appName, FileUtil fileUtil, FilesPathHelper filesPathHelper, ServiceHelper serviceHelper) {
        this.appName = appName;
        this.fileUtil = fileUtil;
        this.filesPathHelper = filesPathHelper;
        this.serviceHelper = serviceHelper;
    }
    /**
     * Builds the report for the given test cases and writes it to the report file for {@code appName}.
     *
     * @throws IOException if the report file cannot be written
     * @throws SerializerException if the report cannot be serialized to JSON
     */
    public void createReport(RedirectorTestCaseList testCases) throws IOException, SerializerException {
        TestCaseResultList report = prepareReport(testCases);
        fileUtil.writeJson(filesPathHelper.getFilename(FilesPathHelper.TestEntity.REPORT, appName), report);
    }
    /** Fetches all recorded sessions from the report endpoint. */
    private SessionList getSessions() {
        Response response = serviceHelper.getRequestBuilder(END_TO_END_REPORT_ENDPOINT, MediaType.APPLICATION_JSON).get();
        return response.readEntity(SessionList.class);
    }
    /** Pairs each recorded session with its expected test case (matched by name) to build the result list. */
    private TestCaseResultList prepareReport(RedirectorTestCaseList testCases) {
        SessionList sessions = getSessions();
        TestCaseResultList testCaseResultList = new TestCaseResultList();
        Map<String, RedirectorTestCase> nameToTestCaseMap = testCases.getRedirectorTestCases().stream()
            .collect(Collectors.toMap(RedirectorTestCase::getName, Function.identity()));
        List<TestCaseResult> testCaseResults = Optional.ofNullable(sessions.getSessions()).orElseGet(Collections::emptyList)
            .stream()
            .map(session -> getTestCaseResult(nameToTestCaseMap, session))
            .collect(Collectors.toList());
        testCaseResultList.setItems(testCaseResults);
        return testCaseResultList;
    }
    /**
     * Builds one report entry: expected outcome from the matching test case,
     * actual outcome and logs from the recorded session.
     */
    private TestCaseResult getTestCaseResult(Map<String, RedirectorTestCase> expectedResults, Session session) {
        TestCaseResult testCaseResult = new TestCaseResult();
        String testCaseName = session.getId();
        // Fail fast with context instead of an anonymous NPE further down when the
        // endpoint returns a session we have no test-case definition for.
        RedirectorTestCase originalTestCase = Objects.requireNonNull(
            expectedResults.get(testCaseName),
            "No test case definition found for session '" + testCaseName + "'");
        TestSuiteResponse expectedResult = originalTestCase.getExpected();
        RedirectorTestCase redirectorTestCase = new RedirectorTestCase();
        redirectorTestCase.setName(testCaseName);
        redirectorTestCase.setParameters(originalTestCase.getParameters());
        redirectorTestCase.setApplication(appName);
        redirectorTestCase.setExpected(expectedResult);
        redirectorTestCase.setRuleUnderTest(originalTestCase.getRuleUnderTest());
        TestSuiteResponse actualResult = session.getActual();
        testCaseResult.setStatus(TestCaseResult.Status.fromTestCase(expectedResult, actualResult));
        testCaseResult.setActual(actualResult);
        testCaseResult.setTestCase(redirectorTestCase);
        testCaseResult.setLogs(getSessionLogs(session));
        return testCaseResult;
    }
    /** Extracts the event messages of a session; a session without events yields an empty log list. */
    private List<String> getSessionLogs(Session session) {
        List<Event> events = session.getEvents();
        if (events == null) {
            return Collections.emptyList();
        }
        return events.stream().map(Event::getMessage).collect(Collectors.toList());
    }
    /** Deletes all recorded sessions on the server, logging whether the DELETE succeeded. */
    public void clearSessionLog() {
        Response response = serviceHelper.getRequestBuilder(END_TO_END_REPORT_ENDPOINT, MediaType.APPLICATION_JSON).delete();
        if (response.getStatus() == HttpURLConnection.HTTP_OK) {
            log.info("Session log has been cleared successfully");
        } else {
            log.error("Session log has not been cleared");
        }
    }
}
|
package quick.pager.shop.controller.system;
import java.util.Objects;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.security.access.prepost.PreAuthorize;
import org.springframework.web.bind.annotation.DeleteMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.PutMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import quick.pager.shop.constants.Constants;
import quick.pager.shop.constants.ResponseStatus;
import quick.pager.shop.param.system.MenuOtherParam;
import quick.pager.shop.param.system.MenuSaveParam;
import quick.pager.shop.service.system.MenuService;
import quick.pager.shop.user.response.Response;
import quick.pager.shop.utils.Assert;
/**
 * Menu management endpoints.
 *
 * @author siguiyang
 * @version 3.0
 */
@RestController
@RequestMapping(Constants.Module.MANAGE)
public class MenuController {
    @Autowired
    private MenuService menuService;
    /**
     * Lists system menus matching the given query parameters.
     */
    @PreAuthorize("hasAuthority('PAGER_SYSTEM_MENU')")
    @PostMapping("/menu/list")
    public Response list(@RequestBody MenuOtherParam param) {
        return menuService.queryList(param);
    }
    /**
     * Creates a new menu entry.
     */
    @PreAuthorize("hasAuthority('PAGER_SYSTEM_MENU_CREATE')")
    @PostMapping("/menu/create")
    public Response create(@RequestBody MenuSaveParam param) {
        return menuService.create(param);
    }
    /**
     * Modifies an existing menu entry; the payload must carry the menu id.
     */
    @PreAuthorize("hasAuthority('PAGER_SYSTEM_MENU_MODIFY')")
    @PutMapping("/menu/modify")
    public Response<Long> modify(@RequestBody MenuSaveParam param) {
        Assert.isTrue(Objects.nonNull(param.getId()), () -> ResponseStatus.PARAMS_EXCEPTION);
        return menuService.modify(param);
    }
    /**
     * Deletes the menu entry with the given id.
     */
    @PreAuthorize("hasAuthority('PAGER_SYSTEM_MENU_DELETE')")
    @DeleteMapping("/menu/{id}")
    public Response delete(@PathVariable("id") Long id) {
        return menuService.delete(id);
    }
}
|
/*
* Copyright (c) 2010-2016. Axon Framework
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.axonframework.eventhandling.saga;
import org.junit.Before;
import org.junit.Test;
import java.util.Iterator;
import static org.junit.Assert.*;
/**
 * Unit tests for {@code AssociationValuesImpl}: verifies that pending
 * additions/removals are tracked correctly around {@code commit()}.
 *
 * @author Allard Buijze
 */
public class AssociationValuesImplTest {

    private AssociationValuesImpl values;
    private AssociationValue association;

    @Before
    public void setUp() throws Exception {
        values = new AssociationValuesImpl();
        association = new AssociationValue("key", "value");
    }

    @Test
    public void testAddAssociationValue() throws Exception {
        values.add(association);
        // one pending addition, nothing pending removal
        assertEquals(1, values.addedAssociations().size());
        assertTrue(values.removedAssociations().isEmpty());
    }

    @Test
    public void testAddAssociationValue_AddedTwice() throws Exception {
        values.add(association);
        values.commit();
        // re-adding an already-committed value must not register as a new change
        values.add(association);
        assertTrue(values.addedAssociations().isEmpty());
        assertTrue(values.removedAssociations().isEmpty());
    }

    @Test
    public void testRemoveAssociationValue() {
        assertTrue(values.add(association));
        values.commit();
        assertTrue(values.remove(association));
        // after commit, a removal is tracked as pending
        assertTrue(values.addedAssociations().isEmpty());
        assertEquals(1, values.removedAssociations().size());
    }

    @Test
    public void testRemoveAssociationValue_NotInContainer() {
        // removing something never added tracks no change at all
        values.remove(association);
        assertTrue(values.addedAssociations().isEmpty());
        assertTrue(values.removedAssociations().isEmpty());
    }

    @Test
    public void testAddAndRemoveEntry() {
        values.add(association);
        values.remove(association);
        // an uncommitted add followed by remove cancels out: no net change
        assertTrue(values.addedAssociations().isEmpty());
        assertTrue(values.removedAssociations().isEmpty());
    }

    @Test
    public void testContains() {
        assertFalse(values.contains(association));
        values.add(association);
        assertTrue(values.contains(association));
        // containment is by equality, not identity
        assertTrue(values.contains(new AssociationValue("key", "value")));
        values.remove(association);
        assertFalse(values.contains(association));
    }

    @Test
    public void testAsSet() {
        values.add(association);
        int seen = 0;
        for (AssociationValue actual : values.asSet()) {
            assertSame(association, actual);
            seen++;
        }
        // exactly one element, and it is the very instance that was added
        assertEquals(1, seen);
    }

    @Test
    public void testIterator() {
        values.add(association);
        Iterator<AssociationValue> iterator = values.iterator();
        assertSame(association, iterator.next());
        assertFalse(iterator.hasNext());
    }
}
|
package com.yilian.networkingmodule.entity;
import java.io.Serializable;
/**
 * Created on 2017/6/23.
 * Entity mapped from the order-info JSON passed by JS when launching WeChat or Alipay payment.
 */
public class JsPayClass implements Serializable{
    /**
     * Example payload:
     * {
     *   orderString = "";
     *   "order_index" = 327;
     *   "payment_apply_time" = 1498027130;
     *   "payment_fee" = "0.01";
     *   "paymentIndex" = 2017062114385037463;
     * }
     * NOTE(review): the sample uses the key "paymentIndex" while the field below is
     * named payment_index — confirm the JSON mapping actually populates that field.
     */
    public String orderString;//Alipay order info returned by the system when paying via Alipay
    public String order_index;//order ID
    public String payment_apply_time;//
    public String payment_fee;
    public String payment_index;
}
|
package com.commercetools.history.models.change;
import java.time.*;
import java.util.*;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.*;
import io.vrap.rmf.base.client.utils.Generated;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
/**
 * Generated implementation of {@link SetVatIdChange}: a simple value holder for
 * the {@code setVatId} change, carrying the previous and next values.
 * NOTE(review): produced by rmf-codegen — prefer regenerating over hand edits.
 */
@Generated(value = "io.vrap.rmf.codegen.rendring.CoreCodeGenerator", comments = "https://github.com/vrapio/rmf-codegen")
public final class SetVatIdChangeImpl implements SetVatIdChange {
    // discriminator; always SET_VAT_ID_CHANGE (set in both constructors)
    private String type;
    private String change;
    private String previousValue;
    private String nextValue;
    @JsonCreator
    SetVatIdChangeImpl(@JsonProperty("change") final String change,
            @JsonProperty("previousValue") final String previousValue,
            @JsonProperty("nextValue") final String nextValue) {
        this.change = change;
        this.previousValue = previousValue;
        this.nextValue = nextValue;
        this.type = SET_VAT_ID_CHANGE;
    }
    public SetVatIdChangeImpl() {
        this.type = SET_VAT_ID_CHANGE;
    }
    /** @return the discriminator constant for this change type */
    public String getType() {
        return this.type;
    }
    /**
     * <p>Shape of the action for <code>setVatId</code></p>
     */
    public String getChange() {
        return this.change;
    }
    /** @return the value before the change */
    public String getPreviousValue() {
        return this.previousValue;
    }
    /** @return the value after the change */
    public String getNextValue() {
        return this.nextValue;
    }
    public void setChange(final String change) {
        this.change = change;
    }
    public void setPreviousValue(final String previousValue) {
        this.previousValue = previousValue;
    }
    public void setNextValue(final String nextValue) {
        this.nextValue = nextValue;
    }
    // equality/hash include all four fields, including the (constant) type
    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;
        SetVatIdChangeImpl that = (SetVatIdChangeImpl) o;
        return new EqualsBuilder().append(type, that.type)
                .append(change, that.change)
                .append(previousValue, that.previousValue)
                .append(nextValue, that.nextValue)
                .isEquals();
    }
    @Override
    public int hashCode() {
        return new HashCodeBuilder(17, 37).append(type)
                .append(change)
                .append(previousValue)
                .append(nextValue)
                .toHashCode();
    }
}
|
package com.simibubi.create.content.contraptions.components.structureMovement.pulley;
import com.mojang.blaze3d.matrix.MatrixStack;
import com.mojang.blaze3d.vertex.IVertexBuilder;
import com.simibubi.create.AllBlockPartials;
import com.simibubi.create.content.contraptions.base.IRotate;
import com.simibubi.create.content.contraptions.base.KineticTileEntity;
import com.simibubi.create.content.contraptions.base.KineticTileEntityRenderer;
import com.simibubi.create.foundation.render.SuperByteBuffer;
import com.simibubi.create.foundation.render.backend.FastRenderDispatcher;
import com.simibubi.create.foundation.utility.AngleHelper;
import net.minecraft.block.BlockState;
import net.minecraft.client.renderer.IRenderTypeBuffer;
import net.minecraft.client.renderer.RenderType;
import net.minecraft.client.renderer.WorldRenderer;
import net.minecraft.client.renderer.tileentity.TileEntityRendererDispatcher;
import net.minecraft.util.Direction;
import net.minecraft.util.Direction.Axis;
import net.minecraft.util.Direction.AxisDirection;
import net.minecraft.util.math.BlockPos;
import net.minecraft.world.IWorld;
import net.minecraft.world.World;
/**
 * Shared renderer for pulley-style kinetic tile entities: draws the rotating
 * coil plus the rope segments and magnet hanging below, based on the current
 * extension offset supplied by the concrete subclass.
 */
public abstract class AbstractPulleyRenderer extends KineticTileEntityRenderer {
    // partial models for the transition piece between two full rope/magnet segments
    private AllBlockPartials halfRope;
    private AllBlockPartials halfMagnet;
    public AbstractPulleyRenderer(TileEntityRendererDispatcher dispatcher, AllBlockPartials halfRope,
        AllBlockPartials halfMagnet) {
        super(dispatcher);
        this.halfRope = halfRope;
        this.halfMagnet = halfMagnet;
    }
    @Override
    public boolean isGlobalRenderer(KineticTileEntity p_188185_1_) {
        // the rope can extend far below the block, so render even when the TE is off-screen
        return true;
    }
    @Override
    protected void renderSafe(KineticTileEntity te, float partialTicks, MatrixStack ms, IRenderTypeBuffer buffer,
        int light, int overlay) {
        // the fast render backend handles this TE itself; skip the slow path
        if (FastRenderDispatcher.available(te.getWorld())) return;
        super.renderSafe(te, partialTicks, ms, buffer, light, overlay);
        float offset = getOffset(te, partialTicks);
        boolean running = isRunning(te);
        Axis rotationAxis = ((IRotate) te.getBlockState()
            .getBlock()).getRotationAxis(te.getBlockState());
        // spin the coil: offset * 180 degrees appears to map one block of travel to half a turn — confirm
        kineticRotationTransform(getRotatedCoil(te), te, rotationAxis, AngleHelper.rad(offset * 180), light)
            .renderInto(ms, buffer.getBuffer(RenderType.getSolid()));
        World world = te.getWorld();
        BlockState blockState = te.getBlockState();
        BlockPos pos = te.getPos();
        SuperByteBuffer halfMagnet = this.halfMagnet.renderOn(blockState);
        SuperByteBuffer halfRope = this.halfRope.renderOn(blockState);
        SuperByteBuffer magnet = renderMagnet(te);
        SuperByteBuffer rope = renderRope(te);
        IVertexBuilder vb = buffer.getBuffer(RenderType.getSolid());
        // magnet at the end of the rope; the half model is used for very small extensions
        if (running || offset == 0)
            renderAt(world, offset > .25f ? magnet : halfMagnet, offset, pos, ms, vb);
        // partial rope segment nearest the pulley (f is the fractional part of the extension)
        float f = offset % 1;
        if (offset > .75f && (f < .25f || f > .75f))
            renderAt(world, halfRope, f > .75f ? f - 1 : f, pos, ms, vb);
        if (!running)
            return;
        // full rope segments between the pulley and the magnet
        for (int i = 0; i < offset - 1.25f; i++)
            renderAt(world, rope, offset - i - 1, pos, ms, vb);
    }
    // renders one partial model `offset` blocks below the pulley, lit for the block it occupies
    private void renderAt(IWorld world, SuperByteBuffer partial, float offset, BlockPos pulleyPos, MatrixStack ms,
        IVertexBuilder buffer) {
        BlockPos actualPos = pulleyPos.down((int) offset);
        int light = WorldRenderer.getLightmapCoordinates(world, world.getBlockState(actualPos), actualPos);
        partial.translate(0, -offset, 0)
            .light(light)
            .renderInto(ms, buffer);
    }
    protected abstract Axis getShaftAxis(KineticTileEntity te);
    protected abstract AllBlockPartials getCoil();
    protected abstract SuperByteBuffer renderRope(KineticTileEntity te);
    protected abstract SuperByteBuffer renderMagnet(KineticTileEntity te);
    // current extension of the rope in blocks (0 = fully retracted) — inferred from usage above
    protected abstract float getOffset(KineticTileEntity te, float partialTicks);
    protected abstract boolean isRunning(KineticTileEntity te);
    @Override
    protected BlockState getRenderedBlockState(KineticTileEntity te) {
        return shaft(getShaftAxis(te));
    }
    // coil model oriented to face the positive direction of the shaft axis
    protected SuperByteBuffer getRotatedCoil(KineticTileEntity te) {
        BlockState blockState = te.getBlockState();
        return getCoil().renderOnDirectionalSouth(blockState,
            Direction.getFacingFromAxis(AxisDirection.POSITIVE, getShaftAxis(te)));
    }
}
|
package com.example.ianno.simplecontactwithorm.Activity;
import android.app.Activity;
import android.content.Intent;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.widget.TextView;
import android.widget.Toast;
import com.example.ianno.simplecontactwithorm.Entity.Contact;
import com.example.ianno.simplecontactwithorm.R;
/**
 * Shows the details (name, phone, email) of a single contact identified by the
 * "contactId" intent extra.
 */
public class DetailContact extends AppCompatActivity {
    private TextView textViewDetailFullname;
    private TextView textViewDetailPhone;
    private TextView textViewDetailEmail;
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_detail_contact);
        textViewDetailFullname = (TextView) findViewById(R.id.detailFullname);
        textViewDetailPhone = (TextView) findViewById(R.id.detailPhone);
        textViewDetailEmail = (TextView) findViewById(R.id.detailEmail);
        // Guard against a missing/invalid "contactId" extra: the previous code NPE'd on a
        // null extras bundle and threw NumberFormatException on a non-numeric id.
        Bundle bundle = getIntent().getExtras();
        String contactId = (bundle != null) ? bundle.getString("contactId") : null;
        Contact contact = null;
        if (contactId != null) {
            try {
                contact = Contact.findById(Contact.class, Integer.parseInt(contactId));
            } catch (NumberFormatException e) {
                contact = null; // treated the same as "not found" below
            }
        }
        if (contact == null) {
            Toast.makeText(this, "Contact not found", Toast.LENGTH_SHORT).show();
            finish();
            return;
        }
        textViewDetailFullname.setText(contact.getContactName());
        textViewDetailPhone.setText(contact.getContactPhone());
        textViewDetailEmail.setText(contact.getContactEmail());
    }
    @Override
    public void onBackPressed() {
        // Report a cancelled result to the caller instead of the default back behavior.
        Intent intent = new Intent();
        intent.putExtra("result", "");
        setResult(Activity.RESULT_CANCELED, intent);
        finish();
    }
}
|
package com.project.lottiebottombar;
import android.os.Bundle;
import androidx.fragment.app.Fragment;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
/**
 * Simple fragment that displays the static "gifts" layout; it holds no state.
 */
public class GiftsFragment extends Fragment {
    public GiftsFragment() {
        // Required empty public constructor
    }
    @Override
    public View onCreateView(LayoutInflater inflater, ViewGroup container,
                             Bundle savedInstanceState) {
        // Inflate the layout for this fragment
        return inflater.inflate(R.layout.fragment_gifts, container, false);
    }
}
|
package me.salamander.cctransformer.util;
import io.netty.util.internal.PlatformDependent;
import it.unimi.dsi.fastutil.longs.*;
import net.minecraft.core.BlockPos;
import java.util.NoSuchElementException;
/**
* A fast hash-map implementation for 3-dimensional vectors with {@code int} components, mapped to unsigned {@code byte} values.
* <p>
* Optimized for the case where queries will be close to each other.
* <p>
* Buckets are arranged into a doubly linked list, which allows efficient iteration when the table is sparse and is crucial to keep {@link #poll(EntryConsumer)}'s average runtime nearly
* constant.
* <p>
* Not thread-safe. Attempting to use this concurrently from multiple threads will likely have catastrophic results (read: JVM crashes).
*
* @author DaPorkchop_
*/
public class Int3UByteLinkedHashMap implements AutoCloseable {
    // sentinel returned by query methods when no entry exists for a position
    public static final int DEFAULT_RETURN_VALUE = -1;
    protected static final int BUCKET_AXIS_BITS = 2; //the number of bits per axis which are used inside of the bucket rather than identifying the bucket
    protected static final int BUCKET_AXIS_MASK = (1 << BUCKET_AXIS_BITS) - 1;
    protected static final int BUCKET_SIZE = 1 << (BUCKET_AXIS_BITS * 3); //the number of entries per bucket
    /*
     * Off-heap layout of a bucket key (the bucket's base position):
     * struct key_t {
     *     int x;
     *     int y;
     *     int z;
     * };
     */
    protected static final long KEY_X_OFFSET = 0L;
    protected static final long KEY_Y_OFFSET = KEY_X_OFFSET + Integer.BYTES;
    protected static final long KEY_Z_OFFSET = KEY_Y_OFFSET + Integer.BYTES;
    protected static final long KEY_BYTES = KEY_Z_OFFSET + Integer.BYTES;
    /*
     * Off-heap layout of a bucket's payload: a presence bitmask (one bit per slot)
     * followed by the slot values.
     * struct value_t {
     *     long flags;
     *     byte vals[BUCKET_SIZE];
     * };
     */
    protected static final long VALUE_FLAGS_OFFSET = 0L;
    protected static final long VALUE_VALS_OFFSET = VALUE_FLAGS_OFFSET + Long.BYTES;
    protected static final long VALUE_BYTES = VALUE_VALS_OFFSET + BUCKET_SIZE * Byte.BYTES;
    /*
     * Off-heap layout of a full bucket: key, payload, and doubly-linked-list indices.
     * struct bucket_t {
     *     key_t key;
     *     value_t value;
     *     long prevIndex;
     *     long nextIndex;
     * };
     */
    protected static final long BUCKET_KEY_OFFSET = 0L;
    protected static final long BUCKET_VALUE_OFFSET = BUCKET_KEY_OFFSET + KEY_BYTES;
    protected static final long BUCKET_PREVINDEX_OFFSET = BUCKET_VALUE_OFFSET + VALUE_BYTES;
    protected static final long BUCKET_NEXTINDEX_OFFSET = BUCKET_PREVINDEX_OFFSET + Long.BYTES;
    protected static final long BUCKET_BYTES = BUCKET_NEXTINDEX_OFFSET + Long.BYTES;
    protected static final long DEFAULT_TABLE_SIZE = 16L;
    static {
        // raw long/int reads below are issued at arbitrary byte offsets, so unaligned access is required
        if (!PlatformDependent.isUnaligned()) {
            throw new AssertionError("your CPU doesn't support unaligned memory access!");
        }
    }
    protected long tableAddr = 0L; //the address of the table in memory
    protected long tableSize = 0L; //the physical size of the table (in buckets). always a non-zero power of two
    protected long resizeThreshold = 0L; //usedBuckets count at which the table grows — presumably set by setTableSize (not visible here)
    protected long usedBuckets = 0L; //number of buckets currently assigned to a position
    protected long size = 0L; //the number of values stored in the set
    protected long firstBucketIndex = -1L; //index of the first known assigned bucket in the list
    protected long lastBucketIndex = -1L; //index of the last known assigned bucket in the list
    protected boolean closed = false; //set once the off-heap table has been released — TODO confirm against close() (not visible here)
    /** Creates an empty map backed by a table of {@code DEFAULT_TABLE_SIZE} buckets. */
    public Int3UByteLinkedHashMap() {
        this.setTableSize(DEFAULT_TABLE_SIZE);
    }
    /**
     * Creates an empty map pre-sized for the given capacity, so that it will not
     * need to resize until the 0.75 load threshold is crossed.
     *
     * @param initialCapacity expected capacity (assumes this counts buckets, since it
     *                        is compared against the bucket table size — TODO confirm)
     */
    public Int3UByteLinkedHashMap(int initialCapacity) {
        initialCapacity = (int) Math.ceil(initialCapacity * (1.0d / 0.75d)); //scale according to resize threshold
        initialCapacity = 1 << (Integer.SIZE - Integer.numberOfLeadingZeros(initialCapacity - 1)); //round up to next power of two
        this.setTableSize(Math.max(initialCapacity, DEFAULT_TABLE_SIZE));
    }
    /**
     * Copy constructor: duplicates {@code src}, including a byte-for-byte copy of
     * its off-heap bucket table (if one has been allocated).
     *
     * @param src the map to copy
     */
    protected Int3UByteLinkedHashMap(Int3UByteLinkedHashMap src) {
        if (src.tableAddr != 0L) { //source table is allocated, let's copy it
            long tableSizeBytes = src.tableSize * BUCKET_BYTES;
            this.tableAddr = PlatformDependent.allocateMemory(tableSizeBytes);
            PlatformDependent.copyMemory(src.tableAddr, this.tableAddr, tableSizeBytes);
        }
        this.tableSize = src.tableSize;
        this.resizeThreshold = src.resizeThreshold;
        this.usedBuckets = src.usedBuckets;
        this.size = src.size;
        this.firstBucketIndex = src.firstBucketIndex;
        this.lastBucketIndex = src.lastBucketIndex;
    }
    /**
     * Faster memcpy routine (for small ranges) which JIT can optimize specifically for the range size.
     *
     * @param srcAddr the source address
     * @param dstAddr the destination address
     * @param size    the number of bytes to copy
     */
    protected static void memcpy(long srcAddr, long dstAddr, long size) {
        long offset = 0L;
        while (size - offset >= Long.BYTES) { //copy as many longs as possible
            PlatformDependent.putLong(dstAddr + offset, PlatformDependent.getLong(srcAddr + offset));
            offset += Long.BYTES;
        }
        while (size - offset >= Integer.BYTES) { //pad with ints
            PlatformDependent.putInt(dstAddr + offset, PlatformDependent.getInt(srcAddr + offset));
            offset += Integer.BYTES;
        }
        while (size - offset >= Byte.BYTES) { //pad with bytes
            PlatformDependent.putByte(dstAddr + offset, PlatformDependent.getByte(srcAddr + offset));
            offset += Byte.BYTES;
        }
        assert offset == size;
    }
    /**
     * Hashes a bucket position by mixing each coordinate with a large prime
     * multiplier; collisions are handled by the linear probe in {@code findBucket}.
     */
    protected static long hashPosition(int x, int y, int z) {
        return x * 1403638657883916319L //some random prime numbers
                + y * 4408464607732138253L
                + z * 2587306874955016303L;
    }
    /**
     * Packs the low {@code BUCKET_AXIS_BITS} bits of each coordinate into a slot
     * index within a bucket (range {@code [0, BUCKET_SIZE)}).
     */
    protected static int positionIndex(int x, int y, int z) {
        return ((x & BUCKET_AXIS_MASK) << (BUCKET_AXIS_BITS * 2)) | ((y & BUCKET_AXIS_MASK) << BUCKET_AXIS_BITS) | (z & BUCKET_AXIS_MASK);
    }
    /** Returns the single-bit mask marking the given position's slot in a bucket's flags word. */
    protected static long positionFlag(int x, int y, int z) {
        return 1L << positionIndex(x, y, z);
    }
    /**
     * Allocates and zero-fills an off-heap table of {@code tableSize} buckets,
     * returning its base address (zeroed flags mark every bucket unassigned).
     */
    protected static long allocateTable(long tableSize) {
        long size = tableSize * BUCKET_BYTES;
        long addr = PlatformDependent.allocateMemory(size); //allocate
        PlatformDependent.setMemory(addr, size, (byte) 0); //clear
        return addr;
    }
    /**
     * Inserts an entry into this map at the given position with the given value.
     * <p>
     * If an entry with the given position is already present in this map, it will be replaced.
     *
     * @param x the position's X coordinate
     * @param y the position's Y coordinate
     * @param z the position's Z coordinate
     * @param value the value to insert. Must be an unsigned {@code byte}
     *
     * @return the previous entry's value, or {@link #DEFAULT_RETURN_VALUE} if no such entry was present
     *
     * @see java.util.Map#put(Object, Object)
     */
    public int put(int x, int y, int z, int value) {
        assert (value & 0xFF) == value : "value not in range [0,255]: " + value;
        //low BUCKET_AXIS_BITS of each coordinate select the slot inside the bucket...
        int index = positionIndex(x, y, z);
        long flag = positionFlag(x, y, z);
        //...while the remaining high bits identify the bucket itself
        long bucket = this.findBucket(x >> BUCKET_AXIS_BITS, y >> BUCKET_AXIS_BITS, z >> BUCKET_AXIS_BITS, true);
        int oldValue;
        long flags = PlatformDependent.getLong(bucket + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET);
        if ((flags & flag) == 0L) { //flag wasn't previously set
            PlatformDependent.putLong(bucket + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET, flags | flag);
            this.size++; //the position was newly added, so we need to increment the total size
            oldValue = DEFAULT_RETURN_VALUE;
        } else { //the flag was already set
            oldValue = PlatformDependent.getByte(bucket + BUCKET_VALUE_OFFSET + VALUE_VALS_OFFSET + index * Byte.BYTES) & 0xFF;
        }
        //store value into bucket (unconditionally: it either replaces or newly sets the slot)
        PlatformDependent.putByte(bucket + BUCKET_VALUE_OFFSET + VALUE_VALS_OFFSET + index * Byte.BYTES, (byte) value);
        return oldValue;
    }
    /**
     * Inserts an entry into this map at the given position with the given value.
     * <p>
     * If an entry with the given position is already present in this map, the map will not be modified.
     *
     * @param x the position's X coordinate
     * @param y the position's Y coordinate
     * @param z the position's Z coordinate
     * @param value the value to insert. Must be an unsigned {@code byte}
     *
     * @return the previous entry's value, or {@link #DEFAULT_RETURN_VALUE} if no such entry was present and the entry was inserted
     *
     * @see java.util.Map#putIfAbsent(Object, Object)
     */
    public int putIfAbsent(int x, int y, int z, int value) {
        assert (value & 0xFF) == value : "value not in range [0,255]: " + value;
        int index = positionIndex(x, y, z);
        long flag = positionFlag(x, y, z);
        //createIfAbsent=true: the bucket is created even if the slot turns out to be occupied
        long bucket = this.findBucket(x >> BUCKET_AXIS_BITS, y >> BUCKET_AXIS_BITS, z >> BUCKET_AXIS_BITS, true);
        long flags = PlatformDependent.getLong(bucket + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET);
        if ((flags & flag) == 0L) { //flag wasn't previously set
            PlatformDependent.putLong(bucket + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET, flags | flag);
            this.size++; //the position was newly added, so we need to increment the total size
            PlatformDependent.putByte(bucket + BUCKET_VALUE_OFFSET + VALUE_VALS_OFFSET + index * Byte.BYTES, (byte) value);
            return DEFAULT_RETURN_VALUE;
        } else { //the flag was already set: leave the stored value untouched and report it
            return PlatformDependent.getByte(bucket + BUCKET_VALUE_OFFSET + VALUE_VALS_OFFSET + index * Byte.BYTES) & 0xFF;
        }
    }
    /**
     * Checks whether or not an entry at the given position is present in this map.
     *
     * @param x the position's X coordinate
     * @param y the position's Y coordinate
     * @param z the position's Z coordinate
     *
     * @return whether or not the position is present
     *
     * @see java.util.Map#containsKey(Object)
     */
    public boolean containsKey(int x, int y, int z) {
        long flag = positionFlag(x, y, z);
        //read-only lookup: never allocates a bucket (createIfAbsent=false)
        long bucket = this.findBucket(x >> BUCKET_AXIS_BITS, y >> BUCKET_AXIS_BITS, z >> BUCKET_AXIS_BITS, false);
        return bucket != 0L //bucket exists
                && (PlatformDependent.getLong(bucket + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET) & flag) != 0L; //flag is set
    }
    /**
     * Gets the value of the entry associated with the given position.
     *
     * @param x the position's X coordinate
     * @param y the position's Y coordinate
     * @param z the position's Z coordinate
     *
     * @return the entry's value, or {@link #DEFAULT_RETURN_VALUE} if no such entry was present
     *
     * @see java.util.Map#get(Object)
     */
    public int get(int x, int y, int z) {
        int index = positionIndex(x, y, z);
        long flag = positionFlag(x, y, z);
        //read-only lookup: never allocates a bucket (createIfAbsent=false)
        long bucket = this.findBucket(x >> BUCKET_AXIS_BITS, y >> BUCKET_AXIS_BITS, z >> BUCKET_AXIS_BITS, false);
        if (bucket != 0L //bucket exists
                && (PlatformDependent.getLong(bucket + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET) & flag) != 0L) { //flag is set
            //mask to 0xFF so the stored byte is returned as an unsigned value
            return PlatformDependent.getByte(bucket + BUCKET_VALUE_OFFSET + VALUE_VALS_OFFSET + index * Byte.BYTES) & 0xFF;
        } else { //bucket doesn't exist or doesn't contain the position
            return DEFAULT_RETURN_VALUE;
        }
    }
//Locates the bucket keyed on the given bucket-space coordinates using linear probing.
//Returns the bucket's memory address, or 0 if it's absent and createIfAbsent is false. When createIfAbsent is
// true, an empty slot is claimed for the key (appending the new bucket to the insertion-order linked list),
// resizing the table first if the load threshold has been reached.
protected long findBucket(int x, int y, int z, boolean createIfAbsent) {
long tableSize = this.tableSize;
long tableAddr = this.tableAddr;
if (tableAddr == 0L) {
if (createIfAbsent) { //the table hasn't been allocated yet - let's make a new one!
this.tableAddr = tableAddr = allocateTable(tableSize);
} else { //the table isn't even allocated yet, so the bucket clearly isn't present
return 0L;
}
}
long mask = tableSize - 1L; //tableSize is always a power of two, so we can safely create a bitmask like this
long hash = hashPosition(x, y, z);
//linear probing: scan forward from the hash slot until we find the key, an empty slot, or decide to resize
for (long i = 0L; ; i++) {
long bucketIndex = (hash + i) & mask;
long bucketAddr = tableAddr + bucketIndex * BUCKET_BYTES;
if (PlatformDependent.getLong(bucketAddr + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET) == 0L) { //if the value's flags are 0, it means the bucket hasn't been assigned yet
if (createIfAbsent) {
if (this.usedBuckets < this.resizeThreshold) { //let's assign the bucket to our current position
this.usedBuckets++;
//store the bucket's key (its bucket-space coordinates)
PlatformDependent.putInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_X_OFFSET, x);
PlatformDependent.putInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_Y_OFFSET, y);
PlatformDependent.putInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_Z_OFFSET, z);
//add bucket to linked list
long prevBucketIndex = -1L;
long nextBucketIndex = -1L;
if (this.firstBucketIndex < 0L) { //no other buckets exist
this.firstBucketIndex = bucketIndex;
} else { //there are other buckets, let's insert this bucket at the back of the list
prevBucketIndex = this.lastBucketIndex;
long prevBucketAddr = tableAddr + prevBucketIndex * BUCKET_BYTES;
PlatformDependent.putLong(prevBucketAddr + BUCKET_NEXTINDEX_OFFSET, bucketIndex);
}
PlatformDependent.putLong(bucketAddr + BUCKET_PREVINDEX_OFFSET, prevBucketIndex);
PlatformDependent.putLong(bucketAddr + BUCKET_NEXTINDEX_OFFSET, nextBucketIndex);
this.lastBucketIndex = bucketIndex;
return bucketAddr;
} else {
//we've established that there's no matching bucket, but the table is full. let's resize it before allocating a bucket
// to avoid overfilling the table
this.resize();
return this.findBucket(x, y, z, createIfAbsent); //tail recursion will probably be optimized away
}
} else { //empty bucket, abort search - there won't be anything else later on
return 0L;
}
}
//the bucket is set. check coordinates to see if it matches the one we're searching for
if (PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_X_OFFSET) == x
&& PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_Y_OFFSET) == y
&& PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_Z_OFFSET) == z) { //we found the matching bucket!
return bucketAddr;
}
//continue search...
}
}
//Doubles the table size, rehashes every occupied bucket into the new table, frees the old table, and then
// rebuilds the insertion-order linked list by scanning the new table.
//NOTE(review): rebuilding the list in table-index order discards the original insertion order of the buckets --
// confirm that callers relying on iteration order tolerate this after a resize.
protected void resize() {
long oldTableSize = this.tableSize;
long oldTableAddr = this.tableAddr;
//allocate new table
long newTableSize = oldTableSize << 1L;
this.setTableSize(newTableSize);
long newTableAddr = this.tableAddr = allocateTable(newTableSize);
long newMask = newTableSize - 1L;
//iterate through every bucket in the old table and copy it to the new one
for (long oldBucketIndex = 0; oldBucketIndex < oldTableSize; oldBucketIndex++) {
long oldBucketAddr = oldTableAddr + oldBucketIndex * BUCKET_BYTES;
//read the key into registers
int x = PlatformDependent.getInt(oldBucketAddr + BUCKET_KEY_OFFSET + KEY_X_OFFSET);
int y = PlatformDependent.getInt(oldBucketAddr + BUCKET_KEY_OFFSET + KEY_Y_OFFSET);
int z = PlatformDependent.getInt(oldBucketAddr + BUCKET_KEY_OFFSET + KEY_Z_OFFSET);
if (PlatformDependent.getLong(oldBucketAddr + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET) == 0L) { //the bucket is unset, so there's no reason to copy it
continue;
}
//linear-probe the new table for a free slot and copy the bucket there
for (long hash = hashPosition(x, y, z), j = 0L; ; j++) {
long newBucketAddr = newTableAddr + ((hash + j) & newMask) * BUCKET_BYTES;
if (PlatformDependent.getLong(newBucketAddr + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET) == 0L) { //if the bucket value is 0, it means the bucket hasn't been assigned yet
//write bucket into new table
PlatformDependent.putInt(newBucketAddr + BUCKET_KEY_OFFSET + KEY_X_OFFSET, x);
PlatformDependent.putInt(newBucketAddr + BUCKET_KEY_OFFSET + KEY_Y_OFFSET, y);
PlatformDependent.putInt(newBucketAddr + BUCKET_KEY_OFFSET + KEY_Z_OFFSET, z);
memcpy(oldBucketAddr + BUCKET_VALUE_OFFSET, newBucketAddr + BUCKET_VALUE_OFFSET, VALUE_BYTES);
//the linked-list pointers are rebuilt below, so initialize them to "none" for now
PlatformDependent.putLong(newBucketAddr + BUCKET_PREVINDEX_OFFSET, -1L);
PlatformDependent.putLong(newBucketAddr + BUCKET_NEXTINDEX_OFFSET, -1L);
break; //advance to next bucket in old table
}
//continue search...
}
}
//delete old table
PlatformDependent.freeMemory(oldTableAddr);
//iterate through every bucket in the new table and append non-empty buckets to the new linked list
long prevBucketIndex = -1L;
for (long bucketIndex = 0; bucketIndex < newTableSize; bucketIndex++) {
long bucketAddr = newTableAddr + bucketIndex * BUCKET_BYTES;
if (PlatformDependent.getLong(bucketAddr + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET) == 0L) { //the bucket is unset, so there's no reason to add it to the list
continue;
}
if (prevBucketIndex < 0L) { //this is first bucket we've encountered in the list so far
this.firstBucketIndex = bucketIndex;
} else { //append current bucket to list
long prevBucketAddr = newTableAddr + prevBucketIndex * BUCKET_BYTES;
PlatformDependent.putLong(prevBucketAddr + BUCKET_NEXTINDEX_OFFSET, bucketIndex);
PlatformDependent.putLong(bucketAddr + BUCKET_PREVINDEX_OFFSET, prevBucketIndex);
}
prevBucketIndex = bucketIndex;
}
this.lastBucketIndex = prevBucketIndex;
}
/**
 * Runs the given callback function on every entry in this map.
 * <p>
 * The callback function must not modify this map.
 *
 * @param action the callback function
 *
 * @see java.util.Map#forEach(java.util.function.BiConsumer)
 */
public void forEach(EntryConsumer action) {
    //nothing to visit if the table was never allocated or the map holds no entries
    boolean nothingToIterate = this.tableAddr == 0L || this.isEmpty();
    if (nothingToIterate) {
        return;
    }
    //choose the traversal strategy based on table occupancy
    if (this.usedBuckets >= (this.tableSize >> 1L)) { //at least half the buckets are used: scan linearly
        this.forEachFull(action);
    } else { //sparse table: follow the linked list to skip empty buckets
        this.forEachSparse(action);
    }
}
protected void forEachFull(EntryConsumer action) { //optimized for a mostly-full table: plain linear scan
    long tableAddr = this.tableAddr;
    long tableSize = this.tableSize;
    for (long bucketIndex = 0L; bucketIndex < tableSize; bucketIndex++) {
        this.forEachInBucket(action, tableAddr + bucketIndex * BUCKET_BYTES);
    }
}
protected void forEachSparse(EntryConsumer action) { //optimized for a mostly-empty table: walk the bucket linked list
    long tableAddr = this.tableAddr;
    long bucketIndex = this.firstBucketIndex;
    while (bucketIndex >= 0L) { //-1 marks the end of the list
        long bucketAddr = tableAddr + bucketIndex * BUCKET_BYTES;
        this.forEachInBucket(action, bucketAddr);
        bucketIndex = PlatformDependent.getLong(bucketAddr + BUCKET_NEXTINDEX_OFFSET);
    }
}
protected void forEachInBucket(EntryConsumer action, long bucketAddr) {
    //the bucket's base coordinates, pre-scaled from bucket space into entry space
    int baseX = PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_X_OFFSET) << BUCKET_AXIS_BITS;
    int baseY = PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_Y_OFFSET) << BUCKET_AXIS_BITS;
    int baseZ = PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_Z_OFFSET) << BUCKET_AXIS_BITS;
    //visit only the set bits of the presence bitmap: numberOfTrailingZeros is intrinsified (TZCNT) and
    // 'flags & (flags - 1)' clears the lowest set bit, so we skip empty slots entirely
    for (long flags = PlatformDependent.getLong(bucketAddr + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET);
         flags != 0L;
         flags &= flags - 1L) {
        int index = Long.numberOfTrailingZeros(flags);
        int val = PlatformDependent.getByte(bucketAddr + BUCKET_VALUE_OFFSET + VALUE_VALS_OFFSET + index * Byte.BYTES) & 0xFF;
        action.accept(
                baseX + (index >> (BUCKET_AXIS_BITS * 2)),
                baseY + ((index >> BUCKET_AXIS_BITS) & BUCKET_AXIS_MASK),
                baseZ + (index & BUCKET_AXIS_MASK),
                val);
    }
}
/**
 * Removes the entry at the given position from this map.
 *
 * @param x the position's X coordinate
 * @param y the position's Y coordinate
 * @param z the position's Z coordinate
 *
 * @return the old value at the given position, or {@link #DEFAULT_RETURN_VALUE} if the position wasn't present
 *
 * @see java.util.Map#remove(Object)
 */
public int remove(int x, int y, int z) {
long tableAddr = this.tableAddr;
if (tableAddr == 0L) { //the table isn't even allocated yet, there's nothing to remove...
return DEFAULT_RETURN_VALUE;
}
long mask = this.tableSize - 1L; //tableSize is always a power of two, so we can safely create a bitmask like this
long flag = positionFlag(x, y, z);
//scale the entry coordinates down into bucket space; buckets are keyed on these
int searchBucketX = x >> BUCKET_AXIS_BITS;
int searchBucketY = y >> BUCKET_AXIS_BITS;
int searchBucketZ = z >> BUCKET_AXIS_BITS;
long hash = hashPosition(searchBucketX, searchBucketY, searchBucketZ);
//linear probing: scan forward from the hash slot until we hit the matching bucket or an empty one
for (long i = 0L; ; i++) {
long bucketIndex = (hash + i) & mask;
long bucketAddr = tableAddr + bucketIndex * BUCKET_BYTES;
//read the bucket's key and flags into registers
int bucketX = PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_X_OFFSET);
int bucketY = PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_Y_OFFSET);
int bucketZ = PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_Z_OFFSET);
long flags = PlatformDependent.getLong(bucketAddr + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET);
if (flags == 0L) { //the bucket is unset. we've reached the end of the bucket chain for this hash, which means it doesn't exist
return DEFAULT_RETURN_VALUE;
} else if (bucketX != searchBucketX || bucketY != searchBucketY || bucketZ != searchBucketZ) { //the bucket doesn't match, so the search must go on
continue;
} else if ((flags & flag) == 0L) { //we've found a matching bucket, but the position's flag is unset. there's nothing for us to do...
return DEFAULT_RETURN_VALUE;
}
//load the old value in order to return it later (there's no reason to zero it out, since the flag bit will be cleared anyway)
int oldVal = PlatformDependent.getByte(bucketAddr + BUCKET_VALUE_OFFSET + VALUE_VALS_OFFSET + positionIndex(x, y, z) * Byte.BYTES) & 0xFF;
//remove entry from map
this.removeEntry(tableAddr, mask, bucketIndex, bucketAddr, flags, flag);
return oldVal;
}
}
/**
 * Gets and removes an entry from this map, then passes it to the given callback function.
 * <p>
 * The callback function is allowed to modify this map, because the entry is removed BEFORE the callback runs.
 *
 * @param action the callback function
 *
 * @return whether or not the callback function was invoked. A return value of {@code false} indicates that the map was already empty
 */
public boolean poll(EntryConsumer action) {
//the entry polled is the lowest set bit of the first bucket in the insertion-order linked list
long bucketIndex = this.firstBucketIndex;
if (bucketIndex >= 0L) {
long tableAddr = this.tableAddr;
long bucketAddr = tableAddr + bucketIndex * BUCKET_BYTES;
//read the bucket's key and flags into registers
int bucketX = PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_X_OFFSET);
int bucketY = PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_Y_OFFSET);
int bucketZ = PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_Z_OFFSET);
long flags = PlatformDependent.getLong(bucketAddr + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET);
assert flags != 0L : "polled empty bucket?!?";
//this is intrinsic and compiles into TZCNT, which has a latency of 3 cycles - much faster than iterating through all 64 bits
// and checking each one individually!
int index = Long.numberOfTrailingZeros(flags);
//compute entry position within bucket
int dx = index >> (BUCKET_AXIS_BITS * 2);
int dy = (index >> BUCKET_AXIS_BITS) & BUCKET_AXIS_MASK;
int dz = index & BUCKET_AXIS_MASK;
int val = PlatformDependent.getByte(bucketAddr + BUCKET_VALUE_OFFSET + VALUE_VALS_OFFSET + index * Byte.BYTES) & 0xFF;
//remove entry from bucket
this.removeEntry(tableAddr, this.tableSize - 1L, bucketIndex, bucketAddr, flags, 1L << index);
//run the callback (safe to mutate the map now that the entry is gone)
action.accept((bucketX << BUCKET_AXIS_BITS) + dx, (bucketY << BUCKET_AXIS_BITS) + dy, (bucketZ << BUCKET_AXIS_BITS) + dz, val);
return true;
} else {
return false;
}
}
//assumes that the entry is present in the bucket
//Clears the entry's flag bit in the bucket; if that leaves the bucket empty, the bucket itself is unlinked from
// the insertion-order list and the probe chain is compacted via shiftBuckets.
protected void removeEntry(long tableAddr, long mask, long bucketIndex, long bucketAddr, long flags, long flag) {
//the bucket that we found contains the position, so now we remove it from the set
this.size--;
//update bucket flags
flags &= ~flag;
PlatformDependent.putLong(bucketAddr + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET, flags);
if (flags == 0L) { //this position was the only position in the bucket, so we need to delete the bucket
this.usedBuckets--;
//remove the bucket from the linked list
long prevBucketIndex = PlatformDependent.getLong(bucketAddr + BUCKET_PREVINDEX_OFFSET);
long nextBucketIndex = PlatformDependent.getLong(bucketAddr + BUCKET_NEXTINDEX_OFFSET);
if (prevBucketIndex < 0L) { //previous bucket is nullptr, meaning the current bucket used to be at the front
this.firstBucketIndex = nextBucketIndex;
} else {
long prevBucketAddr = tableAddr + prevBucketIndex * BUCKET_BYTES;
PlatformDependent.putLong(prevBucketAddr + BUCKET_NEXTINDEX_OFFSET, nextBucketIndex);
}
if (nextBucketIndex < 0L) { //next bucket is nullptr, meaning the current bucket used to be at the back
this.lastBucketIndex = prevBucketIndex;
} else {
long nextBucketAddr = tableAddr + nextBucketIndex * BUCKET_BYTES;
PlatformDependent.putLong(nextBucketAddr + BUCKET_PREVINDEX_OFFSET, prevBucketIndex);
}
//shifting the buckets IS expensive, yes, but it'll only happen when the entire bucket is deleted, which won't happen on every removal
this.shiftBuckets(tableAddr, bucketIndex, mask);
}
}
//adapted from it.unimi.dsi.fastutil.objects.Object2ObjectOpenHashMap#shiftKeys(int)
//Closes the gap left at index 'pos' after a bucket removal: walks the probe chain and moves any bucket that
// would otherwise become unreachable back into the vacated slot, fixing up its linked-list neighbors as it goes.
protected void shiftBuckets(long tableAddr, long pos, long mask) {
    long last;
    long slot;
    while (true) {
        for (pos = ((last = pos) + 1L) & mask; ; pos = (pos + 1L) & mask) {
            long currAddr = tableAddr + pos * BUCKET_BYTES;
            if (PlatformDependent.getLong(currAddr + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET) == 0L) { //curr points to an unset bucket: the probe chain ends here
                //the slot at 'last' was already emptied, either by removeEntry (first iteration) or when the
                // previous bucket was moved out of it (later iterations), so there's nothing left to clear
                assert PlatformDependent.getLong(tableAddr + last * BUCKET_BYTES + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET) == 0L
                        : "bucket at 'last' should already be empty";
                return;
            }
            slot = hashPosition(
                    PlatformDependent.getInt(currAddr + BUCKET_KEY_OFFSET + KEY_X_OFFSET),
                    PlatformDependent.getInt(currAddr + BUCKET_KEY_OFFSET + KEY_Y_OFFSET),
                    PlatformDependent.getInt(currAddr + BUCKET_KEY_OFFSET + KEY_Z_OFFSET)) & mask;
            if (last <= pos ? last >= slot || slot > pos : last >= slot && slot > pos) { //move the bucket
                long newAddr = tableAddr + last * BUCKET_BYTES;
                //copy bucket to new address
                memcpy(currAddr, newAddr, BUCKET_BYTES);
                //clear flags in bucket's old position to mark it as empty
                PlatformDependent.putLong(currAddr + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET, 0L);
                //update pointer to self in linked list neighbors
                long prevBucketIndex = PlatformDependent.getLong(currAddr + BUCKET_PREVINDEX_OFFSET);
                long nextBucketIndex = PlatformDependent.getLong(currAddr + BUCKET_NEXTINDEX_OFFSET);
                if (prevBucketIndex < 0L) { //previous bucket is nullptr, meaning the current bucket used to be at the front
                    this.firstBucketIndex = last;
                } else {
                    long prevBucketAddr = tableAddr + prevBucketIndex * BUCKET_BYTES;
                    PlatformDependent.putLong(prevBucketAddr + BUCKET_NEXTINDEX_OFFSET, last);
                }
                if (nextBucketIndex < 0L) { //next bucket is nullptr, meaning the current bucket used to be at the back
                    this.lastBucketIndex = last;
                } else {
                    long nextBucketAddr = tableAddr + nextBucketIndex * BUCKET_BYTES;
                    PlatformDependent.putLong(nextBucketAddr + BUCKET_PREVINDEX_OFFSET, last);
                }
                break;
            }
        }
    }
}
/**
 * Removes every entry from this map.
 *
 * @see java.util.Map#clear()
 */
public void clear() {
    if (!this.isEmpty()) { //an empty map needs no clearing
        //the map is non-empty, which guarantees the table has been allocated: zero it out wholesale
        PlatformDependent.setMemory(this.tableAddr, this.tableSize * BUCKET_BYTES, (byte) 0);
        //reset the size counters and the linked-list endpoints
        this.usedBuckets = 0L;
        this.size = 0L;
        this.firstBucketIndex = -1L;
        this.lastBucketIndex = -1L;
    }
}
//Records the new table size and recomputes the resize threshold as 3/4 of the bucket count (0.75 load factor).
protected void setTableSize(long tableSize) {
this.tableSize = tableSize;
this.resizeThreshold = (tableSize >> 1L) + (tableSize >> 2L); //count * 0.75
}
/**
 * Named {@code longSize} to avoid clashing with the vanilla use of {@code int Long2ByteOpenHashMap.size()}.
 *
 * @return the number of entries stored in this map
 */
public long longSize() {
return this.size;
}
/**
 * @return whether or not this map is empty (contains no entries)
 */
public boolean isEmpty() {
return this.size == 0L;
}
@Override
public Int3UByteLinkedHashMap clone() {
//delegates to the copy constructor, which is expected to deep-copy the off-heap table
return new Int3UByteLinkedHashMap(this);
}
/**
 * Irrevocably releases the resources claimed by this instance.
 * <p>
 * Once this method has been called, all methods in this class will produce undefined behavior.
 */
@Override
public void close() {
if (this.closed) { //idempotent: subsequent calls are no-ops
return;
}
this.closed = true;
//actually release memory
//NOTE(review): tableAddr is intentionally left non-zero after freeing; the 'closed' flag is the only guard against reuse
if (this.tableAddr != 0L) {
PlatformDependent.freeMemory(this.tableAddr);
}
}
@Override
@SuppressWarnings("deprecation")
protected void finalize() {
//using a finalizer is bad, i know. however, there's no other reasonable way for me to clean up the memory without pulling in PorkLib:unsafe or
// using sun.misc.Cleaner directly...
//close() is idempotent (guarded by the 'closed' flag), so an earlier explicit close makes this a no-op
this.close();
}
//Returns a lightweight key-set view over this map (only forEach is supported by Int3KeySet)
public Int3KeySet int3KeySet(){
return new Int3KeySet();
}
/**
 * A function which accepts a map entry (consisting of 3 {@code int}s for the key and 1 {@code int} for the value) as a parameter.
 */
@FunctionalInterface
public interface EntryConsumer {
//x/y/z: the entry's position; value: the entry's value as an unsigned int in [0, 255]
void accept(int x, int y, int z, int value);
}
//Methods for vanilla compatibility
//Vanilla-compat overload: unpacks the packed BlockPos key and delegates to the int-triple accessor.
public byte get(long l){
    int x = BlockPos.getX(l);
    int y = BlockPos.getY(l);
    int z = BlockPos.getZ(l);
    return (byte) this.get(x, y, z);
}
//Vanilla-compat overload: unpacks the packed BlockPos key and delegates to the int-triple removal.
public byte remove(long l){
    int x = BlockPos.getX(l);
    int y = BlockPos.getY(l);
    int z = BlockPos.getZ(l);
    return (byte) this.remove(x, y, z);
}
//Vanilla-compat overload: unpacks the packed BlockPos key and delegates to the int-triple put.
public byte put(long l, byte value){
    int x = BlockPos.getX(l);
    int y = BlockPos.getY(l);
    int z = BlockPos.getZ(l);
    return (byte) this.put(x, y, z, value);
}
//Vanilla-compat accessor.
//NOTE(review): this truncates the long entry count to an int; fine only while the map never exceeds
// Integer.MAX_VALUE entries -- use longSize() where that isn't guaranteed.
public int size(){
return (int) size;
}
//Iterates over this map's keys as packed BlockPos longs, walking the bucket linked list in insertion order.
//Only forward iteration is supported; all bidirectional/indexed operations throw.
protected class LongKeyIterator implements LongListIterator {
    //index of the bucket currently being consumed, or -1 if the table is unallocated/exhausted
    long bucketIndex;
    //the not-yet-returned presence bits of the current bucket, consumed from the low end
    long currentValue;
    //absolute bit index (within the current bucket) of the most recently returned entry
    int offset = -1;

    public LongKeyIterator() {
        if (tableAddr == 0) { //table was never allocated -> nothing to iterate
            bucketIndex = -1;
            currentValue = 0;
            return;
        }
        this.bucketIndex = firstBucketIndex;
        if (bucketIndex != -1) {
            //read the first bucket's presence bitmap using the same compound offset as the rest of the class
            // (BUCKET_VALUE_OFFSET was previously omitted here, inconsistent with every other flags access)
            currentValue = PlatformDependent.getLong(tableAddr + bucketIndex * BUCKET_BYTES + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET);
        }
    }
    @Override public long previousLong() {
        throw new UnsupportedOperationException();
    }
    @Override public boolean hasPrevious() {
        throw new UnsupportedOperationException();
    }
    @Override public int nextIndex() {
        throw new UnsupportedOperationException();
    }
    @Override public int previousIndex() {
        throw new UnsupportedOperationException();
    }
    @Override public long nextLong() {
        if (currentValue == 0) { //current bucket exhausted -> advance to the next bucket in the linked list
            bucketIndex = PlatformDependent.getLong(tableAddr + bucketIndex * BUCKET_BYTES + BUCKET_NEXTINDEX_OFFSET);
            currentValue = PlatformDependent.getLong(tableAddr + bucketIndex * BUCKET_BYTES + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET);
            offset = -1;
        }
        int shift = Long.numberOfTrailingZeros(currentValue) + 1;
        //use an unsigned shift and special-case shift == 64: Java shift distances are taken mod 64, so the old
        // 'currentValue >>= 64' was a no-op (looping forever on bit 63), and the signed shift smeared the sign bit
        currentValue = shift == 64 ? 0L : currentValue >>> shift;
        offset += shift;
        long bucketAddr = tableAddr + bucketIndex * BUCKET_BYTES;
        //parenthesize the scaled base coordinates: '+' binds tighter than '<<', so the previous unparenthesized
        // form computed 'key << (BUCKET_AXIS_BITS + delta)' instead of '(key << BUCKET_AXIS_BITS) + delta'
        return BlockPos.asLong(
                (PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_X_OFFSET) << BUCKET_AXIS_BITS) + (offset >> (BUCKET_AXIS_BITS * 2)),
                (PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_Y_OFFSET) << BUCKET_AXIS_BITS) + ((offset >> BUCKET_AXIS_BITS) & BUCKET_AXIS_MASK),
                (PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_Z_OFFSET) << BUCKET_AXIS_BITS) + (offset & BUCKET_AXIS_MASK)
        );
    }
    @Override public boolean hasNext() {
        if (bucketIndex == -1) {
            return false;
        }
        //more entries exist if the current bucket still has unconsumed bits, OR another bucket follows in the list
        // (the previous '!(currentValue == 0 && next != -1)' was inverted: it returned false exactly when a next
        // bucket existed, and true when the iteration was actually exhausted)
        return currentValue != 0
                || PlatformDependent.getLong(tableAddr + bucketIndex * BUCKET_BYTES + BUCKET_NEXTINDEX_OFFSET) != -1;
    }
}
//Sorted-set view over this map's keys as packed BlockPos longs; "first"/"last" follow insertion order
// (comparator() returns null), and range views are unsupported.
protected class LongKeySet extends AbstractLongSortedSet {
    @Override
    public LongBidirectionalIterator iterator(long fromElement) {
        throw new UnsupportedOperationException();
    }
    @Override
    public LongBidirectionalIterator iterator() {
        return new LongKeyIterator();
    }
    @Override
    public int size() {
        return (int) size; //NOTE(review): truncates the long entry count, same caveat as the outer size()
    }
    @Override
    public LongSortedSet subSet(long fromElement, long toElement) {
        throw new UnsupportedOperationException();
    }
    @Override
    public LongSortedSet headSet(long toElement) {
        throw new UnsupportedOperationException();
    }
    @Override
    public LongSortedSet tailSet(long fromElement) {
        throw new UnsupportedOperationException();
    }
    @Override
    public LongComparator comparator() {
        //iteration order is insertion order, not a comparator-defined order
        return null;
    }
    @Override
    public long firstLong() {
        if (size == 0)
            throw new NoSuchElementException();
        long bucketAddr = tableAddr + firstBucketIndex * BUCKET_BYTES;
        //read the key and presence bitmap with the same compound offsets used by every other access site in the
        // class (BUCKET_KEY_OFFSET/BUCKET_VALUE_OFFSET were previously omitted here)
        int x = PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_X_OFFSET) << BUCKET_AXIS_BITS;
        int y = PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_Y_OFFSET) << BUCKET_AXIS_BITS;
        int z = PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_Z_OFFSET) << BUCKET_AXIS_BITS;
        long value = PlatformDependent.getLong(bucketAddr + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET);
        int index = Long.numberOfTrailingZeros(value); //lowest set bit = first entry within the bucket
        return BlockPos.asLong(
                x + (index >> (BUCKET_AXIS_BITS * 2)),
                y + ((index >> BUCKET_AXIS_BITS) & BUCKET_AXIS_MASK),
                z + (index & BUCKET_AXIS_MASK)
        );
    }
    @Override
    public long lastLong() {
        if (size == 0)
            throw new NoSuchElementException();
        long bucketAddr = tableAddr + lastBucketIndex * BUCKET_BYTES;
        //same compound-offset consistency fix as firstLong()
        int x = PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_X_OFFSET) << BUCKET_AXIS_BITS;
        int y = PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_Y_OFFSET) << BUCKET_AXIS_BITS;
        int z = PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_Z_OFFSET) << BUCKET_AXIS_BITS;
        long value = PlatformDependent.getLong(bucketAddr + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET);
        int index = 63 - Long.numberOfLeadingZeros(value); //highest set bit = last entry within the bucket
        return BlockPos.asLong(
                x + (index >> (BUCKET_AXIS_BITS * 2)),
                y + ((index >> BUCKET_AXIS_BITS) & BUCKET_AXIS_MASK),
                z + (index & BUCKET_AXIS_MASK)
        );
    }
}
//These methods are very similar to ones defined above
public class Int3KeySet {
    //forEach is the only method ever invoked on this view
    public void forEach(XYZConsumer action) {
        //nothing to visit if the table was never allocated or the map holds no entries
        if (tableAddr == 0L || isEmpty()) {
            return;
        }
        //choose the traversal strategy based on table occupancy
        if (usedBuckets >= (tableSize >> 1L)) { //at least half the buckets are used: scan linearly
            forEachKeyFull(action);
        } else { //sparse table: follow the linked list to skip empty buckets
            forEachKeySparse(action);
        }
    }
}
private void forEachKeySparse(XYZConsumer action) { //optimized for a mostly-empty table: walk the bucket linked list
    long tableAddr = this.tableAddr;
    long bucketIndex = this.firstBucketIndex;
    while (bucketIndex >= 0L) { //-1 marks the end of the list
        long bucketAddr = tableAddr + bucketIndex * BUCKET_BYTES;
        this.forEachKeyInBucket(action, bucketAddr);
        bucketIndex = PlatformDependent.getLong(bucketAddr + BUCKET_NEXTINDEX_OFFSET);
    }
}
private void forEachKeyFull(XYZConsumer action) { //optimized for a mostly-full table: plain linear scan
    long tableAddr = this.tableAddr;
    long tableSize = this.tableSize;
    for (long bucketIndex = 0L; bucketIndex < tableSize; bucketIndex++) {
        this.forEachKeyInBucket(action, tableAddr + bucketIndex * BUCKET_BYTES);
    }
}
private void forEachKeyInBucket(XYZConsumer action, long bucketAddr) {
    //the bucket's base coordinates, pre-scaled from bucket space into entry space
    int baseX = PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_X_OFFSET) << BUCKET_AXIS_BITS;
    int baseY = PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_Y_OFFSET) << BUCKET_AXIS_BITS;
    int baseZ = PlatformDependent.getInt(bucketAddr + BUCKET_KEY_OFFSET + KEY_Z_OFFSET) << BUCKET_AXIS_BITS;
    //visit only the set bits of the presence bitmap: numberOfTrailingZeros is intrinsified (TZCNT) and
    // 'flags & (flags - 1)' clears the lowest set bit, so we skip empty slots entirely
    for (long flags = PlatformDependent.getLong(bucketAddr + BUCKET_VALUE_OFFSET + VALUE_FLAGS_OFFSET);
         flags != 0L;
         flags &= flags - 1L) {
        int index = Long.numberOfTrailingZeros(flags);
        action.accept(
                baseX + (index >> (BUCKET_AXIS_BITS * 2)),
                baseY + ((index >> BUCKET_AXIS_BITS) & BUCKET_AXIS_MASK),
                baseZ + (index & BUCKET_AXIS_MASK));
    }
}
//TODO: Make this more efficient
public LinkedInt3HashSet keySet(){
    //materialize the key set by replaying every entry and discarding its value
    LinkedInt3HashSet keys = new LinkedInt3HashSet();
    this.forEach((x, y, z, value) -> keys.add(x, y, z));
    return keys;
}
}
|
/**
* detect-configuration
*
* Copyright (C) 2019 Black Duck Software, Inc.
* http://www.blackducksoftware.com/
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.blackducksoftware.integration.hub.detect;
/**
 * The known major versions of Detect, each paired with its numeric value.
 */
public enum DetectMajorVersion {
    ONE(1),
    TWO(2),
    THREE(3),
    FOUR(4),
    FIVE(5),
    SIX(6),
    SEVEN(7);

    //the numeric major version; final because it never changes after construction
    private final int value;

    DetectMajorVersion(final int value) {
        this.value = value;
    }

    /**
     * @return the major version number as an {@code int}
     */
    public int getIntValue() {
        return value;
    }

    /**
     * @return the major version rendered as a full version string, e.g. {@code "7.0.0"}
     */
    public String getDisplayValue() {
        return value + ".0.0";
    }
}
|
/*
* Sakuli - Testing and Monitoring-Tool for Websites and common UIs.
*
* Copyright 2013 - 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sakuli.services;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.sakuli.datamodel.AbstractTestDataEntity;
import org.sakuli.datamodel.TestCase;
import org.sakuli.datamodel.TestCaseStep;
import org.sakuli.datamodel.TestSuite;
import org.sakuli.exceptions.SakuliRuntimeException;
import java.util.Optional;
/**
* Service Interface which will be called on different teardown phases of the {@link AbstractTestDataEntity} objects
*
* @author tschneck
* Date: 2/12/16
*/
@SuppressWarnings("OptionalUsedAsFieldOrParameterType")
public interface TeardownService extends PrioritizedService {
/**
 * see {@link #tearDown(Optional, boolean)}.
 */
default void tearDown(Optional<AbstractTestDataEntity> dataEntity) {
tearDown(dataEntity, false);
}
/**
 * Triggers the default actions if some {@link AbstractTestDataEntity} are called by {@link org.sakuli.datamodel.helper.TestSuiteHelper}.
 * Default caller method for:
 * - {@link #teardownTestSuite(TestSuite)}
 * - {@link #teardownTestCase(TestCase)}
 * - {@link #teardownTestCaseStep(TestCaseStep)}
 *
 * @param dataEntity instance of {@link AbstractTestDataEntity}
 * @param asyncCall  indicates if a call is triggered in an async process to the main process to use the correct exception handling, see {@link #handleTeardownException(Exception, boolean, AbstractTestDataEntity)}.
 */
default void tearDown(Optional<AbstractTestDataEntity> dataEntity, boolean asyncCall) {
try {
dataEntity.filter(TestSuite.class::isInstance).map(TestSuite.class::cast)
.ifPresent(this::teardownTestSuite);
dataEntity.filter(TestCase.class::isInstance).map(TestCase.class::cast)
.ifPresent(this::teardownTestCase);
dataEntity.filter(TestCaseStep.class::isInstance).map(TestCaseStep.class::cast)
.ifPresent(this::teardownTestCaseStep);
} catch (Exception e) {
//dataEntity must be present here: the handlers above can only throw after an ifPresent fired, so .get() is safe
handleTeardownException(e, asyncCall, dataEntity.get());
}
}
/**
 * Define exception handling when {@link #tearDown(Optional)} will throw an exception.
 * Can't implement in an function context like here.
 *
 * @param e           any {@link Exception}
 * @param async       defines if the caller is an asynchronous procedure to the main process of {@link org.sakuli.starter.SakuliStarter}
 * @param testDataRef Provides the meta information on which execution step this exception is thrown. See {@link org.sakuli.exceptions.SakuliException#setAsyncTestDataRef(AbstractTestDataEntity)}
 */
void handleTeardownException(@NonNull Exception e, boolean async, @NonNull AbstractTestDataEntity testDataRef);
/**
 * Triggers the different implementations of the {@link TeardownService} for the {@link TestSuite} object.
 * On Exception a {@link RuntimeException} should be thrown to be caught by {@link #tearDown(Optional, boolean)}
 */
default void teardownTestSuite(@NonNull TestSuite testSuite) throws RuntimeException {
throw new SakuliRuntimeException("Method 'teardownTestSuite' is not implemented for forwarder class " + getClass().getSimpleName());
}
/**
 * Triggers the different implementations of the {@link TeardownService} for the {@link TestCase} object.
 * On Exception a {@link RuntimeException} should be thrown to be caught by {@link #tearDown(Optional, boolean)}
 */
default void teardownTestCase(@NonNull TestCase testCase) throws RuntimeException {
throw new SakuliRuntimeException("Method 'teardownTestCase' is not implemented for forwarder class " + getClass().getSimpleName());
}
/**
 * Triggers the different implementations of the {@link TeardownService} for the {@link TestCaseStep} object.
 * On Exception a {@link RuntimeException} should be thrown to be caught by {@link #tearDown(Optional, boolean)}
 */
default void teardownTestCaseStep(@NonNull TestCaseStep testCaseStep) throws RuntimeException {
throw new SakuliRuntimeException("Method 'teardownTestCaseStep' is not implemented for forwarder class " + getClass().getSimpleName());
}
}
|
// "Change parameter 'i' type to 'java.lang.String'" "true"
//NOTE(review): this appears to be IDE quick-fix test-fixture data (the preceding header line encodes the
// expected intention name and availability); the code is intentionally minimal and should not be "improved"
// without checking the owning test -- confirm before modifying.
class Ex{
void foo(String i) {
bar(i);
}
void bar(String s) {}
}
|
// Decompiled by Jad v1.5.8g. Copyright 2001 Pavel Kouznetsov.
// Jad home page: http://www.kpdus.com/jad.html
// Decompiler options: packimports(3) annotate safe
package com.jakewharton.rxbinding.view;
import android.view.View;
import com.jakewharton.rxbinding.internal.MainThreadSubscription;
import com.jakewharton.rxbinding.internal.Preconditions;
import rx.Subscriber;
import rx.functions.Func0;
final class ViewLongClickOnSubscribe
implements rx.Observable.OnSubscribe
{
//Decompiled constructor (Jad output): stores the target view and the 'handled' predicate; the interleaved
// bytecode comments were emitted by the decompiler and also show the 'event' sentinel object being initialized.
ViewLongClickOnSubscribe(View view1, Func0 func0)
{
// 0 0:aload_0
// 1 1:invokespecial #23 <Method void Object()>
// 2 4:aload_0
// 3 5:new #5 <Class Object>
// 4 8:dup
// 5 9:invokespecial #23 <Method void Object()>
// 6 12:putfield #25 <Field Object event>
view = view1;
// 7 15:aload_0
// 8 16:aload_1
// 9 17:putfield #27 <Field View view>
handled = func0;
// 10 20:aload_0
// 11 21:aload_2
// 12 22:putfield #29 <Field Func0 handled>
// 13 25:return
}
//Synthetic bridge method surfaced by the decompiler (the 'volatile' modifier here is Jad's rendering of the
// ACC_BRIDGE flag): forwards the erased Object argument to the type-specific call(Subscriber) overload.
public volatile void call(Object obj)
{
call((Subscriber)obj);
// 0 0:aload_0
// 1 1:aload_1
// 2 2:checkcast #42 <Class Subscriber>
// 3 5:invokevirtual #45 <Method void call(Subscriber)>
// 4 8:return
}
public void call(final Subscriber subscriber)
{
Preconditions.checkUiThread();
// 0 0:invokestatic #50 <Method void Preconditions.checkUiThread()>
android.view.View.OnLongClickListener onlongclicklistener = new android.view.View.OnLongClickListener() {
public boolean onLongClick(View view1)
{
if(((Boolean)handled.call()).booleanValue())
//* 0 0:aload_0
//* 1 1:getfield #19 <Field ViewLongClickOnSubscribe this$0>
//* 2 4:invokestatic #31 <Method Func0 ViewLongClickOnSubscribe.access$000(ViewLongClickOnSubscribe)>
//* 3 7:invokeinterface #36 <Method Object Func0.call()>
//* 4 12:checkcast #38 <Class Boolean>
//* 5 15:invokevirtual #42 <Method boolean Boolean.booleanValue()>
//* 6 18:ifeq 47
{
if(!subscriber.isUnsubscribed())
//* 7 21:aload_0
//* 8 22:getfield #21 <Field Subscriber val$subscriber>
//* 9 25:invokevirtual #47 <Method boolean Subscriber.isUnsubscribed()>
//* 10 28:ifne 45
subscriber.onNext(event);
// 11 31:aload_0
// 12 32:getfield #21 <Field Subscriber val$subscriber>
// 13 35:aload_0
// 14 36:getfield #19 <Field ViewLongClickOnSubscribe this$0>
// 15 39:invokestatic #51 <Method Object ViewLongClickOnSubscribe.access$100(ViewLongClickOnSubscribe)>
// 16 42:invokevirtual #55 <Method void Subscriber.onNext(Object)>
return true;
// 17 45:iconst_1
// 18 46:ireturn
} else
{
return false;
// 19 47:iconst_0
// 20 48:ireturn
}
}
final ViewLongClickOnSubscribe this$0;
final Subscriber val$subscriber;
{
this$0 = ViewLongClickOnSubscribe.this;
// 0 0:aload_0
// 1 1:aload_1
// 2 2:putfield #19 <Field ViewLongClickOnSubscribe this$0>
subscriber = subscriber1;
// 3 5:aload_0
// 4 6:aload_2
// 5 7:putfield #21 <Field Subscriber val$subscriber>
super();
// 6 10:aload_0
// 7 11:invokespecial #24 <Method void Object()>
// 8 14:return
}
}
;
// 1 3:new #9 <Class ViewLongClickOnSubscribe$1>
// 2 6:dup
// 3 7:aload_0
// 4 8:aload_1
// 5 9:invokespecial #53 <Method void ViewLongClickOnSubscribe$1(ViewLongClickOnSubscribe, Subscriber)>
// 6 12:astore_2
view.setOnLongClickListener(onlongclicklistener);
// 7 13:aload_0
// 8 14:getfield #27 <Field View view>
// 9 17:aload_2
// 10 18:invokevirtual #59 <Method void View.setOnLongClickListener(android.view.View$OnLongClickListener)>
subscriber.add(((rx.Subscription) (new MainThreadSubscription() {
protected void onUnsubscribe()
{
view.setOnLongClickListener(((android.view.View.OnLongClickListener) (null)));
// 0 0:aload_0
// 1 1:getfield #15 <Field ViewLongClickOnSubscribe this$0>
// 2 4:invokestatic #24 <Method View ViewLongClickOnSubscribe.access$200(ViewLongClickOnSubscribe)>
// 3 7:aconst_null
// 4 8:invokevirtual #30 <Method void View.setOnLongClickListener(android.view.View$OnLongClickListener)>
// 5 11:return
}
final ViewLongClickOnSubscribe this$0;
{
this$0 = ViewLongClickOnSubscribe.this;
// 0 0:aload_0
// 1 1:aload_1
// 2 2:putfield #15 <Field ViewLongClickOnSubscribe this$0>
super();
// 3 5:aload_0
// 4 6:invokespecial #18 <Method void MainThreadSubscription()>
// 5 9:return
}
}
)));
// 11 21:aload_1
// 12 22:new #11 <Class ViewLongClickOnSubscribe$2>
// 13 25:dup
// 14 26:aload_0
// 15 27:invokespecial #62 <Method void ViewLongClickOnSubscribe$2(ViewLongClickOnSubscribe)>
// 16 30:invokevirtual #66 <Method void Subscriber.add(rx.Subscription)>
// 17 33:return
}
private final Object event = new Object();
private final Func0 handled;
private final View view;
/*
static Func0 access$000(ViewLongClickOnSubscribe viewlongclickonsubscribe)
{
return viewlongclickonsubscribe.handled;
// 0 0:aload_0
// 1 1:getfield #29 <Field Func0 handled>
// 2 4:areturn
}
*/
/*
static Object access$100(ViewLongClickOnSubscribe viewlongclickonsubscribe)
{
return viewlongclickonsubscribe.event;
// 0 0:aload_0
// 1 1:getfield #25 <Field Object event>
// 2 4:areturn
}
*/
/*
static View access$200(ViewLongClickOnSubscribe viewlongclickonsubscribe)
{
return viewlongclickonsubscribe.view;
// 0 0:aload_0
// 1 1:getfield #27 <Field View view>
// 2 4:areturn
}
*/
}
|
/*
* Copyright 2010 Vrije Universiteit
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ibis.ipl.impl.multi;
import ibis.ipl.IbisIdentifier;
import ibis.ipl.RegistryEventHandler;
import java.io.IOException;
public class MultiRegistryEventHandler implements RegistryEventHandler {

    /** User-supplied handler; every pool event is forwarded to it at most once. */
    private final RegistryEventHandler subHandler;

    /** Owning multi-ibis, used to map sub-ibis identifiers to multi identifiers. */
    private final MultiIbis ibis;

    /** Published by {@link #setRegistry}; event delivery blocks until non-null. */
    private MultiRegistry registry;

    /** Name of the sub-ibis this handler listens for; set via {@link #setName}. */
    private String ibisName;

    public MultiRegistryEventHandler(MultiIbis ibis,
            RegistryEventHandler subHandler) {
        this.ibis = ibis;
        this.subHandler = subHandler;
    }

    /**
     * Blocks until the registry has been published through {@link #setRegistry}.
     * Must be invoked while holding this object's monitor (every caller below is
     * a synchronized method). Interrupts are deliberately swallowed because the
     * handler cannot make progress without a registry; re-interrupting here
     * would turn the wait into a busy loop.
     */
    private synchronized void waitForRegistry() {
        while (registry == null) {
            try {
                wait();
            } catch (InterruptedException e) {
                // Ignored: keep waiting until setRegistry() calls notifyAll().
            }
        }
    }

    public synchronized void died(IbisIdentifier corpse) {
        waitForRegistry();
        try {
            MultiIbisIdentifier id = ibis.mapIdentifier(corpse, ibisName);
            // Forward each death only once, even if several sub-ibises report it.
            if (!registry.died.containsKey(id)) {
                registry.died.put(id, id);
                subHandler.died(id);
            }
        } catch (IOException e) {
            // Best effort: if the identifier cannot be mapped the event is dropped.
            // NOTE(review): consider logging instead of swallowing silently.
        }
    }

    public synchronized void electionResult(String electionName,
            IbisIdentifier winner) {
        waitForRegistry();
        try {
            MultiIbisIdentifier id = ibis.mapIdentifier(winner, ibisName);
            // Forward the first result for an election, or a result whose winner changed.
            if (!registry.elected.containsKey(electionName)) {
                registry.elected.put(electionName, id);
                subHandler.electionResult(electionName, id);
            } else {
                MultiIbisIdentifier oldWinner = registry.elected
                        .get(electionName);
                if (!oldWinner.equals(id)) {
                    registry.elected.put(electionName, id);
                    subHandler.electionResult(electionName, id);
                }
            }
        } catch (IOException e) {
            // Best effort: mapping failure drops the event (see NOTE in died()).
        }
    }

    /** Signals are forwarded immediately; they do not need the registry. */
    public void gotSignal(String signal, IbisIdentifier source) {
        subHandler.gotSignal(signal, source);
    }

    public synchronized void joined(IbisIdentifier joinedIbis) {
        waitForRegistry();
        try {
            MultiIbisIdentifier id = ibis.mapIdentifier(joinedIbis, ibisName);
            // Forward each join only once, even if several sub-ibises report it.
            if (!registry.joined.containsKey(id)) {
                registry.joined.put(id, id);
                subHandler.joined(id);
            }
        } catch (IOException e) {
            // Best effort: mapping failure drops the event (see NOTE in died()).
        }
    }

    public synchronized void left(IbisIdentifier leftIbis) {
        waitForRegistry();
        try {
            MultiIbisIdentifier id = ibis.mapIdentifier(leftIbis, ibisName);
            // Forward each leave only once, even if several sub-ibises report it.
            if (!registry.left.containsKey(id)) {
                registry.left.put(id, id);
                subHandler.left(id);
            }
        } catch (IOException e) {
            // Best effort: mapping failure drops the event (see NOTE in died()).
        }
    }

    public synchronized void setName(String ibisName) {
        this.ibisName = ibisName;
    }

    public synchronized void setRegistry(MultiRegistry registry) {
        // NOTE(review): the parameter is ignored and the registry is fetched from
        // the owning ibis instead -- confirm this is intentional before "fixing" it.
        this.registry = (MultiRegistry) ibis.registry();
        // Wake up event threads blocked in waitForRegistry().
        notifyAll();
    }

    public void poolClosed() {
        // FIXME: implement
    }

    public void poolTerminated(IbisIdentifier source) {
        // FIXME: implement
    }
}
|
package gov.cdc.sdp.cbr;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
import com.google.gson.Gson;
import gov.cdc.sdp.cbr.model.PhinMSSourceAttributes;
import gov.cdc.sdp.cbr.model.SDPMessage;
public class SerializerTest {

    /**
     * Round-trips a raw JSON fixture through Gson into {@link SDPMessage} and
     * spot-checks a handful of representative fields, including a nested
     * source-attributes entry.
     */
    @Test
    public void testSerialization() {
        String json = "{\"batch\":true,\"batchId\":\"batch_id_1\",\"batchIndex\":12993399,\"cbrReceivedTime\":\"officia nulla\",\"id\":\"dolor fugiat nulla do\",\"payload\":\"in est ipsum Lorem\",\"recipient\":\"laborum nulla minim amet\",\"sender\":\"ullamco nulla\",\"source\":\"amet exercitation do esse mollit\",\"source_id\":\"cillum nisi labore officia\",\"source_received_time\":\"sit aliquip commodo\","
                + "\"sourceAttributes\": {\"PAYLOADNAME\":\"TEST1\",\"ERRORCODE\":\"0\"}}";
        SDPMessage message = new Gson().fromJson(json, SDPMessage.class);
        assertTrue(message.isBatch());
        assertEquals("batch_id_1", message.getBatchId());
        assertNotNull(message.getSourceAttributes());
        assertEquals("0", message.getSourceAttributes().get(PhinMSSourceAttributes.ERRORCODE));
        // Smoke-call only: verifies toString() does not throw.
        message.toString();
    }
}
|
/*
* Autopsy Forensic Browser
*
* Copyright 2013 Basis Technology Corp.
* Contact: carrier <at> sleuthkit <dot> org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sleuthkit.autopsy.corelibs;
import java.awt.image.BufferedImage;
import java.awt.image.BufferedImageOp;
import org.imgscalr.Scalr;
import org.imgscalr.Scalr.Method;
/**
 * Scalr wrapper to deal with exports and provide thread-safety.
 *
 * <p>All methods are {@code static synchronized}, so at most one Scalr
 * operation runs at a time through this class.
 */
public class ScalrWrapper {

    /** Resizes {@code input} toward {@code width} x {@code height} with antialiasing. */
    public static synchronized BufferedImage resize(BufferedImage input, int width, int height) {
        return Scalr.resize(input, width, height, Scalr.OP_ANTIALIAS);
    }

    /** Resizes {@code input} using a single target {@code size} with antialiasing. */
    public static synchronized BufferedImage resize(BufferedImage input, int size) {
        return Scalr.resize(input, size, Scalr.OP_ANTIALIAS);
    }

    /** Like {@link #resize(BufferedImage, int, int)} but using Scalr's QUALITY method. */
    public static synchronized BufferedImage resizeHighQuality(BufferedImage input, int width, int height) {
        return Scalr.resize(input, Method.QUALITY, width, height, Scalr.OP_ANTIALIAS);
    }

    /** Speed-optimized resize to a target {@code size}; aspect mode chosen automatically. */
    public static synchronized BufferedImage resizeFast(BufferedImage input, int size) {
        return Scalr.resize(input, Method.SPEED, Scalr.Mode.AUTOMATIC, size, Scalr.OP_ANTIALIAS);
    }

    /** Speed-optimized resize toward {@code width} x {@code height}; aspect mode automatic. */
    public static synchronized BufferedImage resizeFast(BufferedImage input, int width, int height) {
        return Scalr.resize(input, Method.SPEED, Scalr.Mode.AUTOMATIC, width, height, Scalr.OP_ANTIALIAS);
    }

    /** Crops {@code input} to {@code width} x {@code height} via {@code Scalr.crop}. */
    public static synchronized BufferedImage cropImage(BufferedImage input, int width, int height) {
        return Scalr.crop(input, width, height, (BufferedImageOp) null);
    }
}
|
package slimeknights.tconstruct.shared;
import slimeknights.tconstruct.world.TinkerWorld;
import net.minecraft.block.Block;
import net.minecraft.block.BlockState;
import net.minecraft.entity.Entity;
import net.minecraft.sound.SoundEvents;
import net.minecraft.util.math.BlockPos;
public class BlockEvents {

  /** Static event hooks only; never instantiated. */
  private BlockEvents() {}

  /**
   * Called when a living entity jumps; applies an extra upward boost when the
   * block jumped from is one of the slimy world blocks.
   */
  public static void onLivingJump(Entity entity) {
    if (entity == null) {
      return;
    }
    BlockPos standingPos = new BlockPos(entity.getX(), entity.getY(), entity.getZ());
    // The entity's own position may be the air block just above the surface.
    if (entity.getEntityWorld().isAir(standingPos)) {
      standingPos = standingPos.down();
    }
    Block jumpedFrom = entity.getEntityWorld().getBlockState(standingPos).getBlock();
    if (TinkerWorld.congealedSlime.contains(jumpedFrom)) {
      bounce(entity, 0.25f);
      return;
    }
    boolean slimyGround = TinkerWorld.slimeDirt.contains(jumpedFrom)
        || TinkerWorld.vanillaSlimeGrass.contains(jumpedFrom)
        || TinkerWorld.earthSlimeGrass.contains(jumpedFrom)
        || TinkerWorld.skySlimeGrass.contains(jumpedFrom)
        || TinkerWorld.enderSlimeGrass.contains(jumpedFrom)
        || TinkerWorld.ichorSlimeGrass.contains(jumpedFrom);
    if (slimyGround) {
      bounce(entity, 0.06f);
    }
  }

  /** Adds {@code amount} to the entity's vertical velocity and plays a squish sound. */
  private static void bounce(Entity entity, float amount) {
    entity.setVelocity(entity.getVelocity().add(0.0D, (double) amount, 0.0D));
    entity.playSound(SoundEvents.ENTITY_SLIME_SQUISH, 0.5f + amount, 1f);
  }
}
|
package me.alen_alex.bridgepractice.listener;
import me.alen_alex.bridgepractice.BridgePractice;
import me.alen_alex.bridgepractice.api.PlayerIslandJoinEvent;
import me.alen_alex.bridgepractice.configurations.Configuration;
import me.alen_alex.bridgepractice.data.DataManager;
import me.alen_alex.bridgepractice.playerdata.PlayerData;
import me.alen_alex.bridgepractice.playerdata.PlayerDataManager;
import org.bukkit.Bukkit;
import org.bukkit.entity.Player;
import org.bukkit.event.EventHandler;
import org.bukkit.event.Listener;
import org.bukkit.event.player.AsyncPlayerPreLoginEvent;
import java.sql.SQLException;
import java.util.UUID;
// NOTE: this class intentionally shares its name with org.bukkit.event.player.PlayerJoinEvent,
// which is why the Bukkit event is referenced by its fully qualified name below.
public class PlayerJoinEvent implements Listener {

    /**
     * Sync join handler: ensures the player has a database row, loads and caches
     * their data, then applies the configured spawn/clear behaviour. The player
     * is kicked -- and handling stops -- when any step fails.
     */
    @EventHandler
    public void onPlayerJoin(org.bukkit.event.player.PlayerJoinEvent event){
        Player player = event.getPlayer();
        UUID playerUUID = player.getUniqueId();
        try {
            if(!DataManager.isUserRegisetered(playerUUID)) {
                DataManager.registerUser(player);
            }
        } catch (SQLException e) {
            // NOTE(review): "&c" is a raw color code; it is shown literally unless
            // translated (e.g. ChatColor.translateAlternateColorCodes) -- confirm.
            player.kickPlayer("&cDatabase seems to be offline. Enquire with administrators");
            e.printStackTrace();
            return; // Fix: do not keep processing a player we just kicked.
        }
        try {
            PlayerDataManager.loadPlayerData(playerUUID);
        } catch (SQLException throwables) {
            throwables.printStackTrace();
            player.kickPlayer("Seems like its unable to load the player data");
            return; // Fix: previously fell through and kicked a second time below.
        }
        if(PlayerDataManager.getCachedPlayerData().containsKey(playerUUID)) {
            if(Configuration.isSpawnOnJoinEnabled()){
                PlayerDataManager.getCachedPlayerData().get(playerUUID).teleportPlayerToSpawn();
            }
            if (Configuration.isClearPlayerOnJoinEnabled()) {
                if (Configuration.getClearPlayerJoinDelay() <= 1) {
                    PlayerDataManager.getCachedPlayerData().get(playerUUID).setToLobbyState();
                } else {
                    // Delay the lobby reset by the configured number of ticks.
                    Bukkit.getScheduler().runTaskLaterAsynchronously(BridgePractice.getPlugin(), new Runnable() {
                        @Override
                        public void run() {
                            PlayerDataManager.getCachedPlayerData().get(playerUUID).setToLobbyState();
                        }
                    }, (long) Configuration.getClearPlayerJoinDelay());
                }
            }
        }else{
            player.kickPlayer("Seems like its unable to load the player data");
        }
    }

    /**
     * Async pre-login handler: registers first-time players before they fully
     * join, denying the login when the database is unreachable.
     */
    @EventHandler
    public void onAsyncPlayerJoin(AsyncPlayerPreLoginEvent event){
        UUID playerUUID = event.getUniqueId();
        try {
            if(!DataManager.isUserRegisetered(playerUUID)) {
                DataManager.registerUser(event.getName(),event.getUniqueId().toString());
            }
        } catch (SQLException e) {
            event.setKickMessage("&cDatabase seems to be offline. Enquire with administrators");
            event.setLoginResult(AsyncPlayerPreLoginEvent.Result.KICK_OTHER);
            e.printStackTrace();
        }
    }
}
|
package jp.co.canon.rss.logmanager.dto.job;
import lombok.Getter;
import lombok.Setter;
import lombok.experimental.Accessors;
// Request DTO for adding a local job step; Lombok generates chained accessors.
@Getter
@Setter
@Accessors(chain = true)
public class ReqLocalJobStepAddDTO {
    // Whether this step is enabled.
    private Boolean enable;
    // Kind of step -- allowed values defined by the consumer; TODO confirm.
    private String stepType;
    // Free-form recipient addresses in addition to the address books below.
    private String [] customEmails;
    // Ids of email address-book entries -- presumably notification targets; verify.
    private int [] emailBookIds;
    // Ids of group address-book entries.
    private int [] groupBookIds;
    // Indices of files this step applies to -- TODO confirm index semantics.
    private int [] fileIndices;
}
|
/*
* Copyright 2020 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thoughtworks.go.plugin.access.configrepo;
import com.thoughtworks.go.plugin.configrepo.contract.CRConfigurationProperty;
import com.thoughtworks.go.plugin.configrepo.contract.CRParseResult;
import com.thoughtworks.go.plugin.configrepo.contract.CRPipeline;
import com.thoughtworks.go.plugin.domain.common.Image;
import com.thoughtworks.go.plugin.domain.configrepo.Capabilities;
import java.util.Collection;
import java.util.Map;
/**
 * Builds the JSON request bodies sent to a config-repo plugin and parses the
 * plugin's JSON responses. (Presumably one implementation exists per message
 * version -- confirm against the extension registration code.)
 */
public interface JsonMessageHandler {
    /** Request body asking the plugin to parse the checked-out {@code destinationFolder}. */
    String requestMessageForParseDirectory(String destinationFolder, Collection<CRConfigurationProperty> configurations);
    /** Request body asking the plugin to parse in-memory contents (path -> content). */
    String requestMessageForParseContent(Map<String, String> contents);
    /** Parses the plugin's response to a parse-directory request. */
    CRParseResult responseMessageForParseDirectory(String responseBody);
    /** Parses the plugin's response to a parse-content request. */
    CRParseResult responseMessageForParseContent(String responseBody);
    /** Request body asking the plugin to export {@code pipeline} to its own format. */
    String requestMessageForPipelineExport(CRPipeline pipeline);
    /** Parses an export response; {@code headers} carry additional export metadata. */
    ExportedConfig responseMessageForPipelineExport(String responseBody, Map<String, String> headers);
    /** Extracts the plugin's declared capabilities from a capabilities response. */
    Capabilities getCapabilitiesFromResponse(String responseBody);
    /** Extracts the plugin's icon image from an image response. */
    Image getImageResponseFromBody(String responseBody);
    /** Request body asking which files in {@code destinationFolder} the plugin would parse. */
    String requestMessageConfigFiles(String destinationFolder, Collection<CRConfigurationProperty> configurations);
    /** Parses the plugin's config-file listing response. */
    ConfigFileList responseMessageForConfigFiles(String responseBody);
}
|
package io.jmix.tests.screen.spaceport;
import io.jmix.ui.screen.*;
import io.jmix.tests.entity.spaceport.SpacePort;
/**
 * Browse/lookup screen for {@link SpacePort} entities. All behaviour comes
 * from the XML descriptor and the {@code StandardLookup} base class; no extra
 * controller logic is needed.
 */
@UiController("SpacePort.browse")
@UiDescriptor("space-port-browse.xml")
@LookupComponent("spacePortsTable")
public class SpacePortBrowse extends StandardLookup<SpacePort> {
}
|
package jam.util;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;
import java.util.function.Function;
/**
 * A map that manufactures missing values on demand.
 *
 * <p>Calling {@code get(key)} for a key that is not yet present creates the
 * value with the factory function supplied at construction time, stores the
 * new mapping, and returns the created value.
 */
public final class AutoMap<K, V> extends MapWrapper<K, V> {
    /** Factory invoked to build the value for a previously unseen key. */
    private final Function<K, V> factory;

    private AutoMap(Map<K, V> map, Function<K, V> factory) {
        super(map);
        this.factory = factory;
    }

    /**
     * Creates an auto-map whose underlying storage is a {@code HashMap}.
     *
     * @param <K> the type of keys maintained by the map.
     * @param <V> the type of values maintained by the map.
     * @param factory the function used to create on-demand instances.
     * @return a new {@code AutoMap} backed by a {@code HashMap}.
     */
    public static <K, V> AutoMap<K, V> hash(Function<K, V> factory) {
        return new AutoMap<>(new HashMap<>(), factory);
    }

    /**
     * Creates an auto-map whose underlying storage is a {@code TreeMap}.
     *
     * @param <K> the type of keys maintained by the map.
     * @param <V> the type of values maintained by the map.
     * @param factory the function used to create on-demand instances.
     * @return a new {@code AutoMap} backed by a {@code TreeMap}.
     */
    public static <K, V> AutoMap<K, V> tree(Function<K, V> factory) {
        return new AutoMap<>(new TreeMap<>(), factory);
    }

    /**
     * Returns the function used to create on-demand instances.
     *
     * @return the on-demand value factory.
     */
    public Function<K, V> getFactory() {
        return factory;
    }

    /**
     * Retrieves the value mapped to {@code key}; when no mapping exists yet, a
     * fresh value is built by the factory, stored, and returned.
     *
     * @param key the key whose value is retrieved (or created).
     * @return the mapped value, or the newly created on-demand value.
     * @throws RuntimeException if a value must be created but {@code key}
     * cannot be cast to this map's key type.
     */
    @SuppressWarnings("unchecked") @Override public V get(Object key) {
        if (!containsKey(key)) {
            K typedKey = (K) key;
            put(typedKey, factory.apply(typedKey));
        }
        return super.get(key);
    }
}
|
package com.bjev.esb.vo.resp;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
/**
 * UserInfoRespVO -- user information response payload.
 *
 * @author wenbin
 * @version V1.0
 * @date 2020-03-18
 */
@Data
public class UserInfoRespVO {
    // User id.
    @ApiModelProperty(value = "用户id")
    private String id;
    // Login account name.
    @ApiModelProperty(value = "账号")
    private String username;
    // Phone number.
    @ApiModelProperty(value = "手机号")
    private String phone;
    // Display nickname.
    @ApiModelProperty(value = "昵称")
    private String nickName;
    // Real (legal) name.
    @ApiModelProperty(value = "真实姓名")
    private String realName;
    // Id of the organization the user belongs to.
    @ApiModelProperty(value = "所属机构id")
    private String deptId;
    // Name of the organization the user belongs to.
    @ApiModelProperty(value = "所属机构名称")
    private String deptName;
}
|
package cn.hikyson.godeye.core.internal.modules.pageload;
import androidx.annotation.Keep;
import java.io.Serializable;
// Kind of UI "page" tracked by the pageload module. @Keep protects the enum
// from code shrinking since it is serialized.
@Keep
public enum PageType implements Serializable {
    // Page backed by an Activity.
    ACTIVITY,
    // Page backed by a Fragment.
    FRAGMENT,
    // Fallback when the page kind cannot be determined.
    UNKNOWN
}
|
package com.jungle68.ibook.activity;
import android.content.DialogInterface;
import android.content.Intent;
import android.os.Bundle;
import android.support.v7.app.AlertDialog;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.support.v7.widget.SearchView;
import android.support.v7.widget.Toolbar;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import com.jungle68.ibook.R;
import com.jungle68.ibook.bean.Question;
import com.jungle68.ibook.dao.QuestionDaoImp;
import com.jungle68.ibook.utils.LinearDecoration;
import com.jungle68.ibook.utils.SharePreferenceUtils;
import com.zhy.adapter.recyclerview.CommonAdapter;
import com.zhy.adapter.recyclerview.MultiItemTypeAdapter;
import com.zhy.adapter.recyclerview.base.ViewHolder;
import com.zhy.adapter.recyclerview.wrapper.EmptyWrapper;
import java.util.ArrayList;
import java.util.List;
import butterknife.BindView;
import butterknife.ButterKnife;
public class SearchActivity extends AppCompatActivity implements MultiItemTypeAdapter.OnItemClickListener {

    /** Intent extra key selecting which field {@link #doSearch} queries. */
    public static final String KEY_TYPE = "type";
    public static final int SEARCH_TYPE_ID = 0;
    public static final int SEARCH_TYPE_NO = 1;
    public static final int SEARCH_TYPE_NAME = 2;
    public static final int SEARCH_TYPE_FENCE_NAME = 3;
    public static final int SEARCH_TYPE_FENCE_RULE = 4;
    public static final int SEARCH_TYPE_QUSTION = 5;
    public static final int SEARCH_TYPE_ANSTER = 6;

    @BindView(R.id.toolbar)
    Toolbar toolbar;
    @BindView(R.id.rv_lsit)
    RecyclerView rvLsit;

    // Which SEARCH_TYPE_* field to query, read from the launching intent.
    private int mType;
    private QuestionDaoImp questionDaoImp;
    // Backing data of the list; mutated in place so the adapter observes changes.
    private List<Question> mDatas = new ArrayList<>();
    private CommonAdapter mAdapter;
    private EmptyWrapper mEmptyWrapper;
    private AlertDialog mDialog;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_search);
        ButterKnife.bind(this);
        // Install our Toolbar as the action bar.
        setSupportActionBar(toolbar);
        questionDaoImp = new QuestionDaoImp(getApplication());
        mType = getIntent().getIntExtra(KEY_TYPE, SEARCH_TYPE_ID);
        initReCycleView();
    }

    /**
     * Runs the DAO query selected by {@link #mType} and refreshes the list.
     * An unknown type, or a DAO returning null, now clears the list instead of
     * crashing with a NullPointerException.
     */
    private void doSearch(String query) {
        List<Question> datas = null;
        switch (mType) {
            case SEARCH_TYPE_ID:
            case SEARCH_TYPE_NO:
                // NOTE(review): the "NO" search reuses the id query -- confirm
                // the DAO really has no dedicated question-number query.
                datas = questionDaoImp.qureyDataById(query);
                break;
            case SEARCH_TYPE_NAME:
                datas = questionDaoImp.qureyDataByName(query);
                break;
            case SEARCH_TYPE_FENCE_NAME:
                datas = questionDaoImp.qureyDataByfencename(query);
                break;
            case SEARCH_TYPE_FENCE_RULE:
                datas = questionDaoImp.qureyDataByRule(query);
                break;
            case SEARCH_TYPE_QUSTION:
                datas = questionDaoImp.qureyDataByQuestion(query);
                break;
            case SEARCH_TYPE_ANSTER:
                datas = questionDaoImp.qureyDataByAnster(query);
                break;
        }
        mDatas.clear();
        // Fix: previously NPE'd at addAll() for an unknown type / null DAO result.
        if (datas != null) {
            mDatas.addAll(datas);
        }
        mEmptyWrapper.notifyDataSetChanged();
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        getMenuInflater().inflate(R.menu.search, menu);
        final MenuItem myActionMenuItem = menu.findItem(R.id.action_search);
        final SearchView searchView = (SearchView) myActionMenuItem.getActionView();
        searchView.setOnQueryTextListener(new SearchView.OnQueryTextListener() {
            @Override
            public boolean onQueryTextSubmit(String query) {
                doSearch(query);
                // Collapse the search widget once the query has been submitted.
                if (!searchView.isIconified()) {
                    searchView.setIconified(true);
                }
                myActionMenuItem.collapseActionView();
                return false;
            }
            @Override
            public boolean onQueryTextChange(String s) {
                // Live filtering intentionally disabled; search runs on submit only.
                return false;
            }
        });
        return true;
    }

    /** Configures the RecyclerView, its adapter, and the empty-view wrapper. */
    private void initReCycleView() {
        rvLsit.setHasFixedSize(true);
        rvLsit.setLayoutManager(new LinearLayoutManager(this));
        rvLsit.addItemDecoration(new LinearDecoration(60, 0, 0, 0));
        mAdapter = new CommonAdapter<Question>(this, R.layout.item_content, mDatas) {
            @Override
            protected void convert(ViewHolder viewHolder, Question question, int position) {
                viewHolder.setText(R.id.tv_number, "[" + question.get_id() + "] " + question.getId() + "、" + question.getQuestion());
                viewHolder.setVisible(R.id.cb_a, true);
                viewHolder.setVisible(R.id.cb_b, true);
                viewHolder.setVisible(R.id.cb_c, true);
                viewHolder.setVisible(R.id.cb_d, true);
                viewHolder.setText(R.id.cb_a, question.getItema());
                viewHolder.setText(R.id.cb_b, question.getItemb());
                viewHolder.setText(R.id.cb_c, question.getItemc());
                viewHolder.setText(R.id.cb_d, question.getItemd());
                viewHolder.setText(R.id.tv_anster, "答案:" + question.getAnser());
            }
        };
        mEmptyWrapper = new EmptyWrapper(mAdapter);
        mEmptyWrapper.setEmptyView(R.layout.view_empty);
        rvLsit.setAdapter(mEmptyWrapper);
        mAdapter.setOnItemClickListener(this);
    }

    /**
     * Asks for confirmation, then stores the tapped question as the current one
     * and restarts {@link MainActivity} on it.
     */
    @Override
    public void onItemClick(View view, RecyclerView.ViewHolder holder, final int position) {
        mDialog = new AlertDialog.Builder(this)
                .setTitle(R.string.choose_qustion_dialog_title)
                .setMessage(R.string.choose_qustion_dialog_message)
                .setPositiveButton(R.string.ok, new DialogInterface.OnClickListener() {
                    @Override
                    public void onClick(DialogInterface dialog, int which) {
                        SharePreferenceUtils.saveLong(getApplicationContext(), SharePreferenceUtils.TAG_MCURRENTID, mDatas.get(position).get_id());
                        System.out.println("question = " + mDatas.get(position).get_id());
                        mDialog.dismiss();
                        startActivity(new Intent(SearchActivity.this,MainActivity.class));
                        finish();
                    }
                })
                .setNegativeButton(R.string.cancel, new DialogInterface.OnClickListener() {
                    @Override
                    public void onClick(DialogInterface dialog, int which) {
                        mDialog.dismiss();
                    }
                })
                .create();
        mDialog.show();
    }

    @Override
    public boolean onItemLongClick(View view, RecyclerView.ViewHolder holder, int position) {
        // Long presses are not handled.
        return false;
    }
}
|
package desafios.diarios;
/** Simple book record with a title and an author. */
public class Book {
    /** Book title; empty string when unknown. */
    protected String title;
    /** Book author; empty string when unknown. */
    protected String author;

    /** Creates a book with empty title and author. */
    public Book() {
        // Fix: chain to the main constructor instead of duplicating the assignments.
        this("", "");
    }

    /**
     * @param title  the book's title
     * @param author the book's author
     */
    public Book(String title, String author) {
        this.title = title;
        this.author = author;
    }

    /** Prints the title and the author to standard out, one per line. */
    void display() {
        System.out.println("Title: " + title);
        System.out.println("Author: " + author);
    }
}
|
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.amplify.model;
import java.io.Serializable;
import javax.annotation.Generated;
/**
 * <p>
 * The result structure for the update branch request.
 * </p>
 *
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateBranch" target="_top">AWS API
 *      Documentation</a>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class UpdateBranchResult extends com.amazonaws.AmazonWebServiceResult<com.amazonaws.ResponseMetadata> implements Serializable, Cloneable {

    /** The branch for an Amplify app, which maps to a third-party repository branch. */
    private Branch branch;

    /**
     * Sets the branch for an Amplify app, which maps to a third-party repository branch.
     *
     * @param branch the branch to store.
     */
    public void setBranch(Branch branch) {
        this.branch = branch;
    }

    /**
     * Returns the branch for an Amplify app, which maps to a third-party repository branch.
     *
     * @return the stored branch.
     */
    public Branch getBranch() {
        return this.branch;
    }

    /**
     * Fluent setter for the branch.
     *
     * @param branch the branch to store.
     * @return this object, so that method calls can be chained together.
     */
    public UpdateBranchResult withBranch(Branch branch) {
        setBranch(branch);
        return this;
    }

    /**
     * Renders this object as {@code {Branch: <branch>}}, omitting the entry
     * when the branch is unset. Useful for testing and debugging.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder text = new StringBuilder("{");
        if (getBranch() != null) {
            text.append("Branch: ").append(getBranch());
        }
        return text.append("}").toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof UpdateBranchResult)) {
            return false;
        }
        UpdateBranchResult that = (UpdateBranchResult) obj;
        if (this.getBranch() == null) {
            // Equal only when the other branch is also unset.
            return that.getBranch() == null;
        }
        return that.getBranch() != null && that.getBranch().equals(this.getBranch());
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        Branch current = getBranch();
        // Same value as the generated prime * 1 + hash scheme.
        return prime + ((current == null) ? 0 : current.hashCode());
    }

    @Override
    public UpdateBranchResult clone() {
        try {
            return (UpdateBranchResult) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }
}
|
package com.vk.api.sdk.queries.wall;
import com.vk.api.sdk.client.AbstractQueryBuilder;
import com.vk.api.sdk.client.VkApiClient;
import com.vk.api.sdk.client.actors.UserActor;
import com.vk.api.sdk.objects.base.responses.OkResponse;
import java.util.Arrays;
import java.util.List;
/**
 * Query builder for the Wall.delete VK API method.
 */
public class WallDeleteQuery extends AbstractQueryBuilder<WallDeleteQuery, OkResponse> {
    /**
     * Creates an {@code AbstractQueryBuilder} instance that can be used to build an api request with various parameters.
     *
     * @param client VK API client
     * @param actor  actor with access token
     */
    public WallDeleteQuery(VkApiClient client, UserActor actor) {
        super(client, "wall.delete", OkResponse.class);
        accessToken(actor.getAccessToken());
    }
    /**
     * User ID or community ID. Use a negative value to designate a community ID.
     *
     * @param value value of "owner id" parameter.
     * @return a reference to this {@code AbstractQueryBuilder} object to fulfill the "Builder" pattern.
     */
    public WallDeleteQuery ownerId(Integer value) {
        return unsafeParam("owner_id", value);
    }
    /**
     * ID of the post to be deleted.
     *
     * @param value value of "post id" parameter. Minimum is 0.
     * @return a reference to this {@code AbstractQueryBuilder} object to fulfill the "Builder" pattern.
     */
    public WallDeleteQuery postId(Integer value) {
        return unsafeParam("post_id", value);
    }
    @Override
    protected WallDeleteQuery getThis() {
        // Required by the builder base class so chained calls keep the concrete type.
        return this;
    }
    @Override
    protected List<String> essentialKeys() {
        // access_token is the only parameter that must always be present.
        return Arrays.asList("access_token");
    }
}
|
/*
* Copyright 2018 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.workbench.client.entrypoint;
import javax.annotation.PostConstruct;
import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.event.Event;
import javax.inject.Inject;
import elemental2.dom.HTMLButtonElement;
import elemental2.dom.HTMLDivElement;
import elemental2.dom.HTMLElement;
import elemental2.dom.HTMLTextAreaElement;
import org.jboss.errai.ui.client.local.spi.TranslationService;
import org.jboss.errai.ui.shared.api.annotations.DataField;
import org.jboss.errai.ui.shared.api.annotations.EventHandler;
import org.jboss.errai.ui.shared.api.annotations.ForEvent;
import org.jboss.errai.ui.shared.api.annotations.Templated;
import org.kie.workbench.common.workbench.client.resources.i18n.DefaultWorkbenchConstants;
import org.uberfire.client.util.Clipboard;
import org.uberfire.ext.editor.commons.client.file.popups.elemental2.Elemental2Modal;
import org.uberfire.workbench.events.NotificationEvent;
import static elemental2.dom.DomGlobal.console;
import static org.uberfire.workbench.events.NotificationEvent.NotificationType.SUCCESS;
import static org.uberfire.workbench.events.NotificationEvent.NotificationType.WARNING;
@Templated
@ApplicationScoped
public class GenericErrorPopup extends Elemental2Modal<GenericErrorPopup> implements Elemental2Modal.View<GenericErrorPopup> {
@Inject
@DataField("header")
private HTMLDivElement header;
@Inject
@DataField("body")
private HTMLDivElement body;
@Inject
@DataField("footer")
private HTMLDivElement footer;
@Inject
@DataField("ignore-button")
private HTMLButtonElement ignoreButton;
@Inject
@DataField("copy-details-button")
private HTMLButtonElement copyDetailsButton;
@Inject
@DataField("error-details")
private HTMLTextAreaElement errorDetails;
@Inject
private Event<NotificationEvent> notificationEvent;
private final Clipboard clipboard;
@Inject
public GenericErrorPopup(final GenericErrorPopup view,
final Clipboard clipboard) {
super(view);
this.clipboard = clipboard;
}
@PostConstruct
public void init() {
super.setup();
}
@Override
public void init(final GenericErrorPopup this_) {
}
public void setup(final String details) {
if (isShowing()) {
//If multiple errors occur, we want to know the details of each one of them. In order.
errorDetails.textContent += " | " + details;
} else {
errorDetails.textContent = details;
}
}
@EventHandler("ignore-button")
private void onIgnoreButtonClicked(final @ForEvent("click") elemental2.dom.Event e) {
console.error(errorDetails.textContent);
hide();
}
@EventHandler("copy-details-button")
private void onCopyDetailsButtonClicked(final @ForEvent("click") elemental2.dom.Event e) {
final boolean copySucceeded = clipboard.copy(errorDetails);
if (copySucceeded) {
notificationEvent.fire(new NotificationEvent(DefaultWorkbenchConstants.INSTANCE.ErrorDetailsSuccessfullyCopiedToClipboard(), SUCCESS));
} else {
notificationEvent.fire(new NotificationEvent(DefaultWorkbenchConstants.INSTANCE.ErrorDetailsFailedToBeCopiedToClipboard(), WARNING));
}
console.error(errorDetails.textContent);
hide();
}
@Override
public String getHeader() {
return header.textContent;
}
@Override
public HTMLElement getBody() {
return body;
}
@Override
public HTMLElement getFooter() {
return footer;
}
}
|
/*
* #%L
* Service Locator Client for CXF
* %%
* Copyright (c) 2006-2021 Talend Inc. - www.talend.com
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package org.talend.esb.servicelocator.client.internal.zk;
import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.eq;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.equalTo;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.talend.esb.servicelocator.TestValues.SERVICE_QNAME_1;
import static org.talend.esb.servicelocator.TestValues.SERVICE_QNAME_2;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;
import javax.xml.namespace.QName;
import org.apache.zookeeper.CreateMode;
import org.junit.Before;
import org.junit.Test;
import org.talend.esb.servicelocator.client.internal.ServiceNode;
import org.talend.esb.servicelocator.client.internal.zk.ZKBackend.NodeMapper;
/**
 * Unit tests for {@code RootNodeImpl}, driven through an EasyMock'ed
 * {@code ZKBackend}. The root node's content is the string
 * {@code "<version>,<authenticated>"} encoded as UTF-8; empty or missing
 * content falls back to version 5.1 with authentication disabled.
 */
public class RootNodeImplTest {

    public static final String V_5_1 = "5.1";
    public static final String V_5_2_0 = "5.2.0";

    private ZKBackend backend;
    private RootNodeImpl rootNode;

    @Before
    public void setup() {
        backend = createMock(ZKBackend.class);
        rootNode = new RootNodeImpl(backend);
    }

    /** A service node obtained from the root carries the requested QName. */
    @Test
    public void getServiceNode() {
        ServiceNode node = rootNode.getServiceNode(SERVICE_QNAME_1);
        assertEquals(SERVICE_QNAME_1, node.getServiceName());
    }

    /** Service names are the backend's children of the root node. */
    @Test
    public void getServiceNames() throws Exception {
        RootNodeImpl eqRootNode = eq(rootNode);
        NodeMapper<QName> anyBinder = anyObject();
        expect(backend.getChildren(eqRootNode, anyBinder)).
            andReturn(Arrays.asList(SERVICE_QNAME_1, SERVICE_QNAME_2));
        replay(backend);

        List<QName> serviceNames = rootNode.getServiceNames();

        assertThat(serviceNames, containsInAnyOrder(SERVICE_QNAME_1, SERVICE_QNAME_2));
        verify(backend);
    }

    @Test
    public void existsTrue() throws Exception {
        expect(backend.nodeExists(rootNode)).andReturn(true);
        replay(backend);

        assertTrue(rootNode.exists());
        verify(backend);
    }

    @Test
    public void existsFalse() throws Exception {
        expect(backend.nodeExists(rootNode)).andReturn(false);
        replay(backend);

        assertFalse(rootNode.exists());
        verify(backend);
    }

    /** ensureExists() delegates to the backend with a persistent create mode. */
    @Test
    public void ensureExists() throws Exception {
        backend.ensurePathExists(rootNode, CreateMode.PERSISTENT);
        replay(backend);

        rootNode.ensureExists();
        verify(backend);
    }

    @Test
    public void isAuthenticationEnabledTrue() throws Exception {
        expect(backend.nodeExists(rootNode)).andReturn(true);
        expect(backend.getContent(rootNode)).andReturn(getData(V_5_2_0, true));
        replay(backend);

        assertTrue(rootNode.isAuthenticationEnabled());
        verify(backend);
    }

    @Test
    public void isAuthenticationEnabledFalse() throws Exception {
        expect(backend.nodeExists(rootNode)).andReturn(true);
        expect(backend.getContent(rootNode)).andReturn(getData(V_5_2_0, false));
        replay(backend);

        assertFalse(rootNode.isAuthenticationEnabled());
        verify(backend);
    }

    /** Empty content defaults to authentication disabled. */
    @Test
    public void isAuthenticationEnabledNoContent() throws Exception {
        expect(backend.nodeExists(rootNode)).andReturn(true);
        expect(backend.getContent(rootNode)).andReturn(new byte[0]);
        replay(backend);

        assertFalse(rootNode.isAuthenticationEnabled());
        verify(backend);
    }

    @Test
    public void getVersion() throws Exception {
        expect(backend.nodeExists(rootNode)).andReturn(true);
        expect(backend.getContent(rootNode)).andReturn(getData(V_5_2_0, true));
        replay(backend);

        String version = rootNode.getVersion();

        assertThat(version, equalTo(V_5_2_0));
        verify(backend);
    }

    @Test
    public void getVersionOtherValue() throws Exception {
        expect(backend.nodeExists(rootNode)).andReturn(true);
        expect(backend.getContent(rootNode)).andReturn(getData(V_5_1, true));
        replay(backend);

        String version = rootNode.getVersion();

        assertThat(version, equalTo(V_5_1));
        verify(backend);
    }

    /** Empty content defaults to version 5.1. */
    @Test
    public void getVersionNoContent() throws Exception {
        expect(backend.nodeExists(rootNode)).andReturn(true);
        expect(backend.getContent(rootNode)).andReturn(new byte[0]);
        replay(backend);

        String version = rootNode.getVersion();

        assertThat(version, equalTo(V_5_1));
        verify(backend);
    }

    /**
     * Both accessors are served from a single backend read; the strict mock
     * would fail verify() if getContent were called twice.
     */
    @Test
    public void contentOnlyRetrievedOnce() throws Exception {
        expect(backend.nodeExists(rootNode)).andReturn(true);
        expect(backend.getContent(rootNode)).andReturn(getData(V_5_1, false));
        replay(backend);

        String version = rootNode.getVersion();

        assertFalse(rootNode.isAuthenticationEnabled());
        assertThat(version, equalTo(V_5_1));
        verify(backend);
    }

    /** A missing root node yields the 5.1 / unauthenticated defaults. */
    @Test
    public void retrieveContentNodeDoesntExist() throws Exception {
        expect(backend.nodeExists(rootNode)).andReturn(false);
        replay(backend);

        String version = rootNode.getVersion();

        assertFalse(rootNode.isAuthenticationEnabled());
        assertThat(version, equalTo(V_5_1));
        verify(backend);
    }

    /**
     * Serializes root-node content in the "version,authenticated" format the
     * production code parses.
     *
     * StandardCharsets.UTF_8 replaces the magic string "utf-8": it is always
     * available, so the checked UnsupportedEncodingException disappears.
     */
    private byte[] getData(String version, boolean authenticated) {
        String combined = version + "," + Boolean.toString(authenticated);
        return combined.getBytes(StandardCharsets.UTF_8);
    }
}
|
package com.tastybug.timetracker.extension.backup.controller.dataexport;
import android.content.Context;
import com.tastybug.timetracker.core.model.json.JsonMarshallingBuilder;
import org.json.JSONArray;
import org.json.JSONException;
import java.io.UnsupportedEncodingException;
/**
 * Produces a full JSON export of the application data as a UTF-8 encoded
 * byte array, delegating the marshalling to {@link JsonMarshallingBuilder}.
 */
class DataExportCreator {

    private final JsonMarshallingBuilder jsonMarshallingBuilder;

    DataExportCreator(Context context) {
        this.jsonMarshallingBuilder = new JsonMarshallingBuilder(context);
    }

    /** Injection constructor, useful for tests. */
    DataExportCreator(JsonMarshallingBuilder jsonMarshallingBuilder) {
        this.jsonMarshallingBuilder = jsonMarshallingBuilder;
    }

    /**
     * Marshals all data to a JSON array and returns its UTF-8 bytes.
     *
     * @throws JSONException if building the JSON array fails
     * @throws UnsupportedEncodingException retained in the signature for
     *         caller compatibility; no longer actually thrown now that a
     *         Charset constant is used instead of the magic string "utf-8"
     */
    byte[] getDataAsByteArray() throws JSONException, UnsupportedEncodingException {
        JSONArray jsonArray = jsonMarshallingBuilder.build();
        // Fully qualified to avoid touching this file's import block.
        return jsonArray.toString().getBytes(java.nio.charset.StandardCharsets.UTF_8);
    }
}
|
package cn.hmxz.modules.sys.service.impl;
import cn.hmxz.modules.sys.dao.SysConfigDao;
import cn.hmxz.modules.sys.entity.SysConfigEntity;
import com.alibaba.fastjson.JSON;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import cn.hmxz.modules.sys.service.SysConfigService;
import cn.hmxz.common.exception.RRException;
import cn.hmxz.common.utils.PageUtils;
import cn.hmxz.common.utils.Query;
import org.apache.commons.lang3.StringUtils;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import java.util.Arrays;
import java.util.Map;
@Service("sysConfigService")
public class SysConfigServiceImpl extends ServiceImpl<SysConfigDao, SysConfigEntity> implements SysConfigService {
@Override
public PageUtils queryPage(Map<String, Object> params) {
String paramKey = (String) params.get("paramKey");
IPage<SysConfigEntity> page = this.page(
new Query<SysConfigEntity>().getPage(params),
new QueryWrapper<SysConfigEntity>()
.like(StringUtils.isNotBlank(paramKey), "param_key", paramKey)
.eq("status", 1)
);
return new PageUtils(page);
}
@Override
public void saveConfig(SysConfigEntity config) {
this.save(config);
}
@Override
@Transactional(rollbackFor = Exception.class)
public void update(SysConfigEntity config) {
this.updateById(config);
}
@Override
@Transactional(rollbackFor = Exception.class)
public void updateValueByKey(String key, String value) {
baseMapper.updateValueByKey(key, value);
}
@Override
@Transactional(rollbackFor = Exception.class)
public void deleteBatch(Long[] ids) {
this.removeByIds(Arrays.asList(ids));
}
@Override
public String getValue(String key) {
SysConfigEntity config = baseMapper.queryByKey(key);
return config == null ? null : config.getParamValue();
}
@Override
public SysConfigEntity getSysConfig(String key) {
return baseMapper.queryByKey(key);
}
@Override
public <T> T getConfigObject(String key, Class<T> clazz) {
String value = getValue(key);
if (StringUtils.isNotBlank(value)) {
return JSON.parseObject(value, clazz);
}
try {
return clazz.newInstance();
} catch (Exception e) {
throw new RRException("获取参数失败");
}
}
}
|
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package Overloading;
/**
*
* @author anitha.ramatenki
*/
/**
 * A shirt with a one-letter color code ('R', 'G' or 'B'; 'U' means unset),
 * a non-negative price and a free-text description. Demonstrates constructor
 * overloading: the three-argument constructor chains to the two-argument one.
 */
public class Shirt {

    // Default fixed: the original read "-desccription required-" (typo).
    private String description = "-description required-";
    // Kept public for backward compatibility with existing callers;
    // prefer getColorCode()/setColorCode() for validated access.
    public char colorCode = 'U';
    private double price = 0.0;

    public Shirt(char colorCode, double price) {
        // Route through the setters so validation applies at construction.
        setColorCode(colorCode);
        setPrice(price);
    }

    public Shirt(char colorCode, double price, String description) {
        this(colorCode, price);
        setDescription(description);
    }

    public void setDescription(String newDescription) {
        description = newDescription;
    }

    /** Rejects negative prices, leaving the current price unchanged. */
    public void setPrice(double newPrice) {
        if (newPrice < 0.00) {
            System.out.println("Price must not be negative");
            return;
        }
        price = newPrice;
    }

    public char getColorCode() {
        return colorCode;
    }

    /**
     * Accepts only 'R', 'G' or 'B'; any other code is rejected with a console
     * message and the current color is kept. (Collapses the original chain of
     * three identical if-blocks into a single condition.)
     */
    public void setColorCode(char newCode) {
        if (newCode == 'R' || newCode == 'G' || newCode == 'B') {
            colorCode = newCode;
        } else {
            System.out.println("Invalid color code. Use R, G, B");
        }
    }

    /** Prints a one-line summary of this shirt to stdout. */
    public void display() {
        System.out.println("Shirt:[" + description + "," + colorCode + "," + price + "]");
    }
}
|
package me.list.twitchboard.view;
/**
 * MVP view contract for the chat screen: the presenter pushes display-only
 * updates through these callbacks.
 *
 * Created by masterjefferson on 7/23/2016.
 */
public interface ChatView {
    // Displays a notification message to the user (exact presentation is up
    // to the implementation).
    void showNotification(String msg);
    // Displays a chat message that the local user has just sent.
    void showSentMessage(String msg);
}
|
/*
* Copyright 2017 Daniel Nilsson
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.github.dannil.scbjavaclient.client.population.statistics.household;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import com.github.dannil.scbjavaclient.test.extensions.Date;
import com.github.dannil.scbjavaclient.test.extensions.Remote;
import com.github.dannil.scbjavaclient.test.extensions.Suite;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
/**
 * Remote integration tests for {@code PopulationStatisticsHouseholdClient}.
 * Each test only asserts that the remote call yields a non-empty result, for
 * the no-argument overload, empty-filter lists, and a sample filter selection.
 */
@Suite
@Remote
public class PopulationStatisticsHouseholdClientIT {

    private PopulationStatisticsHouseholdClient client;

    @BeforeEach
    public void setup() {
        this.client = new PopulationStatisticsHouseholdClient();
    }

    @Test
    @Date("2017-05-10")
    public void getNumberOfHouseholdsAndPersons() {
        assertNotEquals(0, this.client.getNumberOfHouseholdsAndPersons().size());
    }

    @Test
    @Date("2017-05-10")
    public void getNumberOfHouseholdsAndPersonsWithParametersEmptyLists() {
        // Empty filter lists should behave like "no filter".
        assertNotEquals(0,
                this.client.getNumberOfHouseholdsAndPersons(Collections.<String>emptyList(),
                        Collections.<String>emptyList(), Collections.<String>emptyList(),
                        Collections.<Integer>emptyList()).size());
    }

    @Test
    @Date("2017-05-10")
    public void getNumberOfHouseholdsAndPersonsWithParameters() {
        // Sample selection: regions, household types, children brackets, year.
        assertNotEquals(0,
                this.client.getNumberOfHouseholdsAndPersons(
                        Arrays.asList("0162", "0180"),
                        Arrays.asList("SMUB", "SAKNAS"),
                        Arrays.asList("M1B", "M2B"),
                        Arrays.asList(2014)).size());
    }

    @Test
    @Date("2017-05-10")
    public void getNumberOfPersons() {
        assertNotEquals(0, this.client.getNumberOfPersons().size());
    }

    @Test
    @Date("2017-05-10")
    public void getNumberOfPersonsWithParametersEmptyLists() {
        assertNotEquals(0,
                this.client.getNumberOfPersons(Collections.<String>emptyList(), Collections.<String>emptyList(),
                        Collections.<Integer>emptyList(), Collections.<String>emptyList(),
                        Collections.<Integer>emptyList()).size());
    }

    @Test
    @Date("2017-05-10")
    public void getNumberOfPersonsWithParameters() {
        // Sample selection: household types, statuses, sexes, ages, year.
        assertNotEquals(0,
                this.client.getNumberOfPersons(
                        Arrays.asList("SMUB", "SAKNAS"),
                        Arrays.asList("BBarn", "Bensm"),
                        Arrays.asList(1, 2),
                        Arrays.asList("0", "100+"),
                        Arrays.asList(2014)).size());
    }

    @Test
    @Date("2017-05-10")
    public void getNumberOfAndPercentageOfPersonsAndHouseholds() {
        assertNotEquals(0, this.client.getNumberOfAndPercentageOfPersonsAndHouseholds().size());
    }

    @Test
    @Date("2017-05-10")
    public void getNumberOfAndPercentageOfPersonsAndHouseholdsWithParametersEmptyLists() {
        assertNotEquals(0, this.client.getNumberOfAndPercentageOfPersonsAndHouseholds(Collections.<String>emptyList(),
                Collections.<String>emptyList(), Collections.<Integer>emptyList()).size());
    }

    @Test
    @Date("2017-05-10")
    public void getNumberOfAndPercentageOfPersonsAndHouseholdsWithParameters() {
        // Sample selection: regions, household sizes, year.
        assertNotEquals(0,
                this.client.getNumberOfAndPercentageOfPersonsAndHouseholds(
                        Arrays.asList("0162", "0180"),
                        Arrays.asList("4P", "US"),
                        Arrays.asList(2014)).size());
    }
}
|
package com.alipay.api.response;
import com.alipay.api.internal.mapping.ApiField;
import com.alipay.api.AlipayResponse;
/**
 * ALIPAY API: alipay.ins.auto.autoinsprod.user.certify response.
 *
 * @author auto create
 * @since 1.0, 2017-12-13 11:31:13
 */
public class AlipayInsAutoAutoinsprodUserCertifyResponse extends AlipayResponse {

    private static final long serialVersionUID = 7872293854117317261L;

    /**
     * 验证结果 (certification result), mapped from the "agent_cert_result"
     * field of the API response.
     */
    @ApiField("agent_cert_result")
    private String agentCertResult;

    public String getAgentCertResult() {
        return this.agentCertResult;
    }

    public void setAgentCertResult(String agentCertResult) {
        this.agentCertResult = agentCertResult;
    }
}
|
/*
* Copyright (C) 2019 highstreet technologies GmbH Intellectual Property.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
package org.onap.ccsdk.features.sdnr.wt.odlclient.remote;
import org.eclipse.jdt.annotation.NonNull;
import org.onap.ccsdk.features.sdnr.wt.odlclient.remote.transactions.RemoteDeviceReadOnlyTransaction;
import org.onap.ccsdk.features.sdnr.wt.odlclient.remote.transactions.RemoteDeviceReadWriteTransaction;
import org.onap.ccsdk.features.sdnr.wt.odlclient.remote.transactions.RemoteWriteOnlyTransaction;
import org.onap.ccsdk.features.sdnr.wt.odlclient.restconf.RestconfHttpClient;
import org.opendaylight.mdsal.binding.api.ReadTransaction;
import org.opendaylight.mdsal.binding.api.ReadWriteTransaction;
import org.opendaylight.mdsal.binding.api.WriteTransaction;
/**
 * {@link RemoteDataBroker} scoped to a single device: every transaction it
 * creates targets the mount point identified by {@code nodeId} through the
 * shared restconf client.
 */
public class RemoteDeviceDataBroker extends RemoteDataBroker {

    private final String nodeId;

    public RemoteDeviceDataBroker(RestconfHttpClient odlClient, String nodeId) {
        super(odlClient);
        this.nodeId = nodeId;
    }

    // @NonNull added for consistency with the two sibling overrides below.
    @Override
    public @NonNull ReadTransaction newReadOnlyTransaction() {
        return new RemoteDeviceReadOnlyTransaction(this.remoteOdlClient, this.nodeId);
    }

    @Override
    public @NonNull ReadWriteTransaction newReadWriteTransaction() {
        return new RemoteDeviceReadWriteTransaction(this.remoteOdlClient, this.nodeId);
    }

    @Override
    public @NonNull WriteTransaction newWriteOnlyTransaction() {
        return new RemoteWriteOnlyTransaction(this.remoteOdlClient, this.nodeId);
    }
}
|
package org.wso2.carbon.esb.hl7.transport.test;
import org.apache.axiom.om.util.AXIOMUtil;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import org.wso2.carbon.automation.engine.annotations.ExecutionEnvironment;
import org.wso2.carbon.automation.engine.annotations.SetEnvironment;
import org.wso2.carbon.esb.hl7.transport.test.sample.HL7Sender;
import org.wso2.carbon.esb.hl7.transport.test.sample.HL7Server;
import org.wso2.esb.integration.common.utils.ESBIntegrationTest;
/**
 * Integration test for the ESB HL7 transport: deploys a proxy configured for
 * application-level NACKs, sends an HL7 message through it, and asserts that
 * the NACK message configured in the out-sequence comes back to the sender.
 *
 * Need to test once the product is released with the actual feature repo link
 *
 * @author TOSH
 *
 */
public class HL7TestCase extends ESBIntegrationTest {
    @BeforeClass(alwaysRun = true)
    public void init() throws Exception {
        super.init();
    }
    @AfterClass(alwaysRun = true)
    public void restoreServerConfiguration() throws Exception {
        super.cleanup();
    }
    @SetEnvironment(executionEnvironments = { ExecutionEnvironment.STANDALONE })
    @Test(groups = { "wso2.esb" }, description = "testing application ack")
    public void testHLProxyApplicationAck() throws Exception {
        addHL7ApplicationAckProxy();
        // Backend HL7 server on 9988 — the proxy's endpoint address below
        // points at it.
        // NOTE(review): the server is never shut down after the test; confirm
        // whether HL7Server exposes a stop/close API and call it in a finally.
        HL7Server server = new HL7Server(9988);
        server.start();
        // Fixed sleep gives the server time to bind; flaky on slow machines —
        // TODO consider polling the port instead.
        Thread.sleep(2000);
        HL7Sender sender = new HL7Sender();
        // 9293 is the proxy's hl7 transport port (deployment configuration,
        // not visible in this file).
        String response = sender.send("localhost", 9293);
        // The out-sequence forces HL7_RESULT_MODE=NACK with message "error msg",
        // so the sender must see that text in the response.
        Assert.assertTrue(response.contains("error msg"));
        removeProxy("HL7ApplicationAckProxy");
    }
    // Deploys a proxy with transport.hl7.AutoAck=false and
    // HL7_APPLICATION_ACK=true, so acknowledgements are produced by the
    // application (out-sequence) rather than the transport.
    private void addHL7ApplicationAckProxy() throws Exception {
        addProxyService(AXIOMUtil.stringToOM("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
        + "<proxy xmlns=\"http://ws.apache.org/ns/synapse\" name=\"HL7ApplicationAckProxy\" transports=\"hl7\">\n"
        + " <target>\n"
        + " <inSequence> \n "
        + " <property name=\"HL7_APPLICATION_ACK\" value=\"true\" scope=\"axis2\"/>\n"
        + " <send> \n"
        + " <endpoint name=\"endpoint_urn_uuid_9CB8D06C91A1E996796270828144799-1418795938\">\n"
        + " <address uri=\"hl7://localhost:9988\"/>\n"
        + " </endpoint> \n"
        + " </send> \n"
        + " </inSequence> \n"
        + " <outSequence> \n"
        + " <property name=\"HL7_RESULT_MODE\" value=\"NACK\" scope=\"axis2\"/> \n"
        + " <property name=\"HL7_NACK_MESSAGE\" value=\"error msg\" scope=\"axis2\"/> \n"
        + " <send/> \n"
        + " </outSequence>\n"
        + " </target> \n"
        + " <parameter name=\"transport.hl7.AutoAck\">false</parameter> \n"
        + " <parameter name=\"transport.hl7.ValidateMessage\">true</parameter> \n"
        +
        " </proxy>"));
    }
    private void removeProxy(String proxyName) throws Exception {
        deleteProxyService(proxyName);
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.