| focal_method (string, length 13–60.9k) | test_case (string, length 25–109k) |
|---|---|
/**
 * Returns the CLI name under which this command is registered.
 *
 * @return the command-name constant for this command
 */
@Override
public String getCommandName() {
    return COMMAND_NAME;
}
|
/**
 * Verifies that {@code CollectAlluxioInfoCommand} executes its configured shell commands
 * and writes their output into a sub-directory (named after the command) of the target dir.
 *
 * <p>The command map is swapped via reflection for a mocked {@code AlluxioCommand} so no
 * real shell command is executed.
 */
@Test
public void alluxioCmdExecuted()
    throws IOException, AlluxioException, NoSuchFieldException, IllegalAccessException {
  CollectAlluxioInfoCommand cmd = new CollectAlluxioInfoCommand(FileSystemContext.create());
  // Write to temp dir
  File targetDir = InfoCollectorTestUtils.createTemporaryDirectory();
  CommandLine mockCommandLine = mock(CommandLine.class);
  String[] mockArgs = new String[]{cmd.getCommandName(), targetDir.getAbsolutePath()};
  when(mockCommandLine.getArgs())
      .thenReturn(mockArgs);
  when(mockCommandLine.getOptionValue(ArgumentMatchers.eq("output-dir"), ArgumentMatchers.eq("")))
      .thenReturn(targetDir.getAbsolutePath());
  // Replace commands to execute
  Field f = cmd.getClass().getSuperclass().getDeclaredField("mCommands");
  f.setAccessible(true);
  CollectAlluxioInfoCommand.AlluxioCommand mockCommand =
      mock(CollectAlluxioInfoCommand.AlluxioCommand.class);
  when(mockCommand.runWithOutput())
      .thenReturn(new CommandReturn(0, "nothing happens"));
  Map<String, ShellCommand> mockCommandMap = new HashMap<>();
  mockCommandMap.put("mockCommand", mockCommand);
  f.set(cmd, mockCommandMap);
  int ret = cmd.run(mockCommandLine);
  assertEquals(0, ret);
  // Verify the command has been run
  verify(mockCommand).runWithOutput();
  // Files will be copied to sub-dir of target dir
  File subDir = new File(Paths.get(targetDir.getAbsolutePath(),
      cmd.getCommandName()).toString());
  // BUGFIX: assertEquals on arrays either compares identity or hits a deprecated JUnit
  // overload; compare the directory listing explicitly instead.
  String[] listed = subDir.list();
  assertEquals(1, listed.length);
  assertEquals("collectAlluxioInfo.txt", listed[0]);
  // Verify the command output is found
  String fileContent = new String(Files.readAllBytes(subDir.listFiles()[0].toPath()));
  assertTrue(fileContent.contains("nothing happens"));
}
|
/**
 * Escapes the given value for CSV output, without trimming surrounding whitespace.
 * Delegates to {@code escapeCsv(value, false)}.
 *
 * @param value the value to escape
 * @return the CSV-escaped value
 */
public static CharSequence escapeCsv(CharSequence value) {
    return escapeCsv(value, false);
}
|
/**
 * A value ending in a line feed must be wrapped in double quotes when CSV-escaped.
 */
@Test
public void escapeCsvWithLineFeedAtEnd() {
    CharSequence value = "testing\n";
    CharSequence expected = "\"testing\n\"";
    // NOTE(review): this presumably calls a two-arg assert-helper escapeCsv(value, expected)
    // defined elsewhere in this test class (not the production escapeCsv overload) — verify.
    escapeCsv(value, expected);
}
|
/**
 * Takes the next group offered by the scheduler, skipping the rejected ones.
 * On success the chosen group's status is marked allocated before it is returned.
 *
 * @param rejectedGroups group ids that must not be offered
 * @return the allocated group, or empty when the scheduler has none to offer
 */
public Optional<Group> takeGroup(Set<Integer> rejectedGroups) {
    synchronized (this) {
        return scheduler.takeNextGroup(rejectedGroups).map(status -> {
            status.allocate();
            Group chosen = status.group;
            log.fine(() -> "Offering <" + chosen + "> for query connection");
            return chosen;
        });
    }
}
|
/**
 * A single-group, single-node setup must always yield that group from the load balancer.
 */
@Test
void requireThatLoadBalancerServesSingleNodeSetups() {
    Node n1 = new Node("test", 0, "test-node1", 0);
    LoadBalancer lb = new LoadBalancer(List.of(new Group(0, List.of(n1))), LoadBalancer.Policy.ROUNDROBIN);
    Optional<Group> grp = lb.takeGroup(null);
    // FIX: the supplier passed to orElseThrow should *return* the exception,
    // not throw it itself (the previous lambda threw inside the supplier).
    Group group = grp.orElseThrow(() -> new IllegalStateException("Expected a SearchCluster.Group"));
    assertEquals(1, group.nodes().size());
}
|
/**
 * Builds a {@code MemoryRecords} from the given records using the current magic value.
 * Delegates to the magic-explicit overload.
 *
 * @param compression the compression to apply
 * @param records the records to write
 * @return the built records buffer
 */
public static MemoryRecords withRecords(Compression compression, SimpleRecord... records) {
    return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, compression, records);
}
|
/**
 * ZSTD compression is unsupported for record format magic v0/v1; building such records
 * must fail with an {@code IllegalArgumentException} and a descriptive message.
 */
@Test
public void testUnsupportedCompress() {
    BiFunction<Byte, CompressionType, MemoryRecords> builderBiFunction = (magic, compressionType) ->
        MemoryRecords.withRecords(magic, Compression.of(compressionType).build(), new SimpleRecord(10L, "key1".getBytes(), "value1".getBytes()));
    Arrays.asList(MAGIC_VALUE_V0, MAGIC_VALUE_V1).forEach(magic -> {
        Exception e = assertThrows(IllegalArgumentException.class, () -> builderBiFunction.apply(magic, CompressionType.ZSTD));
        // FIX: assertEquals takes (expected, actual) — the arguments were reversed.
        assertEquals("ZStandard compression is not supported for magic " + magic, e.getMessage());
    });
}
|
/**
 * Returns the key set visible through this delegating view: when a prefix is configured,
 * only backing keys starting with it are returned, with the prefix stripped off.
 *
 * @return the (possibly prefix-stripped) key set
 */
@Override
public Set<String> keySet() {
    final String keyPrefix = this.prefix;
    if (keyPrefix.isEmpty()) {
        // No prefix configured: expose the backing key set unchanged.
        return this.backingConfig.keySet();
    }
    final Set<String> strippedKeys = new HashSet<>();
    for (final String fullKey : this.backingConfig.keySet()) {
        if (fullKey.startsWith(keyPrefix)) {
            strippedKeys.add(fullKey.substring(keyPrefix.length()));
        }
    }
    return strippedKeys;
}
|
/**
 * A null prefix must be rejected with NPE; an empty prefix must make the delegating view
 * expose exactly the backing configuration's key set.
 */
@Test
void testDelegationConfigurationWithNullOrEmptyPrefix() {
    Configuration backingConf = new Configuration();
    backingConf.setValueInternal("test-key", "value", false);
    assertThatThrownBy(() -> new DelegatingConfiguration(backingConf, null))
        .isInstanceOf(NullPointerException.class);
    DelegatingConfiguration configuration = new DelegatingConfiguration(backingConf, "");
    // FIX: AssertJ convention is assertThat(actual).isEqualTo(expected) — the object
    // under test (the delegating configuration) belongs inside assertThat.
    assertThat(configuration.keySet()).isEqualTo(backingConf.keySet());
}
|
/**
 * Builds the REST resource path for the given namespace:
 * {@code v1/<prefix>/namespaces/<encoded-namespace>} joined with slashes.
 *
 * @param ns the namespace to encode into the path
 * @return the slash-joined resource path
 */
public String namespace(Namespace ns) {
    return SLASH.join("v1", prefix, "namespaces", RESTUtil.encodeNamespace(ns));
}
|
/**
 * Multipart namespaces are encoded with the unit-separator (%1F) between levels,
 * both with and without a catalog prefix in the path.
 */
@Test
public void testNamespaceWithMultipartNamespace() {
    Namespace ns = Namespace.of("n", "s");
    assertThat(withPrefix.namespace(ns)).isEqualTo("v1/ws/catalog/namespaces/n%1Fs");
    assertThat(withoutPrefix.namespace(ns)).isEqualTo("v1/namespaces/n%1Fs");
}
|
/**
 * Adds a data file to this append operation.
 *
 * @param file the data file to append
 * @return this for method chaining
 */
@Override
public MergeAppend appendFile(DataFile file) {
    add(file);
    return this;
}
|
/**
 * Verifies manifest merging after a delete: an append following a delete snapshot must
 * merge manifests and drop the DELETED entry entirely, keeping only added and existing
 * files. {@code min-count-to-merge} is forced to 1 so every commit triggers a merge.
 * V2Assert/V1Assert check the sequence-number behavior per table format version.
 */
@TestTemplate
public void testMergeWithExistingManifestAfterDelete() {
    // merge all manifests for this test
    table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();
    assertThat(listManifestFiles()).isEmpty();
    assertThat(readMetadata().lastSequenceNumber()).isEqualTo(0);
    // Commit 1: append FILE_A and FILE_B -> one manifest with two ADDED entries at seq 1.
    Snapshot snap = commit(table, table.newAppend().appendFile(FILE_A).appendFile(FILE_B), branch);
    validateSnapshot(null, snap, 1, FILE_A, FILE_B);
    TableMetadata base = readMetadata();
    long baseId = snap.snapshotId();
    assertThat(snap.allManifests(table.io())).hasSize(1);
    ManifestFile initialManifest = snap.allManifests(table.io()).get(0);
    validateManifest(
        initialManifest,
        dataSeqs(1L, 1L),
        fileSeqs(1L, 1L),
        ids(baseId, baseId),
        files(FILE_A, FILE_B),
        statuses(Status.ADDED, Status.ADDED));
    // Commit 2: delete FILE_A -> merged manifest marks FILE_A DELETED, FILE_B EXISTING.
    Snapshot deleteSnapshot = commit(table, table.newDelete().deleteFile(FILE_A), branch);
    // Sequence numbers only advance for format v2; v1 tables stay at 0.
    V2Assert.assertEquals(
        "Snapshot sequence number should be 2", 2, deleteSnapshot.sequenceNumber());
    V2Assert.assertEquals(
        "Last sequence number should be 2", 2, readMetadata().lastSequenceNumber());
    V1Assert.assertEquals(
        "Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
    TableMetadata delete = readMetadata();
    long deleteId = latestSnapshot(table, branch).snapshotId();
    assertThat(latestSnapshot(table, branch).allManifests(table.io())).hasSize(1);
    ManifestFile deleteManifest = deleteSnapshot.allManifests(table.io()).get(0);
    validateManifest(
        deleteManifest,
        dataSeqs(1L, 1L),
        fileSeqs(1L, 1L),
        ids(deleteId, baseId),
        files(FILE_A, FILE_B),
        statuses(Status.DELETED, Status.EXISTING));
    // Commit 3: append FILE_C and FILE_D -> merge produces a brand-new manifest.
    Snapshot committedSnapshot =
        commit(table, table.newAppend().appendFile(FILE_C).appendFile(FILE_D), branch);
    V2Assert.assertEquals(
        "Snapshot sequence number should be 3", 3, committedSnapshot.sequenceNumber());
    V2Assert.assertEquals(
        "Last sequence number should be 3", 3, readMetadata().lastSequenceNumber());
    V1Assert.assertEquals(
        "Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
    assertThat(committedSnapshot.allManifests(table.io())).hasSize(1);
    ManifestFile newManifest = committedSnapshot.allManifests(table.io()).get(0);
    assertThat(newManifest).isNotEqualTo(initialManifest);
    long snapshotId = committedSnapshot.snapshotId();
    // the deleted entry from the previous manifest should be removed
    validateManifestEntries(
        newManifest,
        ids(snapshotId, snapshotId, baseId),
        files(FILE_C, FILE_D, FILE_B),
        statuses(Status.ADDED, Status.ADDED, Status.EXISTING));
}
|
/**
 * Tests whether the given attribute value satisfies this matcher's condition.
 * A {@code null} attribute value never matches.
 *
 * @param attributeValue the value to test; may be null
 * @return true when the value matches this matcher's type and pattern
 * @throws IllegalStateException for an unknown matcher type
 */
@Override
public boolean match(String attributeValue) {
    if (attributeValue == null) {
        return false;
    }
    // Optional exact-length constraint shared by the StartsWith and EndsWith modes;
    // length == -1 means "no length constraint".
    final boolean lengthMatches = length == -1 || length == attributeValue.length();
    switch (type) {
        case Equals:
            return attributeValue.equals(value);
        case StartsWith:
            return lengthMatches && attributeValue.startsWith(value);
        case EndsWith:
            return lengthMatches && attributeValue.endsWith(value);
        case Contains:
            return attributeValue.contains(value);
        case Regexp:
            return regexPattern.matcher(attributeValue).matches();
        default:
            throw new IllegalStateException("Unexpected type " + type);
    }
}
|
/**
 * In a LIKE pattern, '_' matches exactly one arbitrary character — so it also matches
 * a literal '_' or '%', but never zero or two characters.
 */
@Test
public void testSingleCharWildcard() {
    LikeCondition likeCondition = new LikeCondition("a_b_c");
    assertTrue(likeCondition.match("aXbYc"));
    assertTrue(likeCondition.match("a_b_c"));
    assertTrue(likeCondition.match("a%b%c"));
    // too few characters: '_' must consume exactly one
    assertFalse(likeCondition.match("abc"));
    // too many characters per wildcard position
    assertFalse(likeCondition.match("aXXbYYc"));
}
|
/**
 * Fetches all windows for the given key in reverse order by delegating
 * to the wrapped store.
 *
 * @param key the record key to fetch windows for
 * @return a backward iterator over the windowed entries
 */
@Override
public KeyValueIterator<Windowed<Bytes>, byte[]> backwardFetch(final Bytes key) {
    return wrapped().backwardFetch(key);
}
|
/**
 * The range overload of backwardFetch must pass through to the inner store unchanged.
 */
@Test
public void shouldDelegateToUnderlyingStoreWhenBackwardFetchingRange() {
    store.backwardFetch(bytesKey, bytesKey);
    verify(inner).backwardFetch(bytesKey, bytesKey);
}
|
/**
 * Serializes the discovery configuration into the XML generator: a node-filter entry
 * plus one {@code discovery-strategy} element per configured strategy.
 * Emits nothing when discovery is completely unconfigured.
 *
 * @param gen the XML generator to write into
 * @param discovery the discovery configuration to serialize
 */
private static void discovery(XmlGenerator gen, DiscoveryConfig discovery) {
    boolean noFilter = discovery.getNodeFilter() == null && discovery.getNodeFilterClass() == null;
    if (noFilter && discovery.getDiscoveryStrategyConfigs().isEmpty()) {
        return;
    }
    gen.open("discovery-strategies")
            .node("node-filter", null, "class",
                    classNameOrImplClass(discovery.getNodeFilterClass(), discovery.getNodeFilter()));
    for (DiscoveryStrategyConfig strategyConfig : discovery.getDiscoveryStrategyConfigs()) {
        gen.open("discovery-strategy", "class", strategyConfig.getClassName(), "enabled", true)
                .appendProperties(strategyConfig.getProperties())
                .close();
    }
    gen.close();
}
|
/**
 * Round-trips a DiscoveryConfig through the XML generator and verifies the node-filter
 * class and the strategy list (including each strategy's properties) survive.
 */
@Test
public void discovery() {
    DiscoveryConfig expected = new DiscoveryConfig();
    expected.setNodeFilterClass(randomString());
    DiscoveryStrategyConfig discoveryStrategy = new DiscoveryStrategyConfig(randomString());
    discoveryStrategy.addProperty("prop", randomString());
    expected.addDiscoveryStrategyConfig(discoveryStrategy);
    clientConfig.getNetworkConfig().setDiscoveryConfig(expected);
    DiscoveryConfig actual = newConfigViaGenerator().getNetworkConfig().getDiscoveryConfig();
    assertEquals(expected.getNodeFilterClass(), actual.getNodeFilterClass());
    // NOTE(review): this comparator piggy-backs property assertions inside compare() and
    // is not a total order (returns -1 for any class-name mismatch) — it is only used to
    // drive assertCollection, not to sort.
    assertCollection(expected.getDiscoveryStrategyConfigs(), actual.getDiscoveryStrategyConfigs(),
        new Comparator<DiscoveryStrategyConfig>() {
            @Override
            public int compare(DiscoveryStrategyConfig o1, DiscoveryStrategyConfig o2) {
                assertMap(o1.getProperties(), o2.getProperties());
                return o1.getClassName().equals(o2.getClassName()) ? 0 : -1;
            }
        });
}
|
/**
 * Determines the Java major version from the {@code java.specification.version}
 * system property, defaulting to "1.6" when the property is unreadable
 * (e.g. denied by a SecurityManager).
 *
 * @return the major Java version (6, 7, 8, 9, ...)
 */
static int majorVersionFromJavaSpecificationVersion() {
    return majorVersion(SystemPropertyUtil.get("java.specification.version", "1.6"));
}
|
/**
 * When a SecurityManager denies access to {@code java.specification.version}, the
 * version lookup must fall back to its default of "1.6", i.e. major version 6.
 * The original SecurityManager is restored in the finally block.
 */
@Test
public void testMajorVersionFromJavaSpecificationVersion() {
    final SecurityManager current = System.getSecurityManager();
    try {
        System.setSecurityManager(new SecurityManager() {
            @Override
            public void checkPropertyAccess(String key) {
                if (key.equals("java.specification.version")) {
                    // deny
                    throw new SecurityException(key);
                }
            }
            // so we can restore the security manager
            @Override
            public void checkPermission(Permission perm) {
            }
        });
        // Denied property access -> SystemPropertyUtil default "1.6" -> major version 6.
        assertEquals(6, PlatformDependent0.majorVersionFromJavaSpecificationVersion());
    } finally {
        System.setSecurityManager(current);
    }
}
|
/**
 * Determines whether a DNS name is covered by a pattern: either an exact
 * (case-insensitive) match, or a wildcard pattern of the form {@code *.domain} that
 * covers the domain itself and any of its subdomains.
 *
 * <p>FIX: the wildcard branch previously used a bare suffix check, so
 * {@code "badexample.org"} incorrectly matched {@code "*.example.org"}. The match now
 * requires a label boundary (a literal dot) before the wildcard tail, while still
 * covering the bare domain itself as before.
 *
 * @param name the DNS name to test; must be non-null and non-empty
 * @param pattern the pattern to test against; must be non-null and non-empty
 * @return true when the name is covered by the pattern
 * @throws IllegalArgumentException when either argument is null or empty
 */
public static boolean isNameCoveredByPattern( String name, String pattern )
{
    if ( name == null || name.isEmpty() || pattern == null || pattern.isEmpty() )
    {
        throw new IllegalArgumentException( "Arguments cannot be null or empty." );
    }
    // DNS names are case-insensitive.
    // NOTE(review): toLowerCase() is locale-sensitive (Turkish-I problem); consider
    // toLowerCase(Locale.ROOT) if java.util.Locale is importable here.
    final String needle = name.toLowerCase();
    final String hayStack = pattern.toLowerCase();
    if ( needle.equals( hayStack ) )
    {
        return true;
    }
    if ( hayStack.startsWith( "*." ) )
    {
        // tail is the pattern without "*." (e.g. "example.org");
        // hayStack.substring(1) keeps the dot (".example.org") to enforce the label boundary.
        final String tail = hayStack.substring( 2 );
        return needle.equals( tail ) || needle.endsWith( hayStack.substring( 1 ) );
    }
    return false;
}
|
/**
 * A name that is byte-for-byte identical to the pattern must always be covered.
 */
@Test
public void testNameCoverageExactMatch() throws Exception
{
    // setup: pattern equals the name itself
    final String hostName = "xmpp.example.org";
    // do magic & verify
    assertTrue( DNSUtil.isNameCoveredByPattern( hostName, hostName ) );
}
|
/**
 * Starts the cache: transitions LATENT -> STARTED (failing if already started),
 * optionally creates the parent path, registers the connection listener, and — if the
 * client is already connected — kicks off the initial population from the root node.
 *
 * @return this for chaining
 * @throws Exception on errors creating the parent containers
 * @throws IllegalStateException when the cache was already started
 */
public TreeCache start() throws Exception {
    Preconditions.checkState(treeState.compareAndSet(TreeState.LATENT, TreeState.STARTED), "already started");
    if (createParentNodes) {
        client.createContainers(root.path);
    }
    // Register the listener before checking connectivity so a connect event cannot be missed.
    client.getConnectionStateListenable().addListener(connectionStateListener);
    if (client.getZookeeperClient().isConnected()) {
        root.wasCreated();
    }
    return this;
}
|
/**
 * Starting a TreeCache over pre-existing nodes must emit NODE_ADDED for the root and
 * every child, followed by a single INITIALIZED event, and nothing else.
 */
@Test
public void testChildrenInitialized() throws Exception {
    client.create().forPath("/test", "".getBytes());
    client.create().forPath("/test/1", "1".getBytes());
    client.create().forPath("/test/2", "2".getBytes());
    client.create().forPath("/test/3", "3".getBytes());
    cache = newTreeCacheWithListeners(client, "/test");
    cache.start();
    assertEvent(TreeCacheEvent.Type.NODE_ADDED, "/test");
    assertEvent(TreeCacheEvent.Type.NODE_ADDED, "/test/1");
    assertEvent(TreeCacheEvent.Type.NODE_ADDED, "/test/2");
    assertEvent(TreeCacheEvent.Type.NODE_ADDED, "/test/3");
    assertEvent(TreeCacheEvent.Type.INITIALIZED);
    assertNoMoreEvents();
}
|
/**
 * Converts a prefixed base-id string back into its typed AMQP message-id object:
 * UUID, UnsignedLong, String, or Binary depending on the type prefix; a string with
 * no recognized prefix is returned unchanged.
 *
 * @param baseId the (possibly prefixed) id string; null yields null
 * @return the decoded id object, or null for null input
 * @throws AmqpProtocolException when the value after the prefix cannot be parsed
 */
public Object toIdObject(String baseId) throws AmqpProtocolException {
    if (baseId == null) {
        return null;
    }
    try {
        if (hasAmqpUuidPrefix(baseId)) {
            String uuidString = strip(baseId, AMQP_UUID_PREFIX_LENGTH);
            return UUID.fromString(uuidString);
        } else if (hasAmqpUlongPrefix(baseId)) {
            String longString = strip(baseId, AMQP_ULONG_PREFIX_LENGTH);
            return UnsignedLong.valueOf(longString);
        } else if (hasAmqpStringPrefix(baseId)) {
            return strip(baseId, AMQP_STRING_PREFIX_LENGTH);
        } else if (hasAmqpBinaryPrefix(baseId)) {
            String hexString = strip(baseId, AMQP_BINARY_PREFIX_LENGTH);
            byte[] bytes = convertHexStringToBinary(hexString);
            return new Binary(bytes);
        } else {
            // We have a string without any type prefix, transmit it as-is.
            return baseId;
        }
    } catch (IllegalArgumentException e) {
        // NOTE(review): the original cause 'e' is dropped here; consider passing it along
        // if AmqpProtocolException has a (String, Throwable)-style constructor.
        throw new AmqpProtocolException("Unable to convert ID value");
    }
}
|
/**
 * A value carrying the AMQP string prefix must decode to the bare suffix (prefix stripped).
 */
@Test
public void testToIdObjectWithStringContainingStringEncodingPrefix() throws Exception {
    String suffix = "myStringSuffix";
    String stringId = AMQPMessageIdHelper.AMQP_STRING_PREFIX + suffix;
    Object idObject = messageIdHelper.toIdObject(stringId);
    assertNotNull("null object should not have been returned", idObject);
    assertEquals("expected id object was not returned", suffix, idObject);
}
|
/**
 * Creates a request target for the given server with no additional request properties.
 * Delegates to {@code target(server, Collections.emptyMap())}.
 *
 * @param server the server URI to target
 * @return the request target
 */
public KsqlTarget target(final URI server) {
    return target(server, Collections.emptyMap());
}
|
/**
 * A heartbeat request must be POSTed to /heartbeat as JSON, and the body must
 * deserialize back to the original host/timestamp message.
 */
@Test
public void shouldSendHeartbeatRequest() throws Exception {
    // Given:
    KsqlHostInfoEntity entity = new KsqlHostInfoEntity(serverUri.getHost(), serverUri.getPort());
    long timestamp = System.currentTimeMillis();
    server.setResponseObject(new HeartbeatResponse(true));
    // When:
    KsqlTarget target = ksqlClient.target(serverUri);
    target.postAsyncHeartbeatRequest(entity, timestamp);
    Buffer body = server.waitForRequestBody();
    HeartbeatMessage hbm = KsqlClientUtil.deserialize(body, HeartbeatMessage.class);
    // Then:
    assertThat(server.getHttpMethod(), is(HttpMethod.POST));
    assertThat(server.getPath(), is("/heartbeat"));
    assertThat(server.getHeaders().get("Accept"), is("application/json"));
    assertThat(hbm, is(new HeartbeatMessage(entity, timestamp)));
}
|
/**
 * Builds a {@code JSONSchema} from the given schema definition, falling back to
 * Jackson-based reader/writer implementations when the definition supplies none.
 *
 * @param schemaDefinition the schema definition (POJO, optional reader/writer)
 * @param <T> the POJO type described by the schema
 * @return the constructed JSON schema
 */
public static <T> JSONSchema<T> of(SchemaDefinition<T> schemaDefinition) {
    // Fall back to Jackson defaults when no custom reader/writer is configured.
    final SchemaReader<T> schemaReader = schemaDefinition.getSchemaReaderOpt()
            .orElseGet(() -> new JacksonJsonReader<>(jsonMapper(), schemaDefinition.getPojo()));
    final SchemaWriter<T> schemaWriter = schemaDefinition.getSchemaWriterOpt()
            .orElseGet(() -> new JacksonJsonWriter<>(jsonMapper()));
    return new JSONSchema<>(parseSchemaInfo(schemaDefinition, SchemaType.JSON),
            schemaDefinition.getPojo(), schemaReader, schemaWriter);
}
|
/**
 * getNativeSchema() must expose the very same underlying Avro schema instance
 * that the JSONSchema holds internally.
 */
@Test
public void testGetNativeSchema() throws SchemaValidationException {
    JSONSchema<PC> schema2 = JSONSchema.of(PC.class);
    org.apache.avro.Schema avroSchema2 = (Schema) schema2.getNativeSchema().get();
    assertSame(schema2.schema, avroSchema2);
}
|
/**
 * Returns a new map containing the entries of {@code minuend} whose keys do not
 * appear in {@code subtrahend}. Neither input map is modified.
 *
 * @param minuend the map to subtract from
 * @param subtrahend the map whose keys are removed from the result
 * @return a new map with the surviving entries
 */
public static <K, V> Map<K, V> subtractMap(Map<? extends K, ? extends V> minuend, Map<? extends K, ? extends V> subtrahend) {
    return minuend.entrySet().stream()
            .filter(entry -> !subtrahend.containsKey(entry.getKey()))
            .collect(Collectors.toMap(entry -> entry.getKey(), entry -> entry.getValue()));
}
|
/**
 * Subtracting an empty map must keep every entry, and must return a fresh map
 * instance rather than the original.
 */
@Test
public void testSubtractMapDoesntRemoveAnythingWhenEmptyMap() {
    Map<String, String> mainMap = new HashMap<>();
    mainMap.put("one", "1");
    mainMap.put("two", "2");
    mainMap.put("three", "3");
    Map<String, String> secondaryMap = new HashMap<>();
    Map<String, String> newMap = subtractMap(mainMap, secondaryMap);
    assertEquals(3, newMap.size());
    assertEquals("1", newMap.get("one"));
    assertEquals("2", newMap.get("two"));
    assertEquals("3", newMap.get("three"));
    // result must be a copy, not the input map itself
    assertNotSame(newMap, mainMap);
}
|
/**
 * Creates the extractor appropriate for the column's namespace: key columns get a
 * {@code KeyColumnExtractor}, value columns a {@code ValueColumnExtractor}.
 *
 * @param column the column to extract the timestamp from; must have a non-negative index
 * @return the matching extractor
 * @throws IllegalArgumentException when the column index is negative
 */
static ColumnExtractor create(final Column column) {
    final int columnIndex = column.index();
    Preconditions.checkArgument(columnIndex >= 0, "negative index: " + columnIndex);
    if (column.namespace() == Namespace.KEY) {
        return new KeyColumnExtractor(columnIndex);
    }
    return new ValueColumnExtractor(columnIndex);
}
|
/**
 * A negative column index must be rejected with IllegalArgumentException.
 */
@Test(expected = IllegalArgumentException.class)
public void shouldThrowOnNegativeIndex() {
    // Given:
    when(column.index()).thenReturn(-1);
    // When:
    TimestampColumnExtractors.create(column);
}
|
/**
 * Computes the pipelined regions of a logical topology: groups of vertices connected by
 * must-be-pipelined results, returned as disjoint vertex sets.
 *
 * @param topologicallySortedVertices the vertices in topological order
 * @return the set of pipelined regions (each a set of vertices)
 */
public static Set<Set<LogicalVertex>> computePipelinedRegions(
        final Iterable<? extends LogicalVertex> topologicallySortedVertices) {
    final Map<LogicalVertex, Set<LogicalVertex>> vertexToRegion =
            PipelinedRegionComputeUtil.buildRawRegions(
                    topologicallySortedVertices,
                    LogicalPipelinedRegionComputeUtil::getMustBePipelinedConsumedResults);
    // Since LogicalTopology is a DAG, there is no need to do cycle detection nor to merge
    // regions on cycles.
    return uniqueVertexGroups(vertexToRegion);
}
|
/**
 * A BLOCKING edge must split the topology: v1-v2-v3 form one pipelined region (all
 * PIPELINED edges) while v4, connected via a BLOCKING result, forms its own region.
 */
@Test
void testOneInputSplitsIntoTwo() {
    JobVertex v1 = new JobVertex("v1");
    JobVertex v2 = new JobVertex("v2");
    JobVertex v3 = new JobVertex("v3");
    JobVertex v4 = new JobVertex("v4");
    v2.connectNewDataSetAsInput(
        v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    v3.connectNewDataSetAsInput(
        v2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    // the BLOCKING edge separates v4 into its own region
    v4.connectNewDataSetAsInput(
        v2, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    Set<Set<LogicalVertex>> regions = computePipelinedRegions(v1, v2, v3, v4);
    checkRegionSize(regions, 2, 3, 1);
}
|
/**
 * Post-processes topic-route responses: when the request opted into zone mode with a
 * concrete zone name, the route data in the response body is filtered down to that zone
 * and re-encoded. All other requests/responses pass through untouched.
 *
 * @param remoteAddr the remote address of the peer (unused)
 * @param request the original request, inspected for zone ext-fields
 * @param response the response whose body may be rewritten in place
 */
@Override
public void doAfterResponse(String remoteAddr, RemotingCommand request, RemotingCommand response) {
    if (RequestCode.GET_ROUTEINFO_BY_TOPIC != request.getCode()) {
        return;
    }
    if (response == null || response.getBody() == null || ResponseCode.SUCCESS != response.getCode()) {
        return;
    }
    // FIX: guard against requests carrying no ext fields at all; the previous code
    // dereferenced getExtFields() unconditionally and threw NPE for such requests.
    if (request.getExtFields() == null) {
        return;
    }
    boolean zoneMode = Boolean.parseBoolean(request.getExtFields().get(MixAll.ZONE_MODE));
    if (!zoneMode) {
        return;
    }
    String zoneName = request.getExtFields().get(MixAll.ZONE_NAME);
    if (StringUtils.isBlank(zoneName)) {
        return;
    }
    TopicRouteData topicRouteData = RemotingSerializable.decode(response.getBody(), TopicRouteData.class);
    response.setBody(filterByZoneName(topicRouteData, zoneName).encode());
}
|
/**
 * Exercises zone filtering through progressively richer route data: single broker,
 * broker with explicit master address, brokers in other zones, and filter-server
 * entries. Only filter servers whose broker addresses survive the zone1 filter
 * must remain in the response body.
 */
@Test
public void testDoAfterResponseWithValidZoneFiltering() throws Exception {
    HashMap<String, String> extFields = new HashMap<>();
    extFields.put(MixAll.ZONE_MODE, "true");
    extFields.put(MixAll.ZONE_NAME,"zone1");
    // request code 105 corresponds to GET_ROUTEINFO_BY_TOPIC
    RemotingCommand request = RemotingCommand.createRequestCommand(105,null);
    request.setExtFields(extFields);
    RemotingCommand response = RemotingCommand.createResponseCommand(null);
    response.setCode(ResponseCode.SUCCESS);
    // Round 1: baseline sample route data.
    TopicRouteData topicRouteData = createSampleTopicRouteData();
    response.setBody(RemotingSerializable.encode(topicRouteData));
    zoneRouteRPCHook.doAfterResponse("", request, response);
    // Round 2: give the first broker an explicit master address.
    HashMap<Long,String> brokeraddrs = new HashMap<>();
    brokeraddrs.put(MixAll.MASTER_ID,"127.0.0.1:10911");
    topicRouteData.getBrokerDatas().get(0).setBrokerAddrs(brokeraddrs);
    response.setBody(RemotingSerializable.encode(topicRouteData));
    zoneRouteRPCHook.doAfterResponse("", request, response);
    // Round 3: add brokers in zone2 (should be filtered out) and zone1 (kept).
    topicRouteData.getQueueDatas().add(createQueueData("BrokerB"));
    HashMap<Long,String> brokeraddrsB = new HashMap<>();
    brokeraddrsB.put(MixAll.MASTER_ID,"127.0.0.1:10912");
    BrokerData brokerData1 = createBrokerData("BrokerB","zone2",brokeraddrsB);
    BrokerData brokerData2 = createBrokerData("BrokerC","zone1",null);
    topicRouteData.getBrokerDatas().add(brokerData1);
    topicRouteData.getBrokerDatas().add(brokerData2);
    response.setBody(RemotingSerializable.encode(topicRouteData));
    zoneRouteRPCHook.doAfterResponse("", request, response);
    // Round 4: a filter server on a zone1 broker address survives the filtering.
    topicRouteData.getFilterServerTable().put("127.0.0.1:10911",new ArrayList<>());
    response.setBody(RemotingSerializable.encode(topicRouteData));
    zoneRouteRPCHook.doAfterResponse("", request, response);
    Assert.assertEquals(1,RemotingSerializable
        .decode(response.getBody(), TopicRouteData.class)
        .getFilterServerTable()
        .size());
    // Round 5: a filter server on a zone2 broker address is filtered out, count stays 1.
    topicRouteData.getFilterServerTable().put("127.0.0.1:10912",new ArrayList<>());
    response.setBody(RemotingSerializable.encode(topicRouteData));
    zoneRouteRPCHook.doAfterResponse("", request, response);
    Assert.assertEquals(1,RemotingSerializable
        .decode(response.getBody(), TopicRouteData.class)
        .getFilterServerTable()
        .size());
}
|
/**
 * Simplifies the response path's point list with Ramer-Douglas-Peucker while keeping
 * waypoint indices, instructions, and path details consistent with the reduced points.
 * Each aspect is registered as a {@code Partition} of index intervals that the
 * simplification mutates in lock-step; afterwards the waypoint indices are rebuilt
 * from the mutated intervals and consistency is asserted.
 *
 * @param responsePath the path whose points and metadata are simplified in place
 * @param ramerDouglasPeucker the simplification algorithm instance
 * @param enableInstructions whether instructions are present and must be kept in sync
 * @return the simplified point list (same instance held by the response path)
 */
public static PointList simplify(ResponsePath responsePath, RamerDouglasPeucker ramerDouglasPeucker, boolean enableInstructions) {
    final PointList pointList = responsePath.getPoints();
    List<Partition> partitions = new ArrayList<>();
    // make sure all waypoints are retained in the simplified point list
    // we copy the waypoint indices into temporary intervals where they will be mutated by the simplification,
    // afterwards we need to update the way point indices accordingly.
    List<Interval> intervals = new ArrayList<>();
    for (int i = 0; i < responsePath.getWaypointIndices().size() - 1; i++)
        intervals.add(new Interval(responsePath.getWaypointIndices().get(i), responsePath.getWaypointIndices().get(i + 1)));
    // Partition view over the waypoint intervals.
    partitions.add(new Partition() {
        @Override
        public int size() {
            return intervals.size();
        }
        @Override
        public int getIntervalLength(int index) {
            return intervals.get(index).end - intervals.get(index).start;
        }
        @Override
        public void setInterval(int index, int start, int end) {
            intervals.get(index).start = start;
            intervals.get(index).end = end;
        }
    });
    // todo: maybe this code can be simplified if path details and instructions would be merged, see #1121
    if (enableInstructions) {
        final InstructionList instructions = responsePath.getInstructions();
        // Partition view over the instruction lengths.
        partitions.add(new Partition() {
            @Override
            public int size() {
                return instructions.size();
            }
            @Override
            public int getIntervalLength(int index) {
                return instructions.get(index).getLength();
            }
            @Override
            public void setInterval(int index, int start, int end) {
                Instruction instruction = instructions.get(index);
                if (instruction instanceof ViaInstruction || instruction instanceof FinishInstruction) {
                    if (start != end) {
                        throw new IllegalStateException("via- and finish-instructions are expected to have zero length");
                    }
                    // have to make sure that via instructions and finish instructions contain a single point
                    // even though their 'instruction length' is zero.
                    end++;
                }
                instruction.setPoints(pointList.shallowCopy(start, end, false));
            }
        });
    }
    // One partition view per path-detail list (e.g. per detail key).
    for (final Map.Entry<String, List<PathDetail>> entry : responsePath.getPathDetails().entrySet()) {
        // If the pointList only contains one point, PathDetails have to be empty because 1 point => 0 edges
        final List<PathDetail> detail = entry.getValue();
        if (detail.isEmpty() && pointList.size() > 1)
            throw new IllegalStateException("PathDetails " + entry.getKey() + " must not be empty");
        partitions.add(new Partition() {
            @Override
            public int size() {
                return detail.size();
            }
            @Override
            public int getIntervalLength(int index) {
                return detail.get(index).getLength();
            }
            @Override
            public void setInterval(int index, int start, int end) {
                PathDetail pd = detail.get(index);
                pd.setFirst(start);
                pd.setLast(end);
            }
        });
    }
    // Run the simplification; all registered partitions are remapped in lock-step.
    simplify(responsePath.getPoints(), partitions, ramerDouglasPeucker);
    // Rebuild the waypoint indices from the mutated intervals.
    // NOTE(review): intervals.get(0) assumes at least two waypoint indices exist — TODO confirm callers guarantee this.
    List<Integer> simplifiedWaypointIndices = new ArrayList<>();
    simplifiedWaypointIndices.add(intervals.get(0).start);
    for (Interval interval : intervals)
        simplifiedWaypointIndices.add(interval.end);
    responsePath.setWaypointIndices(simplifiedWaypointIndices);
    assertConsistencyOfPathDetails(responsePath.getPathDetails());
    if (enableInstructions)
        assertConsistencyOfInstructions(responsePath.getInstructions(), responsePath.getPoints().size());
    return pointList;
}
|
/**
 * Simplifies a hand-crafted point list through a single partition and checks that
 * both the surviving points and the remapped partition intervals match expectations.
 * Points marked 'x' below are chosen so Ramer-Douglas-Peucker removes exactly them.
 */
@Test
public void testSinglePartition() {
    // points are chosen such that DP will remove those marked with an x
    // todo: we could go further and replace Ramer-Douglas-Peucker with some abstract thing that makes this easier to test
    PointList points = new PointList();
    points.add(48.89107, 9.33161); // 0 -> 0
    points.add(48.89104, 9.33102); // 1 x
    points.add(48.89100, 9.33024); // 2 x
    points.add(48.89099, 9.33002); // 3 -> 1
    points.add(48.89092, 9.32853); // 4 -> 2
    points.add(48.89101, 9.32854); // 5 x
    points.add(48.89242, 9.32865); // 6 -> 3
    points.add(48.89343, 9.32878); // 7 -> 4
    PointList origPoints = points.clone(false);
    TestPartition partition = TestPartition.start()
        .add(0, 3)
        .add(3, 3) // via
        .add(3, 3) // via (just added this to make the test harder)
        .add(3, 4)
        .add(4, 4) // via
        .add(4, 7)
        .add(7, 7); // end
    List<PathSimplification.Partition> partitions = new ArrayList<>();
    partitions.add(partition);
    PathSimplification.simplify(points, partitions, new RamerDouglasPeucker());
    // check points were modified correctly
    assertEquals(5, points.size());
    // reproduce the expected result by NaN-ing the removed points in the original copy
    origPoints.set(1, Double.NaN, Double.NaN, Double.NaN);
    origPoints.set(2, Double.NaN, Double.NaN, Double.NaN);
    origPoints.set(5, Double.NaN, Double.NaN, Double.NaN);
    RamerDouglasPeucker.removeNaN(origPoints);
    assertEquals(origPoints, points);
    // check partition was modified correctly
    TestPartition expected = TestPartition.start()
        .add(0, 1)
        .add(1, 1)
        .add(1, 1)
        .add(1, 2)
        .add(2, 2)
        .add(2, 4)
        .add(4, 4);
    assertEquals(expected.intervals, partition.intervals);
}
|
/**
 * Blocks until the next mail at this priority is available, then runs it.
 * Exceptions thrown by the mail are rethrown, wrapped as unchecked if necessary.
 *
 * @throws InterruptedException when interrupted while waiting for a mail
 */
@Override
public void yield() throws InterruptedException {
    Mail mail = mailbox.take(priority);
    try {
        mail.run();
    } catch (Exception ex) {
        throw WrappingRuntimeException.wrapIfNecessary(ex);
    }
}
|
/**
 * A runnable submitted from another thread must be executed, via yield(), on the
 * mailbox thread (the current thread), and the submission itself must not fail.
 */
@Test
void testYield() throws Exception {
    final AtomicReference<Exception> exceptionReference = new AtomicReference<>();
    final TestRunnable testRunnable = new TestRunnable();
    // Submit from a separate thread so yield() below has a mail to pick up.
    final Thread submitThread =
        new Thread(
            () -> {
                try {
                    mailboxExecutor.execute(testRunnable, "testRunnable");
                } catch (Exception e) {
                    exceptionReference.set(e);
                }
            });
    submitThread.start();
    mailboxExecutor.yield();
    submitThread.join();
    assertThat(exceptionReference.get()).isNull();
    // yield() runs mails on the calling thread, not the submitting one
    assertThat(testRunnable.wasExecutedBy()).isEqualTo(Thread.currentThread());
}
|
/**
 * Parses a single Hollow schema from its textual DSL representation.
 *
 * @param schema the schema text to parse
 * @return the parsed schema
 * @throws IOException on tokenizer/parse errors
 */
public static HollowSchema parseSchema(String schema) throws IOException {
    StreamTokenizer tokenizer = new StreamTokenizer(new StringReader(schema));
    configureTokenizer(tokenizer);
    return parseSchema(tokenizer);
}
|
/**
 * A Map schema with a multi-field @HashKey must parse name, key/value types, and all
 * hash-key field paths — and must survive a parse/toString round-trip.
 */
@Test
public void parsesMapSchemaWithMultiFieldPrimaryKey() throws IOException {
    String listSchema = "MapOfStringToTypeA Map<String, TypeA> @HashKey(id.value, region.country.id, key);\n";
    HollowMapSchema schema = (HollowMapSchema) HollowSchemaParser.parseSchema(listSchema);
    Assert.assertEquals("MapOfStringToTypeA", schema.getName());
    Assert.assertEquals("String", schema.getKeyType());
    Assert.assertEquals("TypeA", schema.getValueType());
    Assert.assertEquals(new PrimaryKey("String", "id.value", "region.country.id", "key"), schema.getHashKey());
    // round-trip: re-parsing the printed form must yield an equal schema
    Assert.assertEquals(schema, HollowSchemaParser.parseSchema(schema.toString()));
}
|
/**
 * Creates a RetryTransformer decorating reactive flows with the given retry policy.
 *
 * @param retry the retry policy to apply
 * @param <T> the element type of the transformed flow
 * @return the transformer
 */
public static <T> RetryTransformer<T> of(Retry retry) {
    return new RetryTransformer<>(retry);
}
|
/**
 * Two Maybe subscriptions through the same retry: the first succeeds immediately
 * (no retry), the second succeeds only after two retried failures — 4 service calls
 * total, with the metrics split one success-without-retry / one success-with-retry.
 */
@Test
public void returnOnCompleteUsingMaybe() throws InterruptedException {
    RetryConfig config = retryConfig();
    Retry retry = Retry.of("testName", config);
    RetryTransformer<Object> retryTransformer = RetryTransformer.of(retry);
    // call 1 succeeds; calls 2 and 3 fail; call 4 succeeds
    given(helloWorldService.returnHelloWorld())
        .willReturn("Hello world")
        .willThrow(new HelloWorldException())
        .willThrow(new HelloWorldException())
        .willReturn("Hello world");
    // First subscription: immediate success, no retry needed.
    Maybe.fromCallable(helloWorldService::returnHelloWorld)
        .compose(retryTransformer)
        .test()
        .await()
        .assertValueCount(1)
        .assertValues("Hello world")
        .assertComplete();
    // Second subscription: two failures are retried before the final success.
    Maybe.fromCallable(helloWorldService::returnHelloWorld)
        .compose(retryTransformer)
        .test()
        .await()
        .assertValueCount(1)
        .assertValues("Hello world")
        .assertComplete();
    then(helloWorldService).should(times(4)).returnHelloWorld();
    Retry.Metrics metrics = retry.getMetrics();
    assertThat(metrics.getNumberOfSuccessfulCallsWithoutRetryAttempt()).isEqualTo(1);
    assertThat(metrics.getNumberOfSuccessfulCallsWithRetryAttempt()).isEqualTo(1);
    assertThat(metrics.getNumberOfFailedCallsWithRetryAttempt()).isZero();
    assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isZero();
}
|
/**
 * Asserts that the actual string contains a match for the given regex.
 * Fails with distinct messages for a null actual value vs. a non-matching one.
 *
 * @param regex the pattern that must find a match in the actual string; must not be null
 */
@GwtIncompatible("java.util.regex.Pattern")
public void containsMatch(@Nullable Pattern regex) {
    checkNotNull(regex);
    if (actual == null) {
        failWithActual("expected a string that contains a match for", regex);
    } else if (!regex.matcher(actual).find()) {
        failWithActual("expected to contain a match for", regex);
    }
}
|
/**
 * containsMatch on a null subject must fail with the null-specific message,
 * carrying the pattern as the failure value.
 */
@Test
@GwtIncompatible("Pattern")
public void stringContainsMatchPatternFailNull() {
    expectFailureWhenTestingThat(null).containsMatch(Pattern.compile(".*b.*"));
    assertFailureValue("expected a string that contains a match for", ".*b.*");
}
|
/**
 * Interprets a Cypher query. Blank input succeeds immediately. When multi-statement
 * mode is enabled the input is split on semicolons (excluding quoted/backticked ones),
 * all but the last statement are run for their side effects, and the result of the
 * last statement is returned.
 *
 * @param cypherQuery the query text, possibly containing multiple statements
 * @param interpreterContext the interpreter context
 * @return the result of the (last) query
 */
@Override
public InterpreterResult interpret(String cypherQuery, InterpreterContext interpreterContext) {
    LOGGER.info("Opening session");
    if (StringUtils.isBlank(cypherQuery)) {
        return new InterpreterResult(Code.SUCCESS);
    }
    // NOTE(review): this split regex uses '^' and '|' *inside* a character class, where
    // they are literals/negation — it likely does not express the intended
    // "semicolon not inside quotes/backticks" rule; verify against multi-statement inputs.
    final List<String> queries = isMultiStatementEnabled ?
        Arrays.asList(cypherQuery.split(";[^'|^\"|^(\\w+`)]")) : Arrays.asList(cypherQuery);
    if (queries.size() == 1) {
        final String query = queries.get(0);
        return runQuery(query, interpreterContext);
    } else {
        final int lastIndex = queries.size() - 1;
        final List<String> subQueries = queries.subList(0, lastIndex);
        // run every statement except the last; only the last one's result is returned
        for (String query : subQueries) {
            runQuery(query, interpreterContext);
        }
        return runQuery(queries.get(lastIndex), interpreterContext);
    }
}
|
/**
 * Creates a node with temporal and spatial properties and verifies both are rendered
 * into the graph result with their expected string forms; cleans the node up afterwards.
 */
@Test
void testNodeDataTypes() throws IOException {
    InterpreterResult result = interpreter.interpret(
        "CREATE (n:NodeTypes{" +
        "dateTime: datetime('2015-06-24T12:50:35.556+0100')," +
        "point3d: point({ x:0, y:4, z:1 })})\n" +
        "RETURN n",
        context);
    assertEquals(Code.SUCCESS, result.code());
    // Strip the network-result prefix and parse the remainder as a graph payload.
    ObjectMapper jsonMapper = new ObjectMapper();
    GraphResult.Graph graph = jsonMapper.readValue(result.toString()
        .replace(NETWORK_RESULT_PREFIX, StringUtils.EMPTY), GraphResult.Graph.class);
    final Node node = graph.getNodes().iterator().next();
    Map<String, Object> expectedMap = new HashMap<>();
    expectedMap.put("point3d", "Point{srid=9157, x=0.0, y=4.0, z=1.0}");
    expectedMap.put("dateTime", "2015-06-24T12:50:35.556+01:00");
    assertEquals(expectedMap, node.getData());
    // cleanup: remove the node created above
    interpreter.interpret("MATCH (n:NodeTypes) DETACH DELETE n", context);
}
|
/**
 * Serializes the given object to a JSON string using the default mapper.
 *
 * @param source the object to serialize; must not be null
 * @return the JSON representation
 * @throws JsonParseException wrapping any Jackson serialization failure
 */
@NonNull
public static String objectToJson(@NonNull Object source) {
    Assert.notNull(source, "Source object must not be null");
    try {
        return DEFAULT_JSON_MAPPER.writeValueAsString(source);
    } catch (JsonProcessingException e) {
        throw new JsonParseException(e);
    }
}
|
/**
 * java.time values (Instant, LocalDateTime) must serialize to JSON without throwing,
 * yielding non-null strings.
 */
@Test
public void serializerTime() {
    Instant now = Instant.now();
    String instantStr = JsonUtils.objectToJson(now);
    assertThat(instantStr).isNotNull();
    String localDateTimeStr = JsonUtils.objectToJson(LocalDateTime.now());
    assertThat(localDateTimeStr).isNotNull();
}
|
/**
 * Decodes the CLUSTER NODES reply: one node per line, space-separated positional
 * fields (id, addr@busport, flags, master-id, ..., slot ranges from field 9 onwards).
 * Nodes flagged NOADDR or with an empty host are handled specially; slot entries in
 * migration state ("-&lt;-" / "-&gt;-") are skipped.
 *
 * @param buf the raw UTF-8 reply buffer
 * @param state the decoder state (unused here)
 * @return the parsed node descriptors
 */
@Override
public List<ClusterNodeInfo> decode(ByteBuf buf, State state) throws IOException {
    String response = buf.toString(CharsetUtil.UTF_8);
    List<ClusterNodeInfo> nodes = new ArrayList<>();
    for (String nodeInfo : response.split("\n")) {
        ClusterNodeInfo node = new ClusterNodeInfo(nodeInfo);
        String[] params = nodeInfo.split(" ");
        // field 0: node id
        String nodeId = params[0];
        node.setNodeId(nodeId);
        // field 2: comma-separated flags (myself, master, slave, noaddr, ...)
        String flags = params[2];
        for (String flag : flags.split(",")) {
            for (Flag nodeInfoFlag : ClusterNodeInfo.Flag.values()) {
                if (nodeInfoFlag.getValue().equalsIgnoreCase(flag)) {
                    node.addFlag(nodeInfoFlag);
                    break;
                }
            }
        }
        if (!node.containsFlag(Flag.NOADDR)) {
            String protocol = "redis://";
            if (ssl) {
                protocol = "rediss://";
            }
            // field 1 is "host:port@cluster-bus-port"; drop the bus-port suffix
            String addr = params[1].split("@")[0];
            String name = addr.substring(0, addr.lastIndexOf(":"));
            if (name.isEmpty()) {
                // skip nodes with empty address
                continue;
            }
            String uri = protocol + addr;
            node.setAddress(uri);
        }
        // field 3: master id for replicas, "-" for masters
        String slaveOf = params[3];
        if (!"-".equals(slaveOf)) {
            node.setSlaveOf(slaveOf);
        }
        // fields 8+: slot ranges ("N" or "N-M"); importing/migrating entries are skipped
        if (params.length > 8) {
            for (int i = 0; i < params.length - 8; i++) {
                String slots = params[i + 8];
                if (slots.contains("-<-") || slots.contains("->-")) {
                    continue;
                }
                String[] parts = slots.split("-");
                if (parts.length == 1) {
                    node.addSlotRange(new ClusterSlotRange(Integer.valueOf(parts[0]), Integer.valueOf(parts[0])));
                } else if (parts.length == 2) {
                    node.addSlotRange(new ClusterSlotRange(Integer.valueOf(parts[0]), Integer.valueOf(parts[1])));
                }
            }
        }
        nodes.add(node);
    }
    return nodes;
}
|
/**
 * Decodes a sample 6-node CLUSTER NODES response and verifies host/port parsing
 * of the first node (cluster-bus "@17001" suffix must be stripped).
 */
@Test
public void test() throws IOException {
    ClusterNodesDecoder decoder = new ClusterNodesDecoder(false);
    ByteBuf buf = Unpooled.buffer();
    String info = "7af253f8c20a3b3fbd481801bd361ec6643c6f0b 192.168.234.129:7001@17001 master - 0 1478865073260 8 connected 5461-10922\n" +
            "a0d6a300f9f3b139c89cf45b75dbb7e4a01bb6b5 192.168.234.131:7005@17005 slave 5b00efb410f14ba5bb0a153c057e431d9ee4562e 0 1478865072251 5 connected\n" +
            "454b8aaab7d8687822923da37a91fc0eecbe7a88 192.168.234.130:7002@17002 slave 7af253f8c20a3b3fbd481801bd361ec6643c6f0b 0 1478865072755 8 connected\n" +
            "5b00efb410f14ba5bb0a153c057e431d9ee4562e 192.168.234.131:7004@17004 master - 0 1478865071746 5 connected 10923-16383\n" +
            "14edcdebea55853533a24d5cdc560ecc06ec5295 192.168.234.130:7003@17003 myself,master - 0 0 7 connected 0-5460\n" +
            "58d9f7c6d801aeebaf0e04e1aacb991e7e0ca8ff 192.168.234.129:7000@17000 slave 14edcdebea55853533a24d5cdc560ecc06ec5295 0 1478865071241 7 connected\n";
    byte[] src = info.getBytes();
    buf.writeBytes(src);
    List<ClusterNodeInfo> nodes = decoder.decode(buf, null);
    ClusterNodeInfo node = nodes.get(0);
    Assertions.assertEquals("192.168.234.129", node.getAddress().getHost());
    Assertions.assertEquals(7001, node.getAddress().getPort());
}
|
/**
 * Maps a protobuf field descriptor to the corresponding Java type name string.
 *
 * <p>Scalar types map to their boxed Java names; enums and messages resolve to their
 * full Java class names; map fields become {@code Map<K,V>} with recursively resolved
 * key/value types (protobuf map keys and values cannot themselves be repeated).
 *
 * @param desc field descriptor to translate
 * @return Java type name for the field
 * @throws RuntimeException if the Java type is not supported
 */
public static String getTypeStrFromProto(Descriptors.FieldDescriptor desc) {
    final Descriptors.FieldDescriptor.JavaType javaType = desc.getJavaType();
    // Handle MESSAGE (including map entries) up front; everything else is a flat mapping.
    if (javaType == Descriptors.FieldDescriptor.JavaType.MESSAGE) {
        if (!desc.isMapField()) {
            // simple message
            return getFullJavaName(desc.getMessageType());
        }
        // Map entry messages always carry synthetic "key" and "value" fields.
        final Descriptors.FieldDescriptor keyField = desc.getMessageType().findFieldByName("key");
        final Descriptors.FieldDescriptor valueField = desc.getMessageType().findFieldByName("value");
        return "Map<" + getTypeStrFromProto(keyField) + "," + getTypeStrFromProto(valueField) + ">";
    }
    switch (javaType) {
        case INT:
            return "Integer";
        case LONG:
            return "Long";
        case STRING:
            return "String";
        case FLOAT:
            return "Float";
        case DOUBLE:
            return "Double";
        case BYTE_STRING:
            return "ByteString";
        case BOOLEAN:
            return "Boolean";
        case ENUM:
            return getFullJavaNameForEnum(desc.getEnumType());
        default:
            throw new RuntimeException("do not support field type: " + javaType);
    }
}
|
/**
 * Data-driven check that each named field of TestMessage resolves to the expected
 * Java type string. Cases are supplied by the "typeCases" TestNG data provider.
 */
@Test(dataProvider = "typeCases")
public void testGetTypeStrFromProto(String fieldName, String javaType) {
    Descriptors.FieldDescriptor fd = ComplexTypes.TestMessage.getDescriptor().findFieldByName(fieldName);
    Assert.assertEquals(ProtoBufUtils.getTypeStrFromProto(fd), javaType);
}
|
/**
 * Entry point of the builder: starts building from the given property value.
 * NOTE(review): returns the {@code Build} stage interface backed by a new
 * {@code Builder} — presumably a staged-builder pattern; confirm against the
 * Builder declaration.
 *
 * @param propertyValue raw property value to resolve
 * @return builder stage initialized with {@code propertyValue}
 */
public static Build withPropertyValue(String propertyValue) {
    return new Builder(propertyValue);
}
|
/**
 * An unrecognized property value must resolve to the UNKNOWN client type
 * rather than throwing.
 */
@Test
void it_should_return_unknown_when_property_value_does_not_exist() {
    //GIVEN
    String unknownValue = "an_unknown_value";
    //WHEN
    ElasticsearchClientType esClientType =
        ElasticsearchClientTypeBuilder.withPropertyValue(unknownValue).build();
    //THEN
    assertEquals(UNKNOWN, esClientType);
}
|
/**
 * Lists the contents of a directory, delegating to the overload with the
 * default path delimiter.
 *
 * @param directory directory to list
 * @param listener  progress callback
 * @return listing of {@code directory}
 * @throws BackgroundException if the listing fails
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    return this.list(directory, listener, String.valueOf(Path.DELIMITER));
}
|
/**
 * Integration test against live S3: listing a path that exists as a FILE using a
 * directory-typed Path must raise NotfoundException. Cleans up the created object.
 */
@Test
public void testListFile() throws Exception {
    final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory));
    final String name = new AlphanumericRandomStringService().random();
    final Path file = new S3TouchFeature(session, new S3AccessControlListFeature(session)).touch(new Path(container, name, EnumSet.of(Path.Type.file)), new TransferStatus());
    try {
        // Same key, but typed as a directory — the listing must fail.
        new S3ObjectListService(session, new S3AccessControlListFeature(session)).list(new Path(container, name, EnumSet.of(Path.Type.directory)), new DisabledListProgressListener());
        fail();
    }
    catch(NotfoundException e) {
        // Expected
    }
    new S3DefaultDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Determines how the property with the given name should be aggregated,
 * delegating to the underlying aggregation assessor.
 *
 * @param name property name
 * @return the computed aggregation type
 */
public AggregationType computeAggregationType(String name) {
    return this.aggregationAssessor.computeAggregationType(name);
}
|
/**
 * Compiler-generated bridge methods (from the Citrus/Orange generic hierarchy)
 * must not confuse property introspection: both properties should still be seen
 * as plain basic properties, and no warnings/errors should be logged.
 */
@Test
public void bridgeMethodsShouldBeIgnored() {
    Orange orange = new Orange();
    PropertySetter orangeSetter = new PropertySetter(new BeanDescriptionCache(context), orange);
    assertEquals(AggregationType.AS_BASIC_PROPERTY,
            orangeSetter.computeAggregationType(Citrus.PRECARP_PROPERTY_NAME));
    assertEquals(AggregationType.AS_BASIC_PROPERTY,
            orangeSetter.computeAggregationType(Citrus.PREFIX_PROPERTY_NAME));
    StatusPrinter.print(context);
    checker.assertIsWarningOrErrorFree();
}
|
/**
 * Serializes this index item into a freshly allocated 32-byte buffer.
 *
 * <p>Layout: hashCode (int, offset 0), topicId (int, 4), queueId (int, 8),
 * offset (long, 12), size (int, 20), timeDiff (int, 24), itemIndex (int, 28).
 *
 * @return a buffer positioned at 0 with limit 32, containing the encoded item
 */
public ByteBuffer getByteBuffer() {
    ByteBuffer buffer = ByteBuffer.allocate(32);
    buffer.putInt(hashCode)
            .putInt(topicId)
            .putInt(queueId)
            .putLong(offset)
            .putInt(size)
            .putInt(timeDiff)
            .putInt(itemIndex);
    // Reset position to 0 (limit stays 32) so callers can read from the start,
    // matching the state produced by absolute puts on a fresh buffer.
    buffer.flip();
    return buffer;
}
|
/**
 * Round-trip check of the fixed binary layout: each field must be readable back
 * from its documented byte offset.
 */
@Test
public void getByteBufferTest() {
    IndexItem indexItem = new IndexItem(topicId, queueId, offset, size, hashCode, timeDiff, itemIndex);
    ByteBuffer byteBuffer = indexItem.getByteBuffer();
    Assert.assertEquals(hashCode, byteBuffer.getInt(0));
    Assert.assertEquals(topicId, byteBuffer.getInt(4));
    Assert.assertEquals(queueId, byteBuffer.getInt(8));
    Assert.assertEquals(offset, byteBuffer.getLong(12));
    Assert.assertEquals(size, byteBuffer.getInt(20));
    Assert.assertEquals(timeDiff, byteBuffer.getInt(24));
    Assert.assertEquals(itemIndex, byteBuffer.getInt(28));
}
|
/**
 * Not supported by this client manager implementation.
 *
 * @param clientId   id of the connecting client
 * @param attributes attributes of the connecting client
 * @throws UnsupportedOperationException always; this manager does not handle
 *         sync client connections
 */
@Override
public boolean syncClientConnected(String clientId, ClientAttributes attributes) {
    // Was thrown with an empty message, which gives callers no diagnostic context.
    throw new UnsupportedOperationException(
            "syncClientConnected is not supported by this client manager");
}
|
/**
 * The persistent IP/port client manager must reject syncClientConnected
 * with UnsupportedOperationException.
 */
@Test
void makeSureSyncClientConnected() {
    assertThrows(UnsupportedOperationException.class, () -> {
        persistentIpPortClientManager.syncClientConnected(clientId, clientAttributes);
    });
}
|
/**
 * REST endpoint that creates a user.
 *
 * <p>The user name is verified first; on verification failure that result is
 * returned directly without attempting creation. Otherwise the creation result
 * from the service layer is converted to the standard {@code Result} shape.
 *
 * @param loginUser the session user performing the creation (injected, hidden from API docs)
 * @return verification failure result, or the creation result
 * @throws Exception propagated from the service layer; mapped via {@code @ApiException}
 */
@Operation(summary = "createUser", description = "CREATE_USER_NOTES")
@Parameters({
        @Parameter(name = "userName", description = "USER_NAME", required = true, schema = @Schema(implementation = String.class)),
        @Parameter(name = "userPassword", description = "USER_PASSWORD", required = true, schema = @Schema(implementation = String.class)),
        @Parameter(name = "tenantId", description = "TENANT_ID", required = true, schema = @Schema(implementation = int.class, example = "100")),
        @Parameter(name = "queue", description = "QUEUE", schema = @Schema(implementation = String.class)),
        @Parameter(name = "email", description = "EMAIL", required = true, schema = @Schema(implementation = String.class)),
        @Parameter(name = "phone", description = "PHONE", schema = @Schema(implementation = String.class)),
        @Parameter(name = "state", description = "STATE", schema = @Schema(implementation = int.class, example = "1"))
})
@PostMapping(value = "/create")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(CREATE_USER_ERROR)
public Result createUser(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                         @RequestParam(value = "userName") String userName,
                         @RequestParam(value = "userPassword") String userPassword,
                         @RequestParam(value = "tenantId") int tenantId,
                         @RequestParam(value = "queue", required = false, defaultValue = "") String queue,
                         @RequestParam(value = "email") String email,
                         @RequestParam(value = "phone", required = false) String phone,
                         @RequestParam(value = "state", required = false) int state) throws Exception {
    // Short-circuit on user-name validation failure.
    Result verifyRet = usersService.verifyUserName(userName);
    if (verifyRet.getCode() != Status.SUCCESS.getCode()) {
        return verifyRet;
    }
    Map<String, Object> result =
            usersService.createUser(loginUser, userName, userPassword, email, tenantId, phone, queue, state);
    return returnDataList(result);
}
|
/**
 * POSTs a user-creation request through MockMvc. The request omits "state", and
 * the asserted result code is CREATE_USER_ERROR — i.e. this exercises the
 * error path, not a successful creation.
 */
@Test
public void testCreateUser() throws Exception {
    MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
    paramsMap.add("userName", "user_test");
    paramsMap.add("userPassword", "123456qwe?");
    paramsMap.add("tenantId", "109");
    paramsMap.add("queue", "1");
    paramsMap.add("email", "12343534@qq.com");
    paramsMap.add("phone", "15800000000");
    MvcResult mvcResult = mockMvc.perform(post("/users/create")
            .header(SESSION_ID, sessionId)
            .params(paramsMap))
            .andExpect(status().isOk())
            .andExpect(content().contentType(MediaType.APPLICATION_JSON))
            .andReturn();
    Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
    Assertions.assertEquals(Status.CREATE_USER_ERROR.getCode(), result.getCode().intValue());
    logger.info(mvcResult.getResponse().getContentAsString());
}
|
/**
 * Asserts that the actual value is exactly zero (either {@code 0.0f} or {@code -0.0f}).
 * Fails for null, NaN, infinities, and any non-zero value.
 */
public final void isZero() {
    // NaN compares unequal to everything, so it correctly fails this check.
    boolean actuallyZero = actual != null && actual.floatValue() == 0.0f;
    if (!actuallyZero) {
        failWithActual(simpleFact("expected zero"));
    }
}
|
/**
 * isZero accepts both signed zeros and rejects everything else,
 * including null, NaN, and infinities.
 */
@Test
public void isZero() {
    assertThat(0.0f).isZero();
    assertThat(-0.0f).isZero();
    assertThatIsZeroFails(Float.MIN_VALUE);
    assertThatIsZeroFails(-1.23f);
    assertThatIsZeroFails(Float.POSITIVE_INFINITY);
    assertThatIsZeroFails(Float.NaN);
    assertThatIsZeroFails(null);
}
|
/**
 * This metadata implementation reports no support for table correlation names
 * differing from table names.
 *
 * @return always {@code false}
 */
@Override
public boolean supportsDifferentTableCorrelationNames() {
    return false;
}
|
/** The metadata must report no support for different table correlation names. */
@Test
void assertSupportsDifferentTableCorrelationNames() {
    assertFalse(metaData.supportsDifferentTableCorrelationNames());
}
|
/**
 * Sets the task message, truncating it to {@code MAX_MESSAGE_SIZE} characters.
 *
 * @param message non-null, non-empty message text
 * @return this, for chaining
 * @throws IllegalArgumentException if {@code message} is null or empty
 */
public CeTaskMessageDto setMessage(String message) {
    if (message == null || message.isEmpty()) {
        throw new IllegalArgumentException("message can't be null nor empty");
    }
    this.message = abbreviate(message, MAX_MESSAGE_SIZE);
    return this;
}
|
/** An empty message must be rejected with IllegalArgumentException and the exact message text. */
@Test
void setMessage_fails_with_IAE_if_argument_is_empty() {
    assertThatThrownBy(() -> underTest.setMessage(""))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessage("message can't be null nor empty");
}
|
/**
 * Returns whether a value is present for the given header name.
 *
 * @param name header name to look up
 * @return {@code true} if {@link #get} returns a non-null value for {@code name}
 */
@Override
public boolean contains(K name) {
    return get(name) != null;
}
|
/**
 * For each typed add/contains pair, verifies that the added value is found and a
 * different value of the same type is not.
 */
@Test
public void testContains() {
    TestDefaultHeaders headers = newInstance();
    headers.addBoolean(of("boolean"), true);
    assertTrue(headers.containsBoolean(of("boolean"), true));
    assertFalse(headers.containsBoolean(of("boolean"), false));
    headers.addLong(of("long"), Long.MAX_VALUE);
    assertTrue(headers.containsLong(of("long"), Long.MAX_VALUE));
    assertFalse(headers.containsLong(of("long"), Long.MIN_VALUE));
    headers.addInt(of("int"), Integer.MIN_VALUE);
    assertTrue(headers.containsInt(of("int"), Integer.MIN_VALUE));
    assertFalse(headers.containsInt(of("int"), Integer.MAX_VALUE));
    headers.addShort(of("short"), Short.MAX_VALUE);
    assertTrue(headers.containsShort(of("short"), Short.MAX_VALUE));
    assertFalse(headers.containsShort(of("short"), Short.MIN_VALUE));
    headers.addChar(of("char"), Character.MAX_VALUE);
    assertTrue(headers.containsChar(of("char"), Character.MAX_VALUE));
    assertFalse(headers.containsChar(of("char"), Character.MIN_VALUE));
    headers.addByte(of("byte"), Byte.MAX_VALUE);
    assertTrue(headers.containsByte(of("byte"), Byte.MAX_VALUE));
    // Fixed copy-paste bug: this previously called containsLong; every other
    // type checks the negative case with its own typed accessor.
    assertFalse(headers.containsByte(of("byte"), Byte.MIN_VALUE));
    headers.addDouble(of("double"), Double.MAX_VALUE);
    assertTrue(headers.containsDouble(of("double"), Double.MAX_VALUE));
    assertFalse(headers.containsDouble(of("double"), Double.MIN_VALUE));
    headers.addFloat(of("float"), Float.MAX_VALUE);
    assertTrue(headers.containsFloat(of("float"), Float.MAX_VALUE));
    assertFalse(headers.containsFloat(of("float"), Float.MIN_VALUE));
    long millis = System.currentTimeMillis();
    headers.addTimeMillis(of("millis"), millis);
    assertTrue(headers.containsTimeMillis(of("millis"), millis));
    // This test doesn't work on midnight, January 1, 1970 UTC
    assertFalse(headers.containsTimeMillis(of("millis"), 0));
    headers.addObject(of("object"), "Hello World");
    assertTrue(headers.containsObject(of("object"), "Hello World"));
    assertFalse(headers.containsObject(of("object"), ""));
    headers.add(of("name"), of("value"));
    assertTrue(headers.contains(of("name"), of("value")));
    assertFalse(headers.contains(of("name"), of("value1")));
}
|
/**
 * Resolves a password from Hadoop credential providers.
 *
 * @param config      configuration holding the credential-provider path
 * @param alias       credential alias to look up
 * @param defaultPass value to return when the alias is absent or lookup fails
 * @return the resolved password, or {@code defaultPass} when unavailable
 */
String getPasswordFromCredentialProviders(
    Configuration config, String alias, String defaultPass) {
  try {
    char[] passchars = config.getPasswordFromCredentialProviders(alias);
    // A null result means the alias is not present; keep the default.
    return passchars == null ? defaultPass : new String(passchars);
  } catch (IOException ioe) {
    // Lookup failure is non-fatal: log and fall back to the default password.
    LOG.warn("Exception while trying to get password for alias {}: ",
        alias, ioe);
    return defaultPass;
  }
}
|
/**
 * End-to-end check of alias-based password resolution: creates a JKS credential
 * provider on disk, stores a credential under the bind-password alias, and
 * verifies that resolution returns the stored password for a known alias and
 * the default ("") for an unknown one.
 */
@Test
public void testConfGetPasswordUsingAlias() throws Exception {
    File testDir = GenericTestUtils.getTestDir();
    Configuration conf = getBaseConf();
    final Path jksPath = new Path(testDir.toString(), "test.jks");
    final String ourUrl =
            JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
    // Ensure a clean keystore for this run.
    File file = new File(testDir, "test.jks");
    file.delete();
    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
    // Set alias
    String bindpassAlias = "bindpassAlias";
    conf.set(LdapGroupsMapping.BIND_PASSWORD_ALIAS_KEY, bindpassAlias);
    CredentialProvider provider =
            CredentialProviderFactory.getProviders(conf).get(0);
    char[] bindpass = "bindpass".toCharArray();
    // Ensure that we get null when the key isn't there
    assertNull(provider.getCredentialEntry(bindpassAlias));
    // Create credential for the alias
    provider.createCredentialEntry(bindpassAlias, bindpass);
    provider.flush();
    // Make sure we get back the right key
    assertArrayEquals(bindpass, provider.getCredentialEntry(
            bindpassAlias).getCredential());
    LdapGroupsMapping mapping = new LdapGroupsMapping();
    Assert.assertEquals("bindpass",
            mapping.getPasswordFromCredentialProviders(conf, bindpassAlias, ""));
    // Empty for an invalid alias
    Assert.assertEquals("", mapping.getPasswordFromCredentialProviders(
            conf, "invalid-alias", ""));
}
|
/**
 * Builds the reverse-DNS (PTR lookup) name for an IP address.
 *
 * <p>IPv4 addresses produce {@code d.c.b.a} + the {@code in-addr.arpa} suffix;
 * IPv6 addresses produce all 32 hex nibbles reversed and dot-separated + the
 * {@code ip6.arpa} suffix.
 *
 * @param ipAddress IPv4 or IPv6 address text (leading/trailing whitespace is trimmed)
 * @return the reverse lookup name for the address
 * @throws IllegalArgumentException if the address fails validation or an IPv6
 *         address resolves to more than 16 bytes
 */
public String getInverseAddressFormat(String ipAddress) {
    ipAddress = StringUtils.trim(ipAddress);
    validateIpAddress(ipAddress);
    LOG.debug("Preparing inverse format for IP address [{}]", ipAddress);
    // Detect what type of address is provided (IPv4 or IPv6)
    if (isIp6Address(ipAddress)) {
        LOG.debug("[{}] is an IPv6 address", ipAddress);
        /* Build reverse IPv6 address string with correct ip6.arpa suffix.
         * All hex nibbles from the full address should be reversed (with dots in between)
         * and the ip6.arpa suffix added to the end.
         *
         * For example, the reverse format for the address 2604:a880:800:10::7a1:b001 is
         * 1.0.0.b.1.a.7.0.0.0.0.0.0.0.0.0.0.1.0.0.0.0.8.0.0.8.8.a.4.0.6.2.ip6.arpa
         * See https://www.dnscheck.co/ptr-record-monitor for more info. */
        // Parse the full address as an InetAddress to allow the full address bytes (16 bytes/128 bits) to be obtained.
        final byte[] addressBytes = InetAddresses.forString(ipAddress).getAddress();
        if (addressBytes.length > 16) {
            throw new IllegalArgumentException(String.format(Locale.ENGLISH, "[%s] is an invalid IPv6 address", ipAddress));
        }
        // Convert the raw address bytes to hex (two nibbles per byte).
        final char[] resolvedHex = new char[addressBytes.length * 2];
        for (int i = 0; i < addressBytes.length; i++) {
            final int v = addressBytes[i] & 0xFF;
            resolvedHex[i * 2] = HEX_CHARS_ARRAY[v >>> 4];
            resolvedHex[i * 2 + 1] = HEX_CHARS_ARRAY[v & 0x0F];
        }
        final String fullHexAddress = new String(resolvedHex).toLowerCase(Locale.ENGLISH);
        // Reverse the nibble string and join each nibble with dots.
        final String[] reversedAndSplit = new StringBuilder(fullHexAddress).reverse().toString().split("");
        final String invertedAddress = Joiner.on(".").join(reversedAndSplit);
        LOG.debug("Inverted address [{}] built for [{}]", invertedAddress, ipAddress);
        return invertedAddress + IP_6_REVERSE_SUFFIX;
    } else {
        LOG.debug("[{}] is an IPv4 address", ipAddress);
        /* Build reverse IPv4 address string with correct in-addr.arpa suffix.
         * All octets should be reversed and the ip6.arpa suffix added to the end.
         *
         * For example, the reverse format for the address 10.20.30.40 is
         * 40.30.20.10.in-addr.arpa */
        final String[] octets = ipAddress.split("\\.");
        final String invertedAddress = octets[3] + "." + octets[2] + "." + octets[1] + "." + octets[0] + IP_4_REVERSE_SUFFIX;
        LOG.debug("Inverted address [{}] built for [{}]", invertedAddress, ipAddress);
        return invertedAddress;
    }
}
|
/**
 * Checks both IPv4 and IPv6 reverse-DNS name construction, including the
 * trailing-dot form of the arpa suffixes.
 */
@Test
public void testReverseIpFormat() {
    DnsClient dnsClient = new DnsClient(5000);
    // Test IPv4 reverse format.
    assertEquals("40.30.20.10.in-addr.arpa.", dnsClient.getInverseAddressFormat("10.20.30.40"));
    // Test IPv6 reverse format.
    assertEquals("1.0.0.b.1.a.7.0.0.0.0.0.0.0.0.0.0.1.0.0.0.0.8.0.0.8.8.a.4.0.6.2.ip6.arpa.",
            dnsClient.getInverseAddressFormat("2604:a880:800:10::7a1:b001"));
}
|
/**
 * Intentionally a no-op in this implementation.
 * NOTE(review): presumably key tracking is unnecessary here — confirm against
 * the interface contract this overrides.
 */
@Override
public void setCurrentKey(Object key) {}
|
@SuppressWarnings("LockNotBeforeTry")
@Test
public void testEnsureStateCleanupWithKeyedInputCleanupTimer() {
InMemoryTimerInternals inMemoryTimerInternals = new InMemoryTimerInternals();
KeyedStateBackend keyedStateBackend = Mockito.mock(KeyedStateBackend.class);
Lock stateBackendLock = Mockito.mock(Lock.class);
StringUtf8Coder keyCoder = StringUtf8Coder.of();
IntervalWindow window = new IntervalWindow(new Instant(0), new Instant(10));
Coder<IntervalWindow> windowCoder = IntervalWindow.getCoder();
// Test that cleanup timer is set correctly
ExecutableStageDoFnOperator.CleanupTimer cleanupTimer =
new ExecutableStageDoFnOperator.CleanupTimer<>(
inMemoryTimerInternals,
stateBackendLock,
WindowingStrategy.globalDefault(),
keyCoder,
windowCoder,
keyedStateBackend);
cleanupTimer.setForWindow(KV.of("key", "string"), window);
Mockito.verify(stateBackendLock).lock();
ByteBuffer key = FlinkKeyUtils.encodeKey("key", keyCoder);
Mockito.verify(keyedStateBackend).setCurrentKey(key);
assertThat(
inMemoryTimerInternals.getNextTimer(TimeDomain.EVENT_TIME),
is(window.maxTimestamp().plus(Duration.millis(1))));
Mockito.verify(stateBackendLock).unlock();
}
|
@SuppressWarnings("ReturnOfNull")
@Override
public String getCatalog() {
try {
return connection.getCatalog();
} catch (final SQLException ignored) {
return null;
}
}
|
/** When the wrapped connection throws SQLException, getCatalog must return null. */
@Test
void assertGetCatalogReturnNullWhenThrowsSQLException() throws SQLException {
    when(connection.getCatalog()).thenThrow(SQLException.class);
    MetaDataLoaderConnection connection = new MetaDataLoaderConnection(databaseType, this.connection);
    assertNull(connection.getCatalog());
}
|
/**
 * Sentinel provider-side filter: wraps a sync RPC invocation in interface- and
 * method-level Sentinel resource entries.
 *
 * <p>Non-sync invocations bypass Sentinel entirely. Blocked requests are routed
 * to the registered provider fallback. Entry/exit ordering matters: the method
 * entry is exited before the interface entry, and the context is exited last.
 *
 * @param invoker downstream invoker
 * @param request the RPC request
 * @return the response (possibly from the fallback handler)
 * @throws SofaRpcException on non-block failures, after exception tracing
 */
@Override
public SofaResponse invoke(FilterInvoker invoker, SofaRequest request) throws SofaRpcException {
    // Now only support sync invoke.
    if (request.getInvokeType() != null && !RpcConstants.INVOKER_TYPE_SYNC.equals(request.getInvokeType())) {
        return invoker.invoke(request);
    }
    String callerApp = getApplicationName(request);
    String interfaceResourceName = getInterfaceResourceName(request);
    String methodResourceName = getMethodResourceName(request);
    Entry interfaceEntry = null;
    Entry methodEntry = null;
    try {
        // Enter the Sentinel context under the method resource, attributed to the caller app.
        ContextUtil.enter(methodResourceName, callerApp);
        interfaceEntry = SphU.entry(interfaceResourceName, ResourceTypeConstants.COMMON_RPC, EntryType.IN);
        methodEntry = SphU.entry(methodResourceName, ResourceTypeConstants.COMMON_RPC,
            EntryType.IN, getMethodArguments(request));
        SofaResponse response = invoker.invoke(request);
        // Record any business exception carried by the response on both entries.
        traceResponseException(response, interfaceEntry, methodEntry);
        return response;
    } catch (BlockException e) {
        // Flow/degrade rules blocked the call: delegate to the provider fallback.
        return SofaRpcFallbackRegistry.getProviderFallback().handle(invoker, request, e);
    } catch (Throwable t) {
        throw traceOtherException(t, interfaceEntry, methodEntry);
    } finally {
        // Exit in reverse order of entry; only exit what was successfully entered.
        if (methodEntry != null) {
            methodEntry.exit(1, getMethodArguments(request));
        }
        if (interfaceEntry != null) {
            interfaceEntry.exit();
        }
        ContextUtil.exit();
    }
}
|
/**
 * Verifies that during invocation the Sentinel context, interface entry, and
 * method entry are all set up (checked inside the mocked invoker), and that
 * the context is fully exited after the filter returns.
 */
@Test
public void testInvokeSentinelWorks() {
    SentinelSofaRpcProviderFilter filter = new SentinelSofaRpcProviderFilter();
    final String applicationName = "demo-provider";
    final String interfaceResourceName = "com.alibaba.csp.sentinel.adapter.sofa.rpc.service.DemoService";
    final String methodResourceName = "com.alibaba.csp.sentinel.adapter.sofa.rpc.service.DemoService#sayHello(java.lang.String,int)";
    SofaRequest request = mock(SofaRequest.class);
    when(request.getRequestProp("app")).thenReturn(applicationName);
    when(request.getInvokeType()).thenReturn(RpcConstants.INVOKER_TYPE_SYNC);
    when(request.getInterfaceName()).thenReturn(interfaceResourceName);
    when(request.getMethodName()).thenReturn("sayHello");
    when(request.getMethodArgSigs()).thenReturn(new String[]{"java.lang.String", "int"});
    when(request.getMethodArgs()).thenReturn(new Object[]{"Sentinel", 2020});
    FilterInvoker filterInvoker = mock(FilterInvoker.class);
    when(filterInvoker.invoke(request)).thenAnswer(new Answer<SofaResponse>() {
        @Override
        public SofaResponse answer(InvocationOnMock invocationOnMock) throws Throwable {
            // Assert the Sentinel invocation tree while inside the filtered call.
            verifyInvocationStructure(applicationName, interfaceResourceName, methodResourceName);
            SofaResponse response = new SofaResponse();
            response.setAppResponse("Hello Sentinel 2020");
            return response;
        }
    });
    // Before invoke
    assertNull(ContextUtil.getContext());
    // Do invoke
    SofaResponse response = filter.invoke(filterInvoker, request);
    assertEquals("Hello Sentinel 2020", response.getAppResponse());
    verify(filterInvoker).invoke(request);
    // After invoke, make sure exit context
    assertNull(ContextUtil.getContext());
}
|
/**
 * Registers a persistent (non-ephemeral) instance over the HTTP API.
 *
 * <p>Ephemeral instances are rejected: HTTP registration is only for persistent
 * instances; ephemeral registration must go through gRPC.
 *
 * @param serviceName service to register under
 * @param groupName   group the service belongs to
 * @param instance    instance to register; must have {@code ephemeral == false}
 * @throws NacosException if the HTTP request fails
 * @throws UnsupportedOperationException if the instance is ephemeral
 */
@Override
public void registerService(String serviceName, String groupName, Instance instance) throws NacosException {
    NAMING_LOGGER.info("[REGISTER-SERVICE] {} registering service {} with instance: {}", namespaceId, serviceName,
            instance);
    String groupedServiceName = NamingUtils.getGroupedName(serviceName, groupName);
    if (instance.isEphemeral()) {
        throw new UnsupportedOperationException(
                "Do not support register ephemeral instances by HTTP, please use gRPC replaced.");
    }
    // Flatten the instance into form parameters for the HTTP API.
    final Map<String, String> params = new HashMap<>(32);
    params.put(CommonParams.NAMESPACE_ID, namespaceId);
    params.put(CommonParams.SERVICE_NAME, groupedServiceName);
    params.put(CommonParams.GROUP_NAME, groupName);
    params.put(CommonParams.CLUSTER_NAME, instance.getClusterName());
    params.put(IP_PARAM, instance.getIp());
    params.put(PORT_PARAM, String.valueOf(instance.getPort()));
    params.put(WEIGHT_PARAM, String.valueOf(instance.getWeight()));
    params.put(REGISTER_ENABLE_PARAM, String.valueOf(instance.isEnabled()));
    params.put(HEALTHY_PARAM, String.valueOf(instance.isHealthy()));
    params.put(EPHEMERAL_PARAM, String.valueOf(instance.isEphemeral()));
    // Metadata is sent as a JSON-encoded string parameter.
    params.put(META_PARAM, JacksonUtils.toJson(instance.getMetadata()));
    reqApi(UtilAndComs.nacosUrlInstance, params, HttpMethod.POST);
}
|
/**
 * Injects a REST template whose exchangeForm returns null (triggering an NPE
 * internally) and verifies that registerService surfaces it as a NacosException
 * with SERVER_ERROR and the NPE in its message.
 */
@Test
void testRegisterServiceThrowsException() throws Exception {
    assertThrows(NacosException.class, () -> {
        NacosRestTemplate nacosRestTemplate = mock(NacosRestTemplate.class);
        HttpRestResult<Object> a = new HttpRestResult<Object>();
        a.setCode(503);
        // makes exchangeForm failed with a NullPointerException
        when(nacosRestTemplate.exchangeForm(any(), any(), any(), any(), any(), any())).thenReturn(null);
        // Swap the proxy's internal REST template via reflection.
        final Field nacosRestTemplateField = NamingHttpClientProxy.class.getDeclaredField("nacosRestTemplate");
        nacosRestTemplateField.setAccessible(true);
        nacosRestTemplateField.set(clientProxy, nacosRestTemplate);
        String serviceName = "service1";
        String groupName = "group1";
        Instance instance = new Instance();
        instance.setEphemeral(false);
        try {
            clientProxy.registerService(serviceName, groupName, instance);
        } catch (NacosException ex) {
            // verify the `NacosException` is directly thrown
            assertTrue(ex.getErrMsg().contains("java.lang.NullPointerException"));
            assertEquals(NacosException.SERVER_ERROR, ex.getErrCode());
            throw ex;
        }
    });
}
|
/**
 * Logs at WARN level with a lazily-computed argument: the supplier is only
 * invoked when WARN logging is enabled.
 *
 * @param logger   target logger
 * @param format   parameterized log format
 * @param supplier produces the single format argument on demand
 */
public static void warn(final Logger logger, final String format, final Supplier<Object> supplier) {
    if (!logger.isWarnEnabled()) {
        return;
    }
    logger.warn(format, supplier.get());
}
|
/** With WARN disabled, the supplier must never be evaluated (lazy logging). */
@Test
public void testNeverWarnWithFormat() {
    when(logger.isWarnEnabled()).thenReturn(false);
    LogUtils.warn(logger, "testWarn: {}", supplier);
    verify(supplier, never()).get();
}
|
/**
 * Receives the dynamic-filtering data event from the collector operator and
 * distributes it to all registered listener coordinators via the coordinator store.
 *
 * <p>Duplicate notifications (from speculative execution or failover) are tolerated
 * only when their data is identical to the first one; otherwise a global failover
 * is forced by throwing, since silently proceeding could yield incorrect results.
 *
 * @param subtask       sender subtask index (unused for this event type)
 * @param attemptNumber sender attempt number (unused for this event type)
 * @param event         wrapped {@code DynamicFilteringEvent}
 * @throws IllegalStateException when recomputed filtering data differs from the
 *         previously received data
 */
@Override
public void handleEventFromOperator(int subtask, int attemptNumber, OperatorEvent event) {
    DynamicFilteringData currentData =
            ((DynamicFilteringEvent) ((SourceEventWrapper) event).getSourceEvent()).getData();
    if (receivedFilteringData == null) {
        receivedFilteringData = currentData;
    } else {
        // Since there might be speculative execution or failover, we may receive multiple
        // notifications, and we can't tell for sure which one is valid for further processing.
        if (DynamicFilteringData.isEqual(receivedFilteringData, currentData)) {
            // If the notifications contain exactly the same data, everything is alright, and
            // we don't need to send the event again.
            return;
        } else {
            // In case the mismatching of the source filtering result and the dim data, which
            // may leads to incorrect result, trigger global failover for fully recomputing.
            throw new IllegalStateException(
                    "DynamicFilteringData is recomputed but not equal. "
                            + "Triggering global failover in case the result is incorrect. "
                            + " It's recommended to re-run the job with dynamic filtering disabled.");
        }
    }
    // First delivery: push the event to every listener, or park it in the store
    // for listeners that have not registered yet.
    for (String listenerID : dynamicFilteringDataListenerIDs) {
        coordinatorStore.compute(
                listenerID,
                (key, oldValue) -> {
                    // The value for a listener ID can be a source coordinator listening to an
                    // event, or an event waiting to be retrieved
                    if (oldValue == null || oldValue instanceof OperatorEvent) {
                        // If the listener has not been registered, or after a global failover
                        // without cleanup the store, we simply update it to the latest value.
                        // The listener coordinator would retrieve the event once it's started.
                        LOG.info(
                                "Updating event {} before the source coordinator with ID {} is registered",
                                event,
                                listenerID);
                        return event;
                    } else {
                        checkState(
                                oldValue instanceof OperatorCoordinator,
                                "The existing value for "
                                        + listenerID
                                        + "is expected to be an operator coordinator, but it is in fact "
                                        + oldValue);
                        LOG.info(
                                "Distributing event {} to source coordinator with ID {}",
                                event,
                                listenerID);
                        try {
                            // Subtask index and attempt number is not necessary for handling
                            // DynamicFilteringEvent.
                            ((OperatorCoordinator) oldValue)
                                    .handleEventFromOperator(0, 0, event);
                        } catch (Exception e) {
                            ExceptionUtils.rethrow(e);
                        }
                        // Dynamic filtering event is expected to be sent only once. So after
                        // the coordinator is notified, it can be removed from the store.
                        return null;
                    }
                });
    }
}
|
/**
 * With two listener coordinators registered in the store, a single filtering
 * event must be delivered to each exactly once (a second poll returns null).
 */
@Test
void testRedistributeData() throws Exception {
    MockOperatorCoordinatorContext context =
            new MockOperatorCoordinatorContext(new OperatorID(), 1);
    String listenerID1 = "test-listener-1";
    String listenerID2 = "test-listener-2";
    TestingOperatorCoordinator listener1 = new TestingOperatorCoordinator(context);
    TestingOperatorCoordinator listener2 = new TestingOperatorCoordinator(context);
    context.getCoordinatorStore().putIfAbsent(listenerID1, listener1);
    context.getCoordinatorStore().putIfAbsent(listenerID2, listener2);
    RowType rowType = RowType.of(new IntType());
    OperatorEvent testEvent = dynamicFilteringEvent(rowType, Collections.emptyList());
    try (DynamicFilteringDataCollectorOperatorCoordinator coordinator =
            new DynamicFilteringDataCollectorOperatorCoordinator(
                    context, Arrays.asList(listenerID1, listenerID2))) {
        coordinator.handleEventFromOperator(0, 1, testEvent);
    }
    assertThat(listener1.getNextReceivedOperatorEvent()).isSameAs(testEvent);
    assertThat(listener1.getNextReceivedOperatorEvent()).isNull();
    assertThat(listener2.getNextReceivedOperatorEvent()).isSameAs(testEvent);
    assertThat(listener2.getNextReceivedOperatorEvent()).isNull();
}
|
/**
 * Parses the stream in a forked JVM process via a pooled {@code ForkClient}.
 *
 * <p>The {@code alive} flag drives client recycling: it stays {@code false} only
 * when the fork itself failed (IOException from the call), so the broken client
 * is discarded on release; TikaException and normal completion keep the client
 * alive for reuse. Any throwable returned from the forked call is rethrown here
 * after the client has been released.
 *
 * @param stream   document stream to parse; must not be null
 * @param handler  SAX handler receiving content events
 * @param metadata document metadata (in/out)
 * @param context  parse context
 * @throws IOException   rethrown from the forked parser
 * @throws SAXException  rethrown from the forked parser
 * @throws TikaException on parser errors or fork communication failure
 */
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
                  ParseContext context) throws IOException, SAXException, TikaException {
    if (stream == null) {
        throw new NullPointerException("null stream");
    }
    Throwable t;
    boolean alive = false;
    ForkClient client = acquireClient();
    try {
        // Recursive wrapper handlers manage metadata themselves; otherwise tee
        // content events into the metadata handler as well.
        ContentHandler tee =
                (handler instanceof AbstractRecursiveParserWrapperHandler) ? handler :
                        new TeeContentHandler(handler, new MetadataContentHandler(metadata));
        t = client.call("parse", stream, tee, metadata, context);
        alive = true;
    } catch (TikaException te) {
        // Problem occurred on our side
        alive = true;
        throw te;
    } catch (IOException e) {
        // Problem occurred on the other side
        throw new TikaException("Failed to communicate with a forked parser process." +
                " The process has most likely crashed due to some error" +
                " like running out of memory. A new process will be" +
                " started for the next parsing request.", e);
    } finally {
        // alive == false means the fork is broken; the pool will discard it.
        releaseClient(client, alive);
    }
    // Rethrow the forked call's result as its original checked type where possible.
    if (t instanceof IOException) {
        throw (IOException) t;
    } else if (t instanceof SAXException) {
        throw (SAXException) t;
    } else if (t instanceof TikaException) {
        throw (TikaException) t;
    } else if (t != null) {
        throw new TikaException("Unexpected error in forked server process", t);
    }
}
|
/**
 * Runs ten concurrent parses through one ForkParser and verifies each thread
 * receives the test parser's fixed output, exercising client pooling under
 * parallel load.
 */
@Test
public void testParallelParsing() throws Exception {
    try (ForkParser parser = new ForkParser(ForkParserTest.class.getClassLoader(),
            new ForkTestParser())) {
        final ParseContext context = new ParseContext();
        Thread[] threads = new Thread[10];
        ContentHandler[] output = new ContentHandler[threads.length];
        for (int i = 0; i < threads.length; i++) {
            final ContentHandler o = new BodyContentHandler();
            output[i] = o;
            threads[i] = new Thread(() -> {
                try {
                    InputStream stream = new ByteArrayInputStream(new byte[0]);
                    parser.parse(stream, o, new Metadata(), context);
                } catch (Exception e) {
                    e.printStackTrace();
                }
            });
            threads[i].start();
        }
        for (int i = 0; i < threads.length; i++) {
            threads[i].join();
            assertEquals("Hello, World!", output[i].toString().trim());
        }
    }
}
|
/**
 * Builds the Log4J properties used by the Elasticsearch process.
 *
 * @param props  process properties (cluster node name, console/json flags, levels)
 * @param logDir directory log files are written to
 * @return fully built Log4J {@link Properties}
 */
public Properties createProperties(Props props, File logDir) {
    Log4JPropertiesBuilder log4JPropertiesBuilder = new Log4JPropertiesBuilder(props);
    // Root logger is configured for the Elasticsearch process, with the node
    // name included when running in cluster mode.
    RootLoggerConfig config = newRootLoggerConfigBuilder()
            .setNodeNameField(getNodeNameWhenCluster(props))
            .setProcessId(ProcessId.ELASTICSEARCH)
            .build();
    String logPattern = log4JPropertiesBuilder.buildLogPattern(config);
    return log4JPropertiesBuilder.internalLogLevel(Level.ERROR)
            .rootLoggerConfig(config)
            .logPattern(logPattern)
            .enableAllLogsToConsole(isAllLogsToConsoleEnabled(props))
            .jsonOutput(isJsonOutput(props))
            .logDir(logDir)
            .logLevelConfig(
                    LogLevelConfig.newBuilder(log4JPropertiesBuilder.getRootLoggerName())
                            .rootLevelFor(ProcessId.ELASTICSEARCH)
                            .build())
            .build();
}
|
/** With no level property configured, the root logger defaults to INFO. */
@Test
public void createProperties_sets_root_logger_to_INFO_if_no_property_is_set() throws IOException {
    File logDir = temporaryFolder.newFolder();
    Properties properties = underTest.createProperties(newProps(), logDir);
    assertThat(properties.getProperty("rootLogger.level")).isEqualTo("INFO");
}
|
/**
 * Applies the keyboard theme, then refreshes theme-dependent state: the
 * extension-keyboard dismiss threshold (one normal key height) and the
 * gesture-trail drawing helper built from the theme's trail resources.
 */
@Override
public void setKeyboardTheme(@NonNull KeyboardTheme theme) {
    super.setKeyboardTheme(theme);
    mExtensionKeyboardYDismissPoint = getThemedKeyboardDimens().getNormalKeyHeight();
    mGestureDrawingHelper =
        GestureTypingPathDrawHelper.create(
            this::invalidate,
            GestureTrailTheme.fromThemeResource(
                getContext(),
                theme.getPackageContext(),
                theme.getResourceMapping(),
                theme.getGestureTrailThemeResId()));
}
|
/**
 * Every enabled keyboard theme must leave at least watermark margin + watermark
 * size of bottom padding on the view after being applied.
 */
@Test
public void testMinimumPadding() {
    final Resources resources = mViewUnderTest.getContext().getResources();
    final int minimumBottomPadding =
        resources.getDimensionPixelOffset(R.dimen.watermark_margin)
            + resources.getDimensionPixelOffset(R.dimen.watermark_size);
    // Sanity check that the resource lookup produced a meaningful threshold.
    Assert.assertTrue(
        "Expected minimumBottomPadding to be larger than 1, but is " + minimumBottomPadding,
        1 < minimumBottomPadding);
    AnyApplication.getKeyboardThemeFactory(mViewUnderTest.getContext())
        .getEnabledAddOns()
        .forEach(
            keyboardTheme -> {
              mViewUnderTest.setKeyboardTheme(keyboardTheme);
              Assert.assertSame(keyboardTheme, mViewUnderTest.getLastSetKeyboardTheme());
              Assert.assertTrue(mViewUnderTest.getPaddingBottom() >= minimumBottomPadding);
            });
}
|
/**
 * Returns whether the collection is empty after first reaping stale entries,
 * so cleared references do not count toward emptiness.
 */
@Override
public boolean isEmpty() {
    reap();
    return backingStore.isEmpty();
}
|
/**
 * Concurrency stress test: runs 80 worker threads against the shared data and
 * asserts that none of them recorded an exception.
 */
@Test
void stressMap() {
    for (int i = 1; i <= TEST_SIZE; i++) {
        data.add("Data_" + i);
    }
    List<Thread> threads = new ArrayList<>(80);
    // Fixed off-by-one: the loop previously used `i <= 80`, creating 81 threads
    // although the list capacity hint (and apparent intent) is 80.
    for (int i = 0; i < 80; i++) {
        final int seed = (i + 1) * 100;
        Runnable runnable = () -> rundata(seed);
        Thread t = new Thread(runnable);
        threads.add(t);
    }
    threads.forEach(Thread::start);
    threads.forEach((Thread t) -> {
        try {
            t.join();
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
    });
    Assertions.assertTrue(exceptions.isEmpty());
}
|
/**
 * Creates a {@link File} from the given path after resolving it to an absolute path.
 *
 * @param path path text; may be null
 * @return the resolved file, or {@code null} when {@code path} is null
 */
public static File file(String path) {
    return path == null ? null : new File(getAbsolutePath(path));
}
|
/** The two-argument file(parent, child) overload must produce a non-null File. */
@Test
public void fileTest1() {
    final File file = FileUtil.file("d:/aaa", "bbb");
    assertNotNull(file);
}
|
/**
 * Validates an update to an existing sink configuration and returns the merged result.
 *
 * <p>Identity fields (tenant, namespace, name) and operational invariants
 * (subscription name, input topics and their regex flag, processing guarantees,
 * retain-(key-)ordering, auto-ack) must not change; any attempt throws
 * {@link IllegalArgumentException}. All other non-empty/non-null fields from
 * {@code newConfig} overwrite the corresponding fields on a clone of
 * {@code existingConfig}.
 *
 * <p>NOTE(review): this method also mutates {@code newConfig}'s input specs in
 * place while normalising inputs — callers should not rely on {@code newConfig}
 * being unchanged after the call.
 *
 * @param existingConfig the currently deployed sink configuration
 * @param newConfig the requested update
 * @return the merged configuration
 * @throws IllegalArgumentException if an immutable field differs between the configs
 */
public static SinkConfig validateUpdate(SinkConfig existingConfig, SinkConfig newConfig) {
    SinkConfig mergedConfig = clone(existingConfig);
    // Identity fields are immutable.
    if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
        throw new IllegalArgumentException("Tenants differ");
    }
    if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
        throw new IllegalArgumentException("Namespaces differ");
    }
    if (!existingConfig.getName().equals(newConfig.getName())) {
        throw new IllegalArgumentException("Sink Names differ");
    }
    if (!StringUtils.isEmpty(newConfig.getClassName())) {
        mergedConfig.setClassName(newConfig.getClassName());
    }
    if (!StringUtils.isEmpty(newConfig.getSourceSubscriptionName()) && !newConfig.getSourceSubscriptionName()
            .equals(existingConfig.getSourceSubscriptionName())) {
        throw new IllegalArgumentException("Subscription Name cannot be altered");
    }
    // Ensure both spec maps exist before normalisation below.
    if (newConfig.getInputSpecs() == null) {
        newConfig.setInputSpecs(new HashMap<>());
    }
    if (mergedConfig.getInputSpecs() == null) {
        mergedConfig.setInputSpecs(new HashMap<>());
    }
    if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
        mergedConfig.setLogTopic(newConfig.getLogTopic());
    }
    // Normalise the alternative input notations (inputs list, topics pattern,
    // per-topic serde/schema) into newConfig's inputSpecs map.
    if (newConfig.getInputs() != null) {
        newConfig.getInputs().forEach((topicName -> {
            newConfig.getInputSpecs().putIfAbsent(topicName,
                    ConsumerConfig.builder().isRegexPattern(false).build());
        }));
    }
    if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) {
        newConfig.getInputSpecs().put(newConfig.getTopicsPattern(),
                ConsumerConfig.builder()
                        .isRegexPattern(true)
                        .build());
    }
    if (newConfig.getTopicToSerdeClassName() != null) {
        newConfig.getTopicToSerdeClassName().forEach((topicName, serdeClassName) -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder()
                            .serdeClassName(serdeClassName)
                            .isRegexPattern(false)
                            .build());
        });
    }
    if (newConfig.getTopicToSchemaType() != null) {
        newConfig.getTopicToSchemaType().forEach((topicName, schemaClassname) -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder()
                            .schemaType(schemaClassname)
                            .isRegexPattern(false)
                            .build());
        });
    }
    // Input topics may not be added, and a topic's regex flag may not flip; the
    // consumer config for an existing topic may otherwise be updated.
    if (!newConfig.getInputSpecs().isEmpty()) {
        SinkConfig finalMergedConfig = mergedConfig;
        newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> {
            if (!existingConfig.getInputSpecs().containsKey(topicName)) {
                throw new IllegalArgumentException("Input Topics cannot be altered");
            }
            if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) {
                throw new IllegalArgumentException(
                        "isRegexPattern for input topic " + topicName + " cannot be altered");
            }
            finalMergedConfig.getInputSpecs().put(topicName, consumerConfig);
        });
    }
    if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
            .equals(existingConfig.getProcessingGuarantees())) {
        throw new IllegalArgumentException("Processing Guarantees cannot be altered");
    }
    if (newConfig.getConfigs() != null) {
        mergedConfig.setConfigs(newConfig.getConfigs());
    }
    if (newConfig.getSecrets() != null) {
        mergedConfig.setSecrets(newConfig.getSecrets());
    }
    if (newConfig.getParallelism() != null) {
        mergedConfig.setParallelism(newConfig.getParallelism());
    }
    if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering()
            .equals(existingConfig.getRetainOrdering())) {
        throw new IllegalArgumentException("Retain Ordering cannot be altered");
    }
    if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering()
            .equals(existingConfig.getRetainKeyOrdering())) {
        throw new IllegalArgumentException("Retain Key Ordering cannot be altered");
    }
    if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) {
        throw new IllegalArgumentException("AutoAck cannot be altered");
    }
    // Resources are merged field-by-field rather than replaced wholesale.
    if (newConfig.getResources() != null) {
        mergedConfig
                .setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
    }
    if (newConfig.getTimeoutMs() != null) {
        mergedConfig.setTimeoutMs(newConfig.getTimeoutMs());
    }
    if (newConfig.getCleanupSubscription() != null) {
        mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription());
    }
    if (!StringUtils.isEmpty(newConfig.getArchive())) {
        mergedConfig.setArchive(newConfig.getArchive());
    }
    if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
        mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
    }
    if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
        mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
    }
    if (newConfig.getTransformFunction() != null) {
        mergedConfig.setTransformFunction(newConfig.getTransformFunction());
    }
    if (newConfig.getTransformFunctionClassName() != null) {
        mergedConfig.setTransformFunctionClassName(newConfig.getTransformFunctionClassName());
    }
    if (newConfig.getTransformFunctionConfig() != null) {
        mergedConfig.setTransformFunctionConfig(newConfig.getTransformFunctionConfig());
    }
    return mergedConfig;
}
|
/**
 * Updating the transform-function class name must be applied to the merged
 * config, and reverting that single field must make the merged config identical
 * to the original (i.e. nothing else was altered).
 */
@Test
public void testMergeDifferentTransformFunctionClassName() {
    SinkConfig sinkConfig = createSinkConfig();
    String newFunctionClassName = "NewTransformFunction";
    SinkConfig newSinkConfig = createUpdatedSinkConfig("transformFunctionClassName", newFunctionClassName);
    SinkConfig mergedConfig = SinkConfigUtils.validateUpdate(sinkConfig, newSinkConfig);
    assertEquals(
            mergedConfig.getTransformFunctionClassName(),
            newFunctionClassName
    );
    // Round-trip check: undo the one change and compare full JSON serialisations.
    mergedConfig.setTransformFunctionClassName(sinkConfig.getTransformFunctionClassName());
    assertEquals(
            new Gson().toJson(sinkConfig),
            new Gson().toJson(mergedConfig)
    );
}
|
/**
 * Not supported by this adapter.
 *
 * @throws MethodNotAvailableException always
 */
@Override
@MethodNotAvailable
public Map<K, Object> executeOnEntries(com.hazelcast.map.EntryProcessor entryProcessor) {
    throw new MethodNotAvailableException();
}
|
/**
 * The predicate overload of {@code executeOnEntries} must also report itself as
 * unavailable on this adapter.
 */
@Test(expected = MethodNotAvailableException.class)
public void testExecuteOnEntriesWithPredicate() {
    adapter.executeOnEntries(new IMapReplaceEntryProcessor("value", "newValue"), Predicates.alwaysTrue());
}
|
/**
 * Boxed overload: unboxes and delegates to the primitive {@code add(int)}.
 * NOTE: a {@code null} value throws {@link NullPointerException} via unboxing.
 */
@Override
public boolean add(final Integer value) {
    return add(value.intValue());
}
|
/**
 * Two sets containing the same values must compare equal regardless of how they
 * were constructed.
 */
@Test
public void setsWithTheSameValuesAreEqual() {
    final IntHashSet other = new IntHashSet(100, -1);
    for (final int value : new int[] {1, 1001}) {
        set.add(value);
        other.add(value);
    }
    assertEquals(set, other);
}
|
/**
 * Removes members from a consumer group via the coordinator-aware admin driver.
 *
 * <p>The leave reason defaults to {@code DEFAULT_LEAVE_GROUP_REASON} when none is
 * supplied, and is truncated to the protocol limit otherwise. With
 * {@code removeAll()}, the current membership is fetched from the group; otherwise
 * the caller-supplied members are used.
 *
 * @param groupId the consumer group to remove members from
 * @param options removal options (members or remove-all, reason, timeout)
 * @return a result exposing per-member outcomes
 */
@Override
public RemoveMembersFromConsumerGroupResult removeMembersFromConsumerGroup(String groupId,
                                                                           RemoveMembersFromConsumerGroupOptions options) {
    String reason = options.reason() == null || options.reason().isEmpty() ?
        DEFAULT_LEAVE_GROUP_REASON : JoinGroupRequest.maybeTruncateReason(options.reason());
    List<MemberIdentity> members;
    if (options.removeAll()) {
        // Resolve the full membership so every member leaves with the same reason.
        members = getMembersFromGroup(groupId, reason);
    } else {
        members = options.members().stream()
            .map(m -> m.toMemberIdentity().setReason(reason))
            .collect(Collectors.toList());
    }
    SimpleAdminApiFuture<CoordinatorKey, Map<MemberIdentity, Errors>> future =
        RemoveMembersFromConsumerGroupHandler.newFuture(groupId);
    RemoveMembersFromConsumerGroupHandler handler = new RemoveMembersFromConsumerGroupHandler(groupId, members, logContext);
    invokeDriver(handler, future, options.timeoutMs);
    return new RemoveMembersFromConsumerGroupResult(future.get(CoordinatorKey.byGroupId(groupId)), options.members());
}
|
/**
 * Retriable coordinator errors (load-in-progress, not-coordinator,
 * coordinator-not-available) must be retried transparently until the leave-group
 * call eventually succeeds.
 */
@Test
public void testRemoveMembersFromGroupRetriableErrors() throws Exception {
    // Retriable errors should be retried
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(
            prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        // First attempt: coordinator still loading — retried against same coordinator.
        env.kafkaClient().prepareResponse(
            new LeaveGroupResponse(new LeaveGroupResponseData()
                                       .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code())));
        /*
         * We need to return two responses here, one for NOT_COORDINATOR call when calling remove member
         * api using coordinator that has moved. This will retry whole operation. So we need to again respond with a
         * FindCoordinatorResponse.
         *
         * And the same reason for the following COORDINATOR_NOT_AVAILABLE error response
         */
        env.kafkaClient().prepareResponse(
            new LeaveGroupResponse(new LeaveGroupResponseData()
                                       .setErrorCode(Errors.NOT_COORDINATOR.code())));
        env.kafkaClient().prepareResponse(
            prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        env.kafkaClient().prepareResponse(
            new LeaveGroupResponse(new LeaveGroupResponseData()
                                       .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code())));
        env.kafkaClient().prepareResponse(
            prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        // Final attempt succeeds for the single static member.
        MemberResponse memberResponse = new MemberResponse()
                                            .setGroupInstanceId("instance-1")
                                            .setErrorCode(Errors.NONE.code());
        env.kafkaClient().prepareResponse(
            new LeaveGroupResponse(new LeaveGroupResponseData()
                                       .setErrorCode(Errors.NONE.code())
                                       .setMembers(Collections.singletonList(memberResponse))));
        MemberToRemove memberToRemove = new MemberToRemove("instance-1");
        Collection<MemberToRemove> membersToRemove = singletonList(memberToRemove);
        final RemoveMembersFromConsumerGroupResult result = env.adminClient().removeMembersFromConsumerGroup(
            GROUP_ID, new RemoveMembersFromConsumerGroupOptions(membersToRemove));
        assertNull(result.all().get());
        assertNull(result.memberResult(memberToRemove).get());
    }
}
|
/**
 * Returns the maximum query-rate growth observed between any pair of snapshots,
 * expressed as a fraction of the current query rate per minute.
 *
 * <p>Returns the default 0.1 when there is no data (no snapshots, less than a
 * day of history with no significant growth, or a zero current query rate), and
 * 0.0 when at least a day of data shows stable load.
 *
 * @param window the window used to compute the current query rate
 * @param now the reference instant for the current-rate computation
 * @return growth per minute as a fraction of the current query rate
 */
public double maxQueryGrowthRate(Duration window, Instant now) {
    if (snapshots.isEmpty()) return 0.1;
    // Find the period having the highest growth rate, where total growth exceeds 30% increase
    double maxGrowthRate = 0; // In query rate growth per second (to get good resolution)
    for (int start = 0; start < snapshots.size(); start++) {
        if (start > 0) { // Optimization: Skip this point when starting from the previous is better relative to the best rate so far
            Duration duration = durationBetween(start - 1, start);
            if (duration.toSeconds() != 0) {
                double growthRate = (queryRateAt(start - 1) - queryRateAt(start)) / duration.toSeconds();
                if (growthRate >= maxGrowthRate)
                    continue;
            }
        }
        // Find a subsequent snapshot where the query rate has increased significantly
        for (int end = start + 1; end < snapshots.size(); end++) {
            Duration duration = durationBetween(start, end);
            if (duration.toSeconds() == 0) continue;
            if (duration.compareTo(GROWTH_RATE_MIN_INTERVAL) < 0) continue; // Too short period to be considered
            if (significantGrowthBetween(start, end)) {
                double growthRate = (queryRateAt(end) - queryRateAt(start)) / duration.toSeconds();
                if (growthRate > maxGrowthRate)
                    maxGrowthRate = growthRate;
            }
        }
    }
    if (maxGrowthRate == 0) { // No periods of significant growth
        if (durationBetween(0, snapshots.size() - 1).toHours() < 24)
            return 0.1; // ... because not much data
        else
            return 0.0; // ... because load is stable
    }
    OptionalDouble queryRate = queryRate(window, now);
    if (queryRate.orElse(0) == 0) return 0.1; // Growth not expressible as a fraction of the current rate
    // Convert per-second growth to per-minute, relative to the current rate.
    return maxGrowthRate * 60 / queryRate.getAsDouble();
}
|
/**
 * With no snapshots at all, the growth rate must fall back to the default 0.1.
 */
@Test
public void test_empty() {
    ManualClock clock = new ManualClock();
    var timeseries = new ClusterTimeseries(cluster, List.of());
    assertEquals(0.1, timeseries.maxQueryGrowthRate(Duration.ofMinutes(5), clock.instant()), delta);
}
|
/**
 * Returns the number of elements in the given array.
 *
 * @param array the array; may be {@code null}
 * @param <T> element type of the array
 * @return the array's size, or {@code null} when the input is {@code null}
 */
@Udf
public <T> Integer calcArrayLength(
    @UdfParameter(description = "The array") final List<T> array
) {
  return array == null ? null : array.size();
}
|
/**
 * The UDF must report the element count for empty, singleton, and multi-element
 * arrays alike.
 */
@Test
public void shouldReturnArraySize() {
    assertThat(udf.calcArrayLength(ImmutableList.of()), is(0));
    assertThat(udf.calcArrayLength(ImmutableList.of(1)), is(1));
    assertThat(udf.calcArrayLength(ImmutableList.of("one", "two")), is(2));
}
|
/**
 * Static factory for a retry operator backed by the given {@code Retry}.
 *
 * @param retry the retry policy to apply to decorated publishers
 * @param <T> the element type of the decorated publisher
 * @return a new {@code RetryOperator}
 */
public static <T> RetryOperator<T> of(Retry retry) {
    return new RetryOperator<>(retry);
}
|
/**
 * With {@code failAfterMaxAttempts(true)}, a result that keeps matching the
 * retry predicate must still be emitted, then followed by a
 * {@link MaxRetriesExceededException} once attempts are exhausted — and the
 * metrics must count it as one failed call with retries.
 */
@Test
public void retryOnResultFailAfterMaxAttemptsWithExceptionUsingFlux() {
    RetryConfig config = RetryConfig.<String>custom()
        .retryOnResult("retry"::equals)
        .waitDuration(Duration.ofMillis(10))
        .maxAttempts(3)
        .failAfterMaxAttempts(true)
        .build();
    Retry retry = Retry.of("testName", config);
    StepVerifier.create(Flux.just("retry")
        .transformDeferred(RetryOperator.of(retry)))
        .expectSubscription()
        .expectNextCount(1)
        .expectError(MaxRetriesExceededException.class)
        .verify(Duration.ofSeconds(1));
    Retry.Metrics metrics = retry.getMetrics();
    assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isZero();
    assertThat(metrics.getNumberOfFailedCallsWithRetryAttempt()).isEqualTo(1);
}
|
/**
 * @return the file name this id was constructed with
 */
public String getFileName() {
    return fileName;
}
|
/**
 * The accessor must return exactly the file name passed to the constructor.
 */
@Test
void getFileName() {
    LocalPredictionId retrieved = new LocalPredictionId(fileName, name);
    assertThat(retrieved.getFileName()).isEqualTo(fileName);
}
|
/**
 * Clears the embedded named-cluster metastore, moving each element back into the
 * in-memory pool so it can be re-added later, and resets the "added" flags.
 *
 * <p>Does nothing when the OSGi named-cluster service is unavailable, since the
 * embedded store could not be rebuilt afterwards.
 */
public void clear() {
    NamedClusterServiceOsgi ncso = meta.getNamedClusterServiceOsgi();
    if ( ncso != null ) { //Don't kill the embedded if we don't have the service to rebuild
        addedAllClusters = false;
        addedAnyClusters = false;
        // The embeddedMetaStoreFactory may be null if creating a brand new job and attempting to run before it ever
        // saved.
        if ( embeddedMetaStoreFactory != null ) {
            try {
                List<NamedClusterOsgi> list = embeddedMetaStoreFactory.getElements();
                for ( NamedClusterOsgi nc : list ) {
                    // Pool the cluster before deleting it from the embedded store.
                    namedClusterPool.put( nc.getName(), nc );
                    embeddedMetaStoreFactory.deleteElement( nc.getName() );
                }
            } catch ( MetaStoreException e ) {
                // Best-effort: log and continue rather than failing the caller.
                logMetaStoreException( e );
            }
        }
    }
}
|
/**
 * {@code clear()} must delete every element currently in the embedded metastore.
 */
@Test
public void testClear() throws Exception {
    when( mockMetaStoreFactory.getElements() )
        .thenReturn( Arrays.asList( mockNamedCluster1, mockNamedCluster2 ) );
    namedClusterEmbedManager.clear( );
    verify( mockMetaStoreFactory ).deleteElement( CLUSTER1_NAME );
    verify( mockMetaStoreFactory ).deleteElement( CLUSTER2_NAME );
}
|
/**
 * Dispatches authorization validation based on the concrete statement type.
 *
 * <p>Statement types without a branch here are intentionally not validated.
 * NOTE(review): branch order matters if any of these statement classes are
 * related by inheritance — confirm they are disjoint.
 *
 * @param securityContext the caller's security context
 * @param metaStore metastore used to resolve sources referenced by the statement
 * @param statement the parsed statement to authorize
 */
@Override
public void checkAuthorization(
    final KsqlSecurityContext securityContext,
    final MetaStore metaStore,
    final Statement statement
) {
    if (statement instanceof Query) {
        validateQuery(securityContext, metaStore, (Query)statement);
    } else if (statement instanceof InsertInto) {
        validateInsertInto(securityContext, metaStore, (InsertInto)statement);
    } else if (statement instanceof CreateAsSelect) {
        validateCreateAsSelect(securityContext, metaStore, (CreateAsSelect)statement);
    } else if (statement instanceof PrintTopic) {
        validatePrintTopic(securityContext, (PrintTopic)statement);
    } else if (statement instanceof CreateSource) {
        validateCreateSource(securityContext, (CreateSource)statement);
    }
}
|
/**
 * A CREATE ... AS SELECT over a topic the caller may not read must be rejected
 * with a topic-authorization error naming the denied topic.
 */
@Test
public void shouldThrowWhenCreateAsSelectWithoutReadPermissionsDenied() {
    // Given:
    givenTopicAccessDenied(KAFKA_TOPIC, AclOperation.READ);
    final Statement statement = givenStatement(String.format(
        "CREATE STREAM newStream AS SELECT * FROM %s;", KAFKA_STREAM_TOPIC)
    );
    // When:
    final Exception e = assertThrows(
        KsqlTopicAuthorizationException.class,
        () -> authorizationValidator.checkAuthorization(securityContext, metaStore, statement)
    );
    // Then:
    assertThat(e.getMessage(), containsString(String.format(
        "Authorization denied to Read on topic(s): [%s]", KAFKA_TOPIC
    )));
}
|
/**
 * Executes a CREATE CONNECTOR statement against the Connect cluster.
 *
 * <p>IF NOT EXISTS semantics are checked first; when the connector already
 * exists, the pre-built warning/entity response is returned without calling
 * create. Otherwise the connector is created, and either the created-connector
 * entity is returned or the Connect error is rethrown as a REST error.
 *
 * @param statement the configured CREATE CONNECTOR statement
 * @param sessionProperties session properties (unused here but part of the executor contract)
 * @param executionContext the execution context (unused here but part of the executor contract)
 * @param serviceContext provides the Connect client
 * @return a handled response containing the resulting entity
 * @throws KsqlRestException when the Connect cluster rejects the creation
 * @throws IllegalStateException when the Connect response has neither datum nor error
 */
public static StatementExecutorResponse execute(
    final ConfiguredStatement<CreateConnector> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
    final CreateConnector createConnector = statement.getStatement();
    final ConnectClient client = serviceContext.getConnectClient();
    final Optional<KsqlEntity> connectorsResponse = handleIfNotExists(
        statement, createConnector, client);
    if (connectorsResponse.isPresent()) {
        return StatementExecutorResponse.handled(connectorsResponse);
    }
    final ConnectResponse<ConnectorInfo> response = client.create(
        createConnector.getName(),
        buildConnectorConfig(createConnector));
    if (response.datum().isPresent()) {
        return StatementExecutorResponse.handled(Optional.of(
            new CreateConnectorEntity(
                statement.getMaskedStatementText(),
                response.datum().get()
            )
        ));
    }
    if (response.error().isPresent()) {
        // Propagate the Connect HTTP status so clients see the original failure.
        final String errorMsg = "Failed to create connector: " + response.error().get();
        throw new KsqlRestException(EndpointResponse.create()
            .status(response.httpCode())
            .entity(new KsqlErrorMessage(Errors.toErrorCode(response.httpCode()), errorMsg))
            .build()
        );
    }
    throw new IllegalStateException("Either response.datum() or response.error() must be present");
}
|
/**
 * With IF NOT EXISTS set and the connector already present, execute must return
 * a warning entity rather than attempting to create the connector.
 */
@Test
public void shouldReturnWarningOnExecuteWhenIfNotExistsSetConnectorExists() {
    //Given:
    givenConnectorExists();
    //When
    final Optional<KsqlEntity> entity = ConnectExecutor
        .execute(CREATE_DUPLICATE_CONNECTOR_CONFIGURED,
            mock(SessionProperties.class), null, serviceContext).getEntity();
    //Then
    assertThat("Expected non-empty response", entity.isPresent());
    assertThat(entity.get(), instanceOf(WarningEntity.class));
}
|
/**
 * Writes the archive-id label suffix (label prefix followed by the decimal id)
 * into the buffer at the given offset.
 *
 * @param tempBuffer buffer to write the ASCII label into
 * @param offset position at which to start writing
 * @param archiveId id rendered in decimal after the prefix
 * @return total number of bytes written
 */
public static int appendArchiveIdLabel(
    final MutableDirectBuffer tempBuffer, final int offset, final long archiveId)
{
    final int prefixLength = tempBuffer.putStringWithoutLengthAscii(offset, ARCHIVE_ID_LABEL_PREFIX);
    final int idLength = tempBuffer.putLongAscii(offset + prefixLength, archiveId);
    return prefixLength + idLength;
}
|
/**
 * The appended label must match the predicted length, start with the label
 * prefix, and end with the decimal rendering of the archive id (negative ids
 * included).
 */
@Test
void appendArchiveIdLabel()
{
    final int offset = 13;
    final long archiveId = -23462384L;
    final UnsafeBuffer buffer = new UnsafeBuffer(new byte[100]);
    final int length = ArchiveCounters.appendArchiveIdLabel(buffer, offset, archiveId);
    assertEquals(ArchiveCounters.lengthOfArchiveIdLabel(archiveId), length);
    final int prefixLength = ArchiveCounters.ARCHIVE_ID_LABEL_PREFIX.length();
    assertEquals(
        ArchiveCounters.ARCHIVE_ID_LABEL_PREFIX,
        buffer.getStringWithoutLengthAscii(offset, prefixLength));
    assertEquals(archiveId, buffer.parseLongAscii(offset + prefixLength, length - prefixLength));
}
|
/**
 * Intentional no-op: view clicks are not tracked by this implementation
 * (presumably the tracking-disabled build variant — confirm against siblings).
 */
@Override
public void trackViewAppClick(View view) {
}
|
/**
 * The no-op implementation must emit no event: the callback fails the test if
 * any track event is ever observed.
 */
@Test
public void trackViewAppClick() {
    View view = new View(mApplication);
    mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
        @Override
        public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
            // Any event reaching here means tracking happened — fail immediately.
            Assert.fail();
            return false;
        }
    });
    mSensorsAPI.trackViewAppClick(view);
}
|
/**
 * Extracts JSON from the input and converts each top-level key/value pair into
 * a {@link Result} (with no begin/end offsets, hence the {@code -1, -1}).
 *
 * @param value the raw input to extract JSON from
 * @return one result per extracted top-level entry (empty when extraction yields nothing)
 * @throws ExtractorException when the input cannot be parsed as JSON
 */
@Override
protected Result[] run(String value) {
    final Map<String, Object> extractedJson;
    try {
        extractedJson = extractJson(value);
    } catch (IOException e) {
        throw new ExtractorException(e);
    }
    final List<Result> results = new ArrayList<>(extractedJson.size());
    for (Map.Entry<String, Object> entry : extractedJson.entrySet()) {
        results.add(new Result(entry.getValue(), entry.getKey(), -1, -1));
    }
    // Empty-array argument is the idiomatic (and JIT-optimised) toArray form.
    return results.toArray(new Result[0]);
}
|
/**
 * A {@code null} input must yield an empty result array rather than throwing.
 */
@Test
public void testRunWithNullInput() throws Exception {
    assertThat(jsonExtractor.run(null)).isEmpty();
}
|
/**
 * Maps every element of the collection with {@code function} and collects the
 * results into a list. Delegates to the three-argument overload with the final
 * flag {@code false} (presumably the parallel-stream switch — confirm against
 * that overload).
 *
 * @param collection source collection; a null/empty source yields an empty list (see tests)
 * @param function mapping applied to each element
 * @param <E> source element type
 * @param <T> result element type
 * @return the mapped list
 */
public static <E, T> List<T> toList(Collection<E> collection, Function<E, T> function) {
    return toList(collection, function, false);
}
|
/**
 * {@code toList} must return an empty list for null or empty sources, and must
 * preserve both order and duplicates when mapping a populated source.
 */
@Test
public void testTranslate2List() {
    // Null source -> empty list.
    List<String> list = CollStreamUtil.toList(null, Student::getName);
    assertEquals(list, Collections.EMPTY_LIST);
    // Empty source -> empty list.
    List<Student> students = new ArrayList<>();
    list = CollStreamUtil.toList(students, Student::getName);
    assertEquals(list, Collections.EMPTY_LIST);
    students.add(new Student(1, 1, 1, "张三"));
    students.add(new Student(1, 2, 2, "李四"));
    students.add(new Student(2, 1, 1, "李四"));
    students.add(new Student(2, 2, 2, "李四"));
    students.add(new Student(2, 3, 2, "霸天虎"));
    // Duplicates are kept and insertion order is preserved.
    list = CollStreamUtil.toList(students, Student::getName);
    List<String> compare = new ArrayList<>();
    compare.add("张三");
    compare.add("李四");
    compare.add("李四");
    compare.add("李四");
    compare.add("霸天虎");
    assertEquals(list, compare);
}
|
/**
 * Builds the standard task labels with the given key prefix. Delegates to the
 * four-argument overload with flags {@code (true, false)} — based on the tests,
 * the first presumably enables value normalisation/truncation and the second
 * lower-casing; confirm against the overload's definition.
 *
 * @param runContext context supplying namespace/flow/task/execution identifiers
 * @param prefix prefix prepended to each label key (may be null per the overload)
 * @return the label map
 */
public static Map<String, String> labels(RunContext runContext, String prefix) {
    return labels(runContext, prefix, true, false);
}
|
/**
 * Labels must carry the prefix when given, truncate the (too long) namespace
 * value, and — with the lowercase flag set — drop the prefix and lower-case
 * values such as the flow id.
 */
@Test
void labels() {
    var runContext = runContext(runContextFactory, "very.very.very.very.very.very.very.very.very.very.very.very.long.namespace");
    var labels = ScriptService.labels(runContext, "kestra.io/");
    assertThat(labels.size(), is(6));
    // Namespace value is truncated (label-value length limit).
    assertThat(labels.get("kestra.io/namespace"), is("very.very.very.very.very.very.very.very.very.very.very.very.lon"));
    assertThat(labels.get("kestra.io/flow-id"), is("flowId"));
    assertThat(labels.get("kestra.io/task-id"), is("task"));
    assertThat(labels.get("kestra.io/execution-id"), is("executionId"));
    assertThat(labels.get("kestra.io/taskrun-id"), is("taskrun"));
    assertThat(labels.get("kestra.io/taskrun-attempt"), is("0"));
    // Null prefix + lowercase flag: bare keys, lower-cased values.
    labels = ScriptService.labels(runContext, null, true, true);
    assertThat(labels.size(), is(6));
    assertThat(labels.get("namespace"), is("very.very.very.very.very.very.very.very.very.very.very.very.lon"));
    assertThat(labels.get("flow-id"), is("flowid"));
    assertThat(labels.get("task-id"), is("task"));
    assertThat(labels.get("execution-id"), is("executionid"));
    assertThat(labels.get("taskrun-id"), is("taskrun"));
    assertThat(labels.get("taskrun-attempt"), is("0"));
}
|
/**
 * Translates a Hive {@code SearchArgument} into an Iceberg filter
 * {@link Expression}, resolving leaf references via the argument's leaves.
 *
 * @param sarg the Hive search argument to translate
 * @return the equivalent Iceberg expression
 */
public static Expression generateFilterExpression(SearchArgument sarg) {
    return translate(sarg.getExpression(), sarg.getLeaves());
}
|
/**
 * A Hive IN predicate must translate to an Iceberg IN with the same operator,
 * literals, and referenced column.
 */
@Test
public void testInOperand() {
    SearchArgument.Builder builder = SearchArgumentFactory.newBuilder();
    SearchArgument arg =
        builder.startAnd().in("salary", PredicateLeaf.Type.LONG, 3000L, 4000L).end().build();
    UnboundPredicate expected = Expressions.in("salary", 3000L, 4000L);
    UnboundPredicate actual =
        (UnboundPredicate) HiveIcebergFilterFactory.generateFilterExpression(arg);
    assertThat(expected.op()).isEqualTo(actual.op());
    assertThat(expected.literals()).isEqualTo(actual.literals());
    assertThat(expected.ref().name()).isEqualTo(actual.ref().name());
}
|
/**
 * Attempts to select the node on {@code hostName} for a node-local allocation.
 *
 * @param hostName the requested host
 * @param blacklist hosts that must never be selected
 * @param request the resources being requested
 * @return the node when it is known, tracked, and accepted by the comparator's
 *         capacity check; {@code null} otherwise
 */
public RMNode selectLocalNode(
    String hostName, Set<String> blacklist, Resource request) {
    if (blacklist.contains(hostName)) {
        return null;
    }
    RMNode candidate = nodeByHostName.get(hostName);
    if (candidate == null) {
        return null;
    }
    ClusterNode clusterNode = clusterNodes.get(candidate.getNodeID());
    if (clusterNode == null) {
        return null;
    }
    // compareAndIncrement presumably checks capacity and accounts for the
    // request in one step — confirm against the comparator implementation.
    return comparator.compareAndIncrement(clusterNode, 1, request)
        ? candidate : null;
}
|
/**
 * Local selection must honour the blacklist, reject nodes whose queue is at
 * capacity, and return nodes that still have queue headroom.
 */
@Test
public void testSelectLocalNode() {
    NodeQueueLoadMonitor selector = new NodeQueueLoadMonitor(
        NodeQueueLoadMonitor.LoadComparator.QUEUE_LENGTH);
    RMNode h1 = createRMNode("h1", 1, -1, 2, 5);
    RMNode h2 = createRMNode("h2", 2, -1, 5, 5);
    RMNode h3 = createRMNode("h3", 3, -1, 4, 5);
    selector.addNode(null, h1);
    selector.addNode(null, h2);
    selector.addNode(null, h3);
    selector.updateNode(h1);
    selector.updateNode(h2);
    selector.updateNode(h3);
    // basic test for selecting node which has queue length less
    // than queue capacity.
    Set<String> blacklist = new HashSet<>();
    RMNode node = selector.selectLocalNode(
        "h1", blacklist, defaultResourceRequested);
    Assert.assertEquals("h1", node.getHostName());
    // if node has been added to blacklist
    blacklist.add("h1");
    node = selector.selectLocalNode(
        "h1", blacklist, defaultResourceRequested);
    Assert.assertNull(node);
    // h2's queue is full (5 of 5), so selection must fail.
    node = selector.selectLocalNode(
        "h2", blacklist, defaultResourceRequested);
    Assert.assertNull(node);
    // h3 has headroom (4 of 5) and is not blacklisted.
    node = selector.selectLocalNode(
        "h3", blacklist, defaultResourceRequested);
    Assert.assertEquals("h3", node.getHostName());
}
|
/**
 * Materialises this scan as a Spark {@link Batch} over the planned task groups.
 * The scan's own {@code hashCode()} is passed along (presumably as a scan
 * identity/cache key — confirm against SparkBatch).
 */
@Override
public Batch toBatch() {
    return new SparkBatch(
        sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode());
}
|
/**
 * On an unpartitioned table, a days-transform predicate (and its negation)
 * cannot prune anything: all 10 input partitions must still be planned.
 */
@Test
public void testUnpartitionedDays() throws Exception {
    createUnpartitionedTable(spark, tableName);
    SparkScanBuilder builder = scanBuilder();
    DaysFunction.TimestampToDaysFunction function = new DaysFunction.TimestampToDaysFunction();
    UserDefinedScalarFunc udf = toUDF(function, expressions(fieldRef("ts")));
    Predicate predicate =
        new Predicate(
            "<",
            expressions(
                udf, dateLit(timestampStrToDayOrdinal("2018-11-20T00:00:00.000000+00:00"))));
    pushFilters(builder, predicate);
    Batch scan = builder.build().toBatch();
    assertThat(scan.planInputPartitions().length).isEqualTo(10);
    // NOT LT
    builder = scanBuilder();
    predicate = new Not(predicate);
    pushFilters(builder, predicate);
    scan = builder.build().toBatch();
    assertThat(scan.planInputPartitions().length).isEqualTo(10);
}
|
/**
 * Factory for the processor supplier that performs a map index scan described
 * by the given metadata.
 *
 * @param indexScanMetadata describes the index, filter, and projection of the scan
 * @return a supplier of index-scan processors
 */
static ProcessorSupplier readMapIndexSupplier(MapIndexScanMetadata indexScanMetadata) {
    return new MapIndexScanProcessorSupplier(indexScanMetadata);
}
|
/**
 * A full scan over a SORTED index in descending order must emit all rows in
 * reverse key order.
 */
@Test
public void test_fullScanDesc_sorted() {
    List<JetSqlRow> expected = new ArrayList<>();
    for (int i = 0; i <= count; i++) {
        map.put(i, new Person("value-" + i, i));
        // Expected output is built in descending key order to match the scan.
        expected.add(jetRow((count - i), "value-" + (count - i), (count - i)));
    }
    IndexConfig indexConfig = new IndexConfig(IndexType.SORTED, "age").setName(randomName());
    map.addIndex(indexConfig);
    // descending == true (last argument).
    MapIndexScanMetadata metadata = metadata(indexConfig.getName(), null, 2, true);
    TestSupport
        .verifyProcessor(adaptSupplier(MapIndexScanP.readMapIndexSupplier(metadata)))
        .hazelcastInstance(instance())
        .jobConfig(new JobConfig().setArgument(SQL_ARGUMENTS_KEY_NAME, emptyList()))
        .outputChecker(LENIENT_SAME_ITEMS_IN_ORDER)
        .disableSnapshots()
        .disableProgressAssertion()
        .expectOutput(expected);
}
|
/**
 * Creates a mutation detector that tracks changes to {@code value} via its
 * encoded form under {@code coder}.
 *
 * @param value the value to guard against mutation; {@code null} yields a no-op detector
 * @param coder the coder used to snapshot the value's encoding
 * @param <T> the value type
 * @return a detector for the given value
 * @throws CoderException if taking the initial encoded snapshot fails
 */
public static <T> MutationDetector forValueWithCoder(T value, Coder<T> coder)
    throws CoderException {
  if (value != null) {
    return new CodedValueMutationDetector<>(value, coder);
  }
  return noopMutationDetector();
}
|
/**
 * Mutation detection is defined by encoded-byte equality, not structural value
 * equality: a coder whose structural values are never equal but whose encodings
 * always match must report the value as unmodified.
 */
@Test
public void testMutationWithEqualEncodings() throws Exception {
    class EncodingBadStructuralValueCoder extends AtomicCoder<List<Object>> {
        @Override
        public void encode(List<Object> value, OutputStream outStream)
            throws CoderException, IOException {
            // Constant encoding regardless of the value's contents.
            outStream.write(new byte[] {1, 2, -3, 45});
        }
        @Override
        public List<Object> decode(InputStream inStream) throws CoderException, IOException {
            // Consume the written bytes
            inStream.read(new byte[4]);
            return new ArrayList<>();
        }
        @Override
        public Object structuralValue(List<Object> value) {
            // Structural values are never equal to each other.
            return new Object();
        }
    }
    List<Object> ls = new ArrayList<>();
    ls.add(1);
    ls.add("foo");
    MutationDetector detector =
        MutationDetectors.forValueWithCoder(ls, new EncodingBadStructuralValueCoder());
    ls.add(new Byte[] {1, 2, -3, 45});
    // The structural values should be unequal, but the encoded bytes are equivalent, which is the
    // system definition of equality.
    detector.verifyUnmodified();
}
|
/**
 * Reads {@code /proc/stat} and parses the total system jiffies and CPU count.
 *
 * <p>Fix: the reader is now closed via try-with-resources — the original leaked
 * the file handle on every call. The catch is widened to {@code IOException}
 * (fully qualified to avoid a new import) so a failure on close is handled the
 * same way as a missing file.
 *
 * @return the parsed values, or an empty {@code JiffiesAndCpus} when the stat
 *         file cannot be read
 */
private static JiffiesAndCpus getTotalSystemJiffies() {
    try (BufferedReader in = new BufferedReader(new FileReader("/proc/stat"))) {
        return getTotalSystemJiffies(in);
    } catch (java.io.IOException ex) {
        log.log(Level.SEVERE, "Unable to open stat file", ex);
        return new JiffiesAndCpus();
    }
}
|
/**
 * Parsing two /proc/stat fixtures must yield the expected CPU counts, absolute
 * jiffy totals, per-core ratios, and a correct snapshot diff.
 */
@Test
public
void testTotalJiffies() {
    SystemPoller.JiffiesAndCpus first = SystemPoller.getTotalSystemJiffies(new BufferedReader(new StringReader(totalStats[0])));
    SystemPoller.JiffiesAndCpus second = SystemPoller.getTotalSystemJiffies(new BufferedReader(new StringReader(totalStats[1])));
    assertEquals(8, first.cpus);
    assertEquals(first.cpus, second.cpus);
    assertEquals(142828460L, first.jiffies);
    assertEquals(143096563L, second.jiffies);
    assertEquals(0.05601124593795943, first.ratioSingleCoreJiffies(1000000), DELTA);
    assertEquals(0.05590630433241083, second.ratioSingleCoreJiffies(1000000), DELTA);
    // Diff keeps the CPU count and subtracts the jiffy totals.
    SystemPoller.JiffiesAndCpus diff = second.diff(first);
    assertEquals(8, diff.cpus);
    assertEquals(268103L, diff.jiffies);
    assertEquals(2.9839278187860634, diff.ratioSingleCoreJiffies(100000), DELTA);
    assertEquals(0.3729909773482579, diff.ratioJiffies(100000), DELTA);
}
|
/**
 * Runs the query over owned partitions, preferring an index scan with a
 * fallback to a partition scan (delegated overload with fallback enabled).
 *
 * @param query the query to run
 * @return the non-null result (asserted; the overload may only return null when
 *         fallback is disabled)
 */
public Result runIndexOrPartitionScanQueryOnOwnedPartitions(Query query) {
    Result result = runIndexOrPartitionScanQueryOnOwnedPartitions(query, true);
    assert result != null;
    return result;
}
|
/**
 * An indexed query interrupted by a migration that starts mid-execution must
 * fall back such that the result carries no partition-id set (i.e. the indexed
 * result cannot be trusted as partition-complete).
 */
@Test
public void verifyIndexedQueryFailureWhileMigratingInFlight() {
    map.addIndex(IndexType.HASH, "this");
    EqualPredicate predicate = new EqualPredicate("this", value) {
        @Override
        public Set<QueryableEntry> filter(QueryContext queryContext) {
            // start a new migration while executing an indexed query
            mapService.beforeMigration(new PartitionMigrationEvent(MigrationEndpoint.SOURCE, partitionId, 0, 1,
                UUID.randomUUID()));
            return super.filter(queryContext);
        }
    };
    Query query = Query.of()
        .mapName(map.getName())
        .predicate(predicate)
        .iterationType(IterationType.ENTRY)
        .partitionIdSet(SetUtil.allPartitionIds(instance.getPartitionService().getPartitions().size()))
        .build();
    QueryResult result = (QueryResult) queryRunner.runIndexOrPartitionScanQueryOnOwnedPartitions(query);
    assertNull(result.getPartitionIds());
}
|
/**
 * Batch-deletes plugin handles by id.
 *
 * @param ids the non-empty list of non-blank handle ids to delete
 * @return a success result carrying the number of deleted handles
 */
@DeleteMapping("/batch")
@RequiresPermissions("system:pluginHandler:delete")
public ShenyuAdminResult deletePluginHandles(@RequestBody @NotEmpty final List<@NotBlank String> ids) {
    return ShenyuAdminResult.success(ShenyuResultMessage.DELETE_SUCCESS, pluginHandleService.deletePluginHandles(ids));
}
|
/**
 * DELETE /plugin-handle/batch with an id list must return the delete-success
 * message. NOTE(review): the extra "1" URI-variable argument is unused — the
 * template has no placeholder; the ids travel in the JSON body.
 */
@Test
public void testDeletePluginHandles() throws Exception {
    given(this.pluginHandleService.deletePluginHandles(Collections.singletonList("1"))).willReturn(1);
    this.mockMvc.perform(MockMvcRequestBuilders.delete("/plugin-handle/batch", "1")
        .contentType(MediaType.APPLICATION_JSON)
        .content(GsonUtils.getInstance().toJson(Collections.singletonList("1"))))
        .andExpect(status().isOk())
        .andExpect(jsonPath("$.message", is(ShenyuResultMessage.DELETE_SUCCESS)))
        .andReturn();
}
|
/**
 * Retries only on HTTP 302 (moved temporarily), after attempting to refresh the
 * session cookie; a refresh failure — or any other status — means no retry.
 *
 * <p>NOTE(review): {@code executionCount} is ignored here, so a permanently
 * broken refresh is bounded only by the surrounding retry strategy.
 *
 * @param response the response that triggered the retry decision
 * @param executionCount how many times this request has already been attempted (unused)
 * @param context the HTTP context of the request
 * @return {@code true} to retry the request, {@code false} otherwise
 */
@Override
public boolean retryRequest(final HttpResponse response, final int executionCount, final HttpContext context) {
    switch(response.getStatusLine().getStatusCode()) {
        case HttpStatus.SC_MOVED_TEMPORARILY:
            try {
                log.info(String.format("Attempt to refresh cookie for failure %s", response));
                this.validate();
                // Try again
                return true;
            }
            catch(BackgroundException e) {
                log.error(String.format("Failure refreshing cookie. %s", e));
                return false;
            }
    }
    return false;
}
|
/**
 * 503 responses retry up to the configured execution count; 302 retries once
 * (cookie refresh) but not on the second attempt.
 */
@Test
public void retryRequest() {
    final ServiceUnavailableRetryStrategy handler =
        new CustomServiceUnavailableRetryStrategy(session.getHost(), 2,
            new ExecutionCountServiceUnavailableRetryStrategy(1, session.authentication));
    assertTrue(handler.retryRequest(
        new BasicHttpResponse(new BasicStatusLine(HTTP_1_1, HttpStatus.SC_SERVICE_UNAVAILABLE, "Service Unavailable")),
        1, new BasicHttpContext()));
    assertTrue(handler.retryRequest(
        new BasicHttpResponse(new BasicStatusLine(HTTP_1_1, HttpStatus.SC_SERVICE_UNAVAILABLE, "Service Unavailable")),
        2, new BasicHttpContext()));
    assertTrue(handler.retryRequest(
        new BasicHttpResponse(new BasicStatusLine(HTTP_1_1, HttpStatus.SC_MOVED_TEMPORARILY, "Service Unavailable")),
        1, new BasicHttpContext()));
    // Second attempt on 302 must not be retried.
    assertFalse(handler.retryRequest(
        new BasicHttpResponse(new BasicStatusLine(HTTP_1_1, HttpStatus.SC_MOVED_TEMPORARILY, "Service Unavailable")),
        2, new BasicHttpContext()));
}
|
/**
 * Returns the configured device ids, or {@code null} when the {@code devices}
 * key is absent. NOTE: null deliberately distinguishes "not configured" from an
 * explicitly empty list.
 *
 * @return configured device ids, or {@code null} if none are configured
 */
public List<DeviceId> devices() {
    return object.has(DEVICES) ? getList(DEVICES, DeviceId::deviceId) : null;
}
|
/**
 * Setting a new device list on a loaded region config must be reflected when
 * the region is read back.
 */
@Test
public void modifyDevices() {
    loadRegion(R3);
    cfg.devices(ALT_DEVICES);
    checkRegion(R3, null, ALT_DEVICES);
}
|
/**
 * Encodes the {@code hazmat:water} restriction for the given way. Only the
 * explicit values "no" and "permissive" are written; any other (or missing)
 * value leaves the encoder at its default.
 */
@Override
public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay readerWay, IntsRef relationFlags) {
    if (readerWay.hasTag("hazmat:water", "no")) {
        hazWaterEnc.setEnum(false, edgeId, edgeIntAccess, HazmatWater.NO);
        return;
    }
    if (readerWay.hasTag("hazmat:water", "permissive")) {
        hazWaterEnc.setEnum(false, edgeId, edgeIntAccess, HazmatWater.PERMISSIVE);
    }
}
|
/**
 * "no" and "permissive" tags must be encoded explicitly; "yes" matches no
 * parser branch, so the read-back value is the encoder's default (YES).
 */
@Test
public void testSimpleTags() {
    ReaderWay readerWay = new ReaderWay(1);
    EdgeIntAccess edgeIntAccess = new ArrayEdgeIntAccess(1);
    int edgeId = 0;
    readerWay.setTag("hazmat:water", "no");
    parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
    assertEquals(HazmatWater.NO, hazWaterEnc.getEnum(false, edgeId, edgeIntAccess));
    // Fresh storage: "yes" is not handled by the parser, so the default applies.
    edgeIntAccess = new ArrayEdgeIntAccess(1);
    readerWay.setTag("hazmat:water", "yes");
    parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
    assertEquals(HazmatWater.YES, hazWaterEnc.getEnum(false, edgeId, edgeIntAccess));
    edgeIntAccess = new ArrayEdgeIntAccess(1);
    readerWay.setTag("hazmat:water", "permissive");
    parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
    assertEquals(HazmatWater.PERMISSIVE, hazWaterEnc.getEnum(false, edgeId, edgeIntAccess));
}
|
/**
 * Creates an upload-enabled file share for the given path and returns its
 * signed URL. The user is prompted (via {@code callback}) for a passphrase that
 * protects the share.
 *
 * @param file the file/folder to share for upload
 * @param sharee the intended audience (unused by this implementation)
 * @param options unused
 * @param callback prompts for the share passphrase
 * @return the signed share URL
 * @throws BackgroundException when the remote API call fails
 */
@Override
public DescriptiveUrl toUploadUrl(final Path file, final Sharee sharee, final Void options, final PasswordCallback callback) throws BackgroundException {
    try {
        final Host bookmark = session.getHost();
        final CreateFileShareRequest request = new CreateFileShareRequest()
            .fileId(fileid.getFileId(file))
            .allowUpload(true);
        // Passphrase prompt: anonymous (no username), not stored in the keychain.
        request.setPassword(callback.prompt(bookmark,
            LocaleFactory.localizedString("Passphrase", "Cryptomator"),
            MessageFormat.format(LocaleFactory.localizedString("Create a passphrase required to access {0}", "Credentials"), file.getName()),
            new LoginOptions().anonymous(true).keychain(false).icon(bookmark.getProtocol().disk())).getPassword());
        return new DescriptiveUrl(URI.create(
            new FileSharesApi(session.getClient()).fileSharesPost_0(request).getUrl()), DescriptiveUrl.Type.signed);
    }
    catch(ApiException e) {
        throw new StoregateExceptionMappingService(fileid).map(e);
    }
}
|
// Creates a temporary directory, requests an upload-share URL for it and
// verifies a URL is returned; cleans the directory up afterwards.
@Test
public void toUploadUrl() throws Exception {
    final StoregateIdProvider nodeid = new StoregateIdProvider(session);
    final Path room = new StoregateDirectoryFeature(session, nodeid).mkdir(
        new Path(String.format("/My files/%s", new AlphanumericRandomStringService().random()),
            EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    assertNotNull(new StoregateShareFeature(session, nodeid).toUploadUrl(room, Share.Sharee.world, null, new DisabledPasswordCallback()).getUrl());
    new StoregateDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledPasswordCallback(), new Delete.DisabledCallback());
}
|
/**
 * Merges application reports coming from multiple sub-clusters in a
 * federation. For each application id, unmanaged AM (UAM) reports are folded
 * into the managed AM report when one exists; otherwise UAM reports are
 * accumulated separately and optionally included in the result.
 *
 * @param responses per-sub-cluster GetApplications responses to merge
 * @param returnPartialResult whether to include leftover UAM-only reports
 * @return a single response containing the merged application reports
 */
public static GetApplicationsResponse mergeApplications(
    Collection<GetApplicationsResponse> responses,
    boolean returnPartialResult){
  // Managed AM reports keyed by application id.
  Map<ApplicationId, ApplicationReport> federationAM = new HashMap<>();
  // Accumulated UAM reports (one merged report per app id) seen so far.
  Map<ApplicationId, ApplicationReport> federationUAMSum = new HashMap<>();
  for (GetApplicationsResponse appResponse : responses){
    for (ApplicationReport appReport : appResponse.getApplicationList()){
      ApplicationId appId = appReport.getApplicationId();
      // Check if this ApplicationReport is an AM
      if (!appReport.isUnmanagedApp()) {
        // Insert in the list of AM
        federationAM.put(appId, appReport);
        // Check if there are any UAM found before
        if (federationUAMSum.containsKey(appId)) {
          // Merge the current AM with the found UAM
          mergeAMWithUAM(appReport, federationUAMSum.get(appId));
          // Remove the sum of the UAMs
          federationUAMSum.remove(appId);
        }
        // This ApplicationReport is an UAM
      } else if (federationAM.containsKey(appId)) {
        // Merge the current UAM with its own AM
        mergeAMWithUAM(federationAM.get(appId), appReport);
      } else if (federationUAMSum.containsKey(appId)) {
        // Merge the current UAM with its own UAM and update the list of UAM
        ApplicationReport mergedUAMReport =
            mergeUAMWithUAM(federationUAMSum.get(appId), appReport);
        federationUAMSum.put(appId, mergedUAMReport);
      } else {
        // Insert in the list of UAM
        federationUAMSum.put(appId, appReport);
      }
    }
  }
  // Check the remaining UAMs are depending or not from federation
  for (ApplicationReport appReport : federationUAMSum.values()) {
    if (mergeUamToReport(appReport.getName(), returnPartialResult)) {
      federationAM.put(appReport.getApplicationId(), appReport);
    }
  }
  return GetApplicationsResponse.newInstance(federationAM.values());
}
|
// Regression test: merging an AM report with a UAM report whose resource
// usage fields are null must not throw, and the UAM must be folded into the
// AM (only one result, without the unmanaged-app marker in its name).
@Test
public void testMergeApplicationsNullResourceUsage() {
    ApplicationId appId = ApplicationId.newInstance(1234, 1);
    // Managed AM report (isUnmanagedApp = false), all usage reports null.
    ApplicationReport appReport = ApplicationReport.newInstance(
        appId, ApplicationAttemptId.newInstance(appId, 1),
        "user", "queue", "app1", "host",
        124, null, YarnApplicationState.RUNNING,
        "diagnostics", "url", 0, 0,
        0, FinalApplicationStatus.SUCCEEDED, null, "N/A",
        0.53789f, "YARN", null, null, false, null, null, null);
    // Unmanaged AM report for the same application id (isUnmanagedApp = true).
    ApplicationReport uamAppReport = ApplicationReport.newInstance(
        appId, ApplicationAttemptId.newInstance(appId, 1),
        "user", "queue", "app1", "host",
        124, null, YarnApplicationState.RUNNING,
        "diagnostics", "url", 0, 0,
        0, FinalApplicationStatus.SUCCEEDED, null, "N/A",
        0.53789f, "YARN", null, null, true, null, null, null);
    ArrayList<GetApplicationsResponse> responses = new ArrayList<>();
    List<ApplicationReport> applications = new ArrayList<>();
    applications.add(appReport);
    applications.add(uamAppReport);
    responses.add(GetApplicationsResponse.newInstance(applications));
    GetApplicationsResponse result = RouterYarnClientUtils.
        mergeApplications(responses, false);
    Assert.assertNotNull(result);
    Assert.assertEquals(1, result.getApplicationList().size());
    String appName = result.getApplicationList().get(0).getName();
    // Check that no Unmanaged applications are added to the result
    Assert.assertFalse(appName.contains(UnmanagedApplicationManager.APP_NAME));
}
|
/**
 * Retrieves the primary keys of extensions matching the given list options,
 * sorted as requested, using the index for the extension type.
 * Timing of each phase is collected in a StopWatch and traced when enabled.
 *
 * @param type the extension group/version/kind whose indexer to query
 * @param options label/field selectors to evaluate against the index
 * @param sort requested sort order applied to the result set
 * @return ordered list of matching primary keys
 */
List<String> doRetrieve(GroupVersionKind type, ListOptions options, Sort sort) {
    var indexer = indexerFactory.getIndexer(type);
    StopWatch stopWatch = new StopWatch(type.toString());
    stopWatch.start("Check index status to ensure all indexes are ready");
    // Fail fast if any field referenced by the query or sort is not indexed.
    var fieldNamesUsedInQuery = getFieldNamesUsedInListOptions(options, sort);
    checkIndexForNames(indexer, fieldNamesUsedInQuery);
    stopWatch.stop();
    var indexView = new QueryIndexViewImpl(indexer);
    stopWatch.start("Evaluate selectors for index");
    var resultSet = evaluateSelectorsForIndex(indexer, indexView, options);
    stopWatch.stop();
    stopWatch.start("Sort result set by sort order");
    var result = indexView.sortBy(resultSet, sort);
    stopWatch.stop();
    if (log.isTraceEnabled()) {
        log.trace("Retrieve result from indexer by query [{}],\n {}", options,
            stopWatch.prettyPrint(TimeUnit.MILLISECONDS));
    }
    return result;
}
|
// Populates a mocked indexer with three objects, two labels and a "slug"
// field, then verifies that combining a label selector (key1=value1) with a
// field selector (slug=slug1) narrows the result to object1 only.
@Test
void doRetrieve() {
    var indexer = mock(Indexer.class);
    var gvk = GroupVersionKind.fromExtension(DemoExtension.class);
    when(indexerFactory.getIndexer(eq(gvk))).thenReturn(indexer);
    pileForIndexer(indexer, PrimaryKeySpecUtils.PRIMARY_INDEX_NAME, List.of(
        Map.entry("object1", "object1"),
        Map.entry("object2", "object2"),
        Map.entry("object3", "object3")
    ));
    pileForIndexer(indexer, LabelIndexSpecUtils.LABEL_PATH, List.of(
        Map.entry("key1=value1", "object1"),
        Map.entry("key2=value2", "object1"),
        Map.entry("key1=value1", "object2"),
        Map.entry("key2=value2", "object2"),
        Map.entry("key1=value1", "object3")
    ));
    pileForIndexer(indexer, "slug", List.of(
        Map.entry("slug1", "object1"),
        Map.entry("slug2", "object2")
    ));
    var listOptions = new ListOptions();
    listOptions.setLabelSelector(LabelSelector.builder()
        .eq("key1", "value1").build());
    listOptions.setFieldSelector(FieldSelector.of(equal("slug", "slug1")));
    var result = indexedQueryEngine.doRetrieve(gvk, listOptions, Sort.unsorted());
    assertThat(result).containsExactly("object1");
}
|
/**
 * Removes a config entry (plus its tags) and records a delete ("D") history
 * entry, all within a single embedded-storage SQL context that is flushed by
 * one databaseOperate.update call. No-op when the config does not exist.
 *
 * @param dataId config data id
 * @param group config group
 * @param tenant tenant; blank is normalized to the empty string
 * @param srcIp ip of the caller, recorded in history
 * @param srcUser user of the caller, recorded in history
 */
@Override
public void removeConfigInfo(final String dataId, final String group, final String tenant, final String srcIp,
        final String srcUser) {
    final Timestamp time = new Timestamp(System.currentTimeMillis());
    ConfigInfo configInfo = findConfigInfo(dataId, group, tenant);
    if (Objects.nonNull(configInfo)) {
        try {
            String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
            // The atomic calls below only stage SQL into the thread-local
            // context; nothing executes until databaseOperate.update(...).
            removeConfigInfoAtomic(dataId, group, tenantTmp, srcIp, srcUser);
            removeTagByIdAtomic(configInfo.getId());
            historyConfigInfoPersistService.insertConfigHistoryAtomic(configInfo.getId(), configInfo, srcIp,
                    srcUser, time, "D");
            EmbeddedStorageContextUtils.onDeleteConfigInfo(tenantTmp, group, dataId, srcIp, time);
            boolean result = databaseOperate.update(EmbeddedStorageContextHolder.getCurrentSqlContext());
            if (!result) {
                throw new NacosConfigException("config deletion failed");
            }
        } finally {
            // Always clear the thread-local SQL context, even on failure.
            EmbeddedStorageContextHolder.cleanAllContext();
        }
    }
}
|
// Mocks an existing config entry, invokes removeConfigInfo and verifies the
// three staged operations: config delete, tag delete and delete-history
// insertion with the "D" operation type.
@Test
void testRemoveConfigInfo() {
    String dataId = "dataId4567";
    String group = "group3456789";
    String tenant = "tenant4567890";
    //mock exist config info
    ConfigInfoWrapper configInfoWrapperOld = new ConfigInfoWrapper();
    configInfoWrapperOld.setDataId(dataId);
    configInfoWrapperOld.setGroup(group);
    configInfoWrapperOld.setTenant(tenant);
    configInfoWrapperOld.setAppName("old_app");
    configInfoWrapperOld.setContent("old content");
    configInfoWrapperOld.setMd5("old_md5");
    configInfoWrapperOld.setId(12345678765L);
    configInfoWrapperOld.setEncryptedDataKey("key3456");
    Mockito.when(databaseOperate.queryOne(anyString(), eq(new Object[] {dataId, group, tenant}), eq(CONFIG_INFO_WRAPPER_ROW_MAPPER)))
            .thenReturn(configInfoWrapperOld);
    String srcIp = "srcIp1234";
    String srcUser = "srcUser";
    Mockito.when(databaseOperate.update(any())).thenReturn(true);
    embeddedConfigInfoPersistService.removeConfigInfo(dataId, group, tenant, srcIp, srcUser);
    //expect delete config to be invoked
    embeddedStorageContextHolderMockedStatic.verify(
            () -> EmbeddedStorageContextHolder.addSqlContext(anyString(), eq(dataId), eq(group), eq(tenant)), times(1));
    //expect delete config tag to be invoked
    embeddedStorageContextHolderMockedStatic.verify(
            () -> EmbeddedStorageContextHolder.addSqlContext(anyString(), eq(configInfoWrapperOld.getId())), times(1));
    //expect insert delete history
    Mockito.verify(historyConfigInfoPersistService, times(1))
            .insertConfigHistoryAtomic(eq(configInfoWrapperOld.getId()), eq(configInfoWrapperOld), eq(srcIp), eq(srcUser), any(),
                    eq("D"));
}
|
/**
 * Coerces the left/right typed expressions of a comparison so both sides
 * have compatible types. Returns the (possibly rewritten) pair; the branch
 * order below is significant, as each coercion rule is tried in turn.
 *
 * @return the coerced pair, with a flag when the right side became a static field
 * @throws CoercedExpressionException when the two types cannot be coerced
 */
public CoercedExpressionResult coerce() {
    final Class<?> leftClass = left.getRawClass();
    final Class<?> nonPrimitiveLeftClass = toNonPrimitiveType(leftClass);
    final Class<?> rightClass = right.getRawClass();
    final Class<?> nonPrimitiveRightClass = toNonPrimitiveType(rightClass);
    boolean sameClass = leftClass == rightClass;
    boolean isUnificationExpression = left instanceof UnificationTypedExpression || right instanceof UnificationTypedExpression;
    // Identical types or unification expressions need no coercion at all.
    if (sameClass || isUnificationExpression) {
        return new CoercedExpressionResult(left, right);
    }
    if (!canCoerce()) {
        throw new CoercedExpressionException(new InvalidExpressionErrorResult("Comparison operation requires compatible types. Found " + leftClass + " and " + rightClass));
    }
    // int/long compared against double: widen the left side with a cast.
    if ((nonPrimitiveLeftClass == Integer.class || nonPrimitiveLeftClass == Long.class) && nonPrimitiveRightClass == Double.class) {
        CastExpr castExpression = new CastExpr(PrimitiveType.doubleType(), this.left.getExpression());
        return new CoercedExpressionResult(
                new TypedExpression(castExpression, double.class, left.getType()),
                right,
                false);
    }
    final boolean leftIsPrimitive = leftClass.isPrimitive() || Number.class.isAssignableFrom( leftClass );
    final boolean canCoerceLiteralNumberExpr = canCoerceLiteralNumberExpr(leftClass);
    boolean rightAsStaticField = false;
    final Expression rightExpression = right.getExpression();
    final TypedExpression coercedRight;
    if (leftIsPrimitive && canCoerceLiteralNumberExpr && rightExpression instanceof LiteralStringValueExpr) {
        // Numeric literal on the right: rewrite the literal to the left's type.
        final Expression coercedLiteralNumberExprToType = coerceLiteralNumberExprToType((LiteralStringValueExpr) right.getExpression(), leftClass);
        coercedRight = right.cloneWithNewExpression(coercedLiteralNumberExprToType);
        coercedRight.setType( leftClass );
    } else if (shouldCoerceBToString(left, right)) {
        coercedRight = coerceToString(right);
    } else if (isNotBinaryExpression(right) && canBeNarrowed(leftClass, rightClass) && right.isNumberLiteral()) {
        coercedRight = castToClass(leftClass);
    } else if (leftClass == long.class && rightClass == int.class) {
        coercedRight = right.cloneWithNewExpression(new CastExpr(PrimitiveType.longType(), right.getExpression()));
    } else if (leftClass == Date.class && rightClass == String.class) {
        // Date-like coercions produce a parsed constant held in a static field.
        coercedRight = coerceToDate(right);
        rightAsStaticField = true;
    } else if (leftClass == LocalDate.class && rightClass == String.class) {
        coercedRight = coerceToLocalDate(right);
        rightAsStaticField = true;
    } else if (leftClass == LocalDateTime.class && rightClass == String.class) {
        coercedRight = coerceToLocalDateTime(right);
        rightAsStaticField = true;
    } else if (shouldCoerceBToMap()) {
        coercedRight = castToClass(toNonPrimitiveType(leftClass));
    } else if (isBoolean(leftClass) && !isBoolean(rightClass)) {
        coercedRight = coerceBoolean(right);
    } else {
        coercedRight = right;
    }
    final TypedExpression coercedLeft;
    // A Character left side compared with a String becomes a String too.
    if (nonPrimitiveLeftClass == Character.class && shouldCoerceBToString(right, left)) {
        coercedLeft = coerceToString(left);
    } else {
        coercedLeft = left;
    }
    return new CoercedExpressionResult(coercedLeft, coercedRight, rightAsStaticField);
}
|
// An int literal compared against a plain Object must not be cast or
// rewritten: the coerced right side equals the original expression.
@Test
public void doNotCastNumberLiteralInt() {
    final TypedExpression left = expr("getValue()", java.lang.Object.class);
    final TypedExpression right = expr("20", int.class);
    final CoercedExpression.CoercedExpressionResult coerce = new CoercedExpression(left, right, false).coerce();
    assertThat(coerce.getCoercedRight()).isEqualTo(expr("20", int.class));
}
|
/**
 * Expands a glob-style GCS path into the metadata of all matching objects.
 * Lists the bucket page by page under the non-wildcard prefix and keeps
 * every object whose name matches the compiled pattern, skipping
 * directory placeholders (names ending in '/').
 *
 * @param gcsPattern the GCS path pattern, e.g. {@code gs://bucket/dir/*.txt}
 * @return a successful MatchResult containing the matching objects
 * @throws IOException when listing the bucket fails
 */
@VisibleForTesting
MatchResult expand(GcsPath gcsPattern) throws IOException {
  final String prefix = GcsUtil.getNonWildcardPrefix(gcsPattern.getObject());
  final Pattern regex = Pattern.compile(wildcardToRegexp(gcsPattern.getObject()));
  LOG.debug(
      "matching files in bucket {}, prefix {} against pattern {}",
      gcsPattern.getBucket(),
      prefix,
      regex.toString());
  final List<Metadata> matches = new ArrayList<>();
  String pageToken = null;
  while (true) {
    Objects objects = options.getGcsUtil().listObjects(gcsPattern.getBucket(), prefix, pageToken);
    if (objects.getItems() == null) {
      break;
    }
    // Filter objects based on the regex.
    for (StorageObject candidate : objects.getItems()) {
      String name = candidate.getName();
      // Skip directories, which end with a slash.
      if (regex.matcher(name).matches() && !name.endsWith("/")) {
        LOG.debug("Matched object: {}", name);
        matches.add(toMetadata(candidate));
      }
    }
    pageToken = objects.getNextPageToken();
    if (pageToken == null) {
      break;
    }
  }
  return MatchResult.create(Status.OK, matches);
}
|
// A path without any wildcard characters is not a glob; expand must reject
// it with an IllegalArgumentException carrying the offending expression.
@Test
public void testExpandNonGlob() throws Exception {
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("Glob expression: [testdirectory/otherfile] is not expandable.");
    gcsFileSystem.expand(GcsPath.fromUri("gs://testbucket/testdirectory/otherfile"));
}
|
/**
 * Fetches the next batch of splits asynchronously. The buffered fetch size
 * is capped at both the configured buffer size and the caller's maxSize.
 *
 * @throws IllegalArgumentException when maxSize is not positive
 */
@Override
public ListenableFuture<SplitBatch> getNextBatch(ConnectorPartitionHandle partitionHandle, Lifespan lifespan, int maxSize)
{
    checkArgument(maxSize > 0, "Cannot fetch a batch of zero size");
    int batchBufferSize = Math.min(bufferSize, maxSize);
    return GetNextBatch.fetchNextBatchAsync(source, batchBufferSize, maxSize, partitionHandle, lifespan);
}
|
// A source configured to fail at split completion must propagate the mock
// failure through the buffering wrapper; the underlying source is polled
// twice (one split plus the failing completion).
@Test
public void testFail()
{
    MockSplitSource mockSource = new MockSplitSource()
            .setBatchSize(1)
            .increaseAvailableSplits(1)
            .atSplitCompletion(FAIL);
    try (SplitSource source = new BufferingSplitSource(mockSource, 100)) {
        assertFutureFailsWithMockFailure(getNextBatch(source, 2));
        assertEquals(mockSource.getNextBatchInvocationCount(), 2);
    }
}
|
/**
 * Returns the shared metric holder for the gRPC SDK server executor.
 */
public static GrpcServerExecutorMetric getSdkServerExecutorMetric() {
    return sdkServerExecutorMetric;
}
|
// Sets every gauge of the SDK-server executor metric to 1 and reads each one
// back, also verifying the fixed metric type name "grpcSdkServer".
@Test
void testSdkServerExecutorMetric() {
    MetricsMonitor.getSdkServerExecutorMetric().getPoolSize().set(1);
    MetricsMonitor.getSdkServerExecutorMetric().getMaximumPoolSize().set(1);
    MetricsMonitor.getSdkServerExecutorMetric().getCorePoolSize().set(1);
    MetricsMonitor.getSdkServerExecutorMetric().getActiveCount().set(1);
    MetricsMonitor.getSdkServerExecutorMetric().getInQueueTaskCount().set(1);
    MetricsMonitor.getSdkServerExecutorMetric().getTaskCount().set(1);
    MetricsMonitor.getSdkServerExecutorMetric().getCompletedTaskCount().set(1);
    assertEquals("grpcSdkServer", MetricsMonitor.getSdkServerExecutorMetric().getType());
    assertEquals(1, MetricsMonitor.getSdkServerExecutorMetric().getPoolSize().get());
    assertEquals(1, MetricsMonitor.getSdkServerExecutorMetric().getMaximumPoolSize().get());
    assertEquals(1, MetricsMonitor.getSdkServerExecutorMetric().getCorePoolSize().get());
    assertEquals(1, MetricsMonitor.getSdkServerExecutorMetric().getActiveCount().get());
    assertEquals(1, MetricsMonitor.getSdkServerExecutorMetric().getInQueueTaskCount().get());
    assertEquals(1, MetricsMonitor.getSdkServerExecutorMetric().getTaskCount().get());
    assertEquals(1, MetricsMonitor.getSdkServerExecutorMetric().getCompletedTaskCount().get());
}
|
/**
 * Delegates interpretation of the statement to the shared helper,
 * bound to this interpreter's session.
 */
@Override
public InterpreterResult interpret(String st, InterpreterContext context) {
    return helper.interpret(session, st, context);
}
|
// Prepares an insert with a bound string date, executes it and then selects
// the row back, asserting the date was parsed and stored as a timestamp.
@Test
void should_parse_date_value() {
    // Given
    String queries = "@prepare[parse_date]=INSERT INTO zeppelin.users(login,last_update) " +
        "VALUES(?,?)\n" +
        "@bind[parse_date]='last_update','2015-07-30 12:00:01'\n" +
        "SELECT last_update FROM zeppelin.users WHERE login='last_update';";
    // When
    final InterpreterResult actual = interpreter.interpret(queries, intrContext);
    // Then
    assertEquals(Code.SUCCESS, actual.code());
    assertTrue(actual.message().get(0).getData().contains("last_update\n2015-07-30T12:00:01.000Z"),
        actual.message().get(0).getData());
}
|
/**
 * Matches the given path by wrapping it in a predicate and comparing
 * with this one, so equality semantics define the match.
 */
@Override
public boolean test(final Path test) {
    return this.equals(new DefaultPathPredicate(test));
}
|
// The version id participates in path-predicate equality: same path and
// version match, a different version does not.
@Test
public void testPredicateVersionIdFile() {
    final Path t = new Path("/f", EnumSet.of(Path.Type.file), new PathAttributes().withVersionId("1"));
    assertTrue(new DefaultPathPredicate(t).test(t));
    assertTrue(new DefaultPathPredicate(t).test(new Path("/f", EnumSet.of(Path.Type.file), new PathAttributes().withVersionId("1"))));
    assertFalse(new DefaultPathPredicate(t).test(new Path("/f", EnumSet.of(Path.Type.file), new PathAttributes().withVersionId("2"))));
}
|
/**
 * Returns a generic Avro coder for the given schema; convenience
 * delegate to {@link AvroGenericCoder#of}.
 */
public static AvroGenericCoder of(Schema schema) {
    return AvroGenericCoder.of(schema);
}
|
// A union type is deterministic only if every branch is; the map-typed field
// in UnionCase3 makes the non-deterministic union fail with that reason.
@Test
public void testDeterminismUnion() {
    assertDeterministic(AvroCoder.of(DeterministicUnionBase.class));
    assertNonDeterministic(
        AvroCoder.of(NonDeterministicUnionBase.class),
        reasonField(UnionCase3.class, "mapField", "may not be deterministically ordered"));
}
|
/**
 * Parses the field lines of an AS2 Message Disposition Notification (MDN)
 * body part into a structured entity. Known MDN fields are mapped to their
 * dedicated slots; anything else is collected as an extension field. The raw
 * field lines are also preserved (CRLF-joined) on the resulting entity.
 *
 * @param dispositionNotificationFields the raw MDN field lines
 * @return the parsed disposition notification entity
 * @throws ParseException when a known field is present but malformed
 */
public static AS2MessageDispositionNotificationEntity parseDispositionNotification(
        List<CharArrayBuffer> dispositionNotificationFields)
        throws ParseException {
    String reportingUA = null;
    String mtaName = null;
    String finalRecipient = null;
    String originalMessageId = null;
    DispositionMode dispositionMode = null;
    AS2DispositionType dispositionType = null;
    AS2DispositionModifier dispositionModifier = null;
    List<String> failures = new ArrayList<>();
    List<String> errors = new ArrayList<>();
    List<String> warnings = new ArrayList<>();
    Map<String, String> extensionFields = new HashMap<>();
    ReceivedContentMic receivedContentMic = null;
    CharArrayBuffer bodyPartFields = new CharArrayBuffer(DEFAULT_BUFFER_SIZE);
    for (int i = 0; i < dispositionNotificationFields.size(); i++) {
        final CharArrayBuffer fieldLine = dispositionNotificationFields.get(i);
        // Keep the verbatim field lines, CRLF-separated except after the last.
        bodyPartFields.append(fieldLine);
        if (i < dispositionNotificationFields.size() - 1) {
            bodyPartFields.append('\r');
            bodyPartFields.append('\n');
        }
        final Field field = parseDispositionField(fieldLine);
        switch (field.getName().toLowerCase()) {
            case REPORTING_UA: {
                if (field.getElements().length < 1) {
                    throw new ParseException("Invalid '" + MDNField.REPORTING_UA + "' field: UA name is missing");
                }
                reportingUA = field.getValue();
                break;
            }
            case MDN_GATEWAY: {
                Element[] elements = field.getElements();
                if (elements.length < 2) {
                    throw new ParseException("Invalid '" + MDNField.MDN_GATEWAY + "' field: MTA name is missing");
                }
                mtaName = elements[1].getValue();
                break;
            }
            case FINAL_RECIPIENT: {
                Element[] elements = field.getElements();
                if (elements.length < 2) {
                    throw new ParseException(
                            "Invalid '" + MDNField.FINAL_RECIPIENT + "' field: recipient address is missing");
                }
                finalRecipient = elements[1].getValue();
                break;
            }
            case ORIGINAL_MESSAGE_ID: {
                originalMessageId = field.getValue();
                break;
            }
            case DISPOSITION: {
                Element[] elements = field.getElements();
                if (elements.length < 2) {
                    throw new ParseException("Invalid '" + MDNField.DISPOSITION + "' field: " + field.getValue());
                }
                dispositionMode = DispositionMode.parseDispositionMode(elements[0].getValue());
                if (dispositionMode == null) {
                    throw new ParseException(
                            "Invalid '" + MDNField.DISPOSITION + "' field: invalid disposition mode '"
                                                                + elements[0].getValue() + "'");
                }
                // The type element may carry a modifier after a slash,
                // e.g. "failed/unsupported format".
                String dispositionTypeString = elements[1].getValue();
                int slash = dispositionTypeString.indexOf("/");
                if (slash == -1) {
                    dispositionType = AS2DispositionType.parseDispositionType(dispositionTypeString);
                } else {
                    dispositionType = AS2DispositionType.parseDispositionType(dispositionTypeString.substring(0, slash));
                    dispositionModifier
                            = AS2DispositionModifier.parseDispositionType(dispositionTypeString.substring(slash + 1));
                }
                break;
            }
            case FAILURE:
                failures.add(field.getValue());
                break;
            case ERROR:
                errors.add(field.getValue());
                break;
            case WARNING:
                warnings.add(field.getValue());
                break;
            case RECEIVED_CONTENT_MIC: {
                Element[] elements = field.getElements();
                if (elements.length < 1) {
                    throw new ParseException("Invalid '" + MDNField.RECEIVED_CONTENT_MIC + "' field: MIC is missing");
                }
                Element element = elements[0];
                String[] parameters = element.getParameters();
                if (parameters.length < 1) {
                    throw new ParseException(
                            "Invalid '" + MDNField.RECEIVED_CONTENT_MIC + "' field: digest algorithm ID is missing");
                }
                String digestAlgorithmId = parameters[0];
                String encodedMessageDigest = element.getValue();
                receivedContentMic = new ReceivedContentMic(digestAlgorithmId, encodedMessageDigest);
                break;
            }
            default: // Extension Field
                extensionFields.put(field.getName(), field.getValue());
        }
    }
    return new AS2MessageDispositionNotificationEntity(
            reportingUA,
            mtaName,
            finalRecipient,
            originalMessageId,
            dispositionMode,
            dispositionType,
            dispositionModifier,
            failures.toArray(new String[0]),
            errors.toArray(new String[0]),
            warnings.toArray(new String[0]),
            extensionFields,
            receivedContentMic,
            bodyPartFields.toString());
}
|
// Parses the canned disposition-notification content and verifies every
// extracted field against the expected constants, including the extension
// field map and the received-content MIC components.
@Test
public void test() throws Exception {
    InputStream is = new ByteArrayInputStream(DISPOSITION_NOTIFICATION_CONTENT.getBytes());
    AS2SessionInputBuffer inbuffer = new AS2SessionInputBuffer(new BasicHttpTransportMetrics(), 8 * 1024);
    List<CharArrayBuffer> dispositionNotificationFields
            = EntityParser.parseBodyPartFields(inbuffer, is, null, BasicLineParser.INSTANCE,
                    new ArrayList<CharArrayBuffer>());
    AS2MessageDispositionNotificationEntity messageDispositionNotificationEntity
            = DispositionNotificationContentUtils.parseDispositionNotification(dispositionNotificationFields);
    assertEquals(EXPECTED_REPORTING_UA, messageDispositionNotificationEntity.getReportingUA(),
            "Unexpected Reporting UA value");
    assertEquals(EXPECTED_MTN_NAME, messageDispositionNotificationEntity.getMtnName(), "Unexpected MTN Name");
    assertEquals(EXPECTED_ORIGINAL_RECIPIENT,
            messageDispositionNotificationEntity.getExtensionFields().get("Original-Recipient"),
            "Unexpected Original Recipient");
    // Fixed typo in the failure message: "Reciptient" -> "Recipient".
    assertEquals(EXPECTED_FINAL_RECIPIENT, messageDispositionNotificationEntity.getFinalRecipient(),
            "Unexpected Final Recipient");
    assertEquals(EXPECTED_ORIGINAL_MESSAGE_ID, messageDispositionNotificationEntity.getOriginalMessageId(),
            "Unexpected Original Message ID");
    assertEquals(EXPECTED_DISPOSITION_MODE, messageDispositionNotificationEntity.getDispositionMode(),
            "Unexpected Disposition Mode");
    assertNotNull(messageDispositionNotificationEntity.getDispositionModifier(), "Unexpected Null Disposition Modifier");
    assertEquals(EXPECTED_DISPOSITION_MODIFIER, messageDispositionNotificationEntity.getDispositionModifier().getModifier(),
            "Unexpected Disposition Modifier");
    assertEquals(EXPECTED_DISPOSITION_TYPE, messageDispositionNotificationEntity.getDispositionType(),
            "Unexpected Disposition Type");
    assertArrayEquals(EXPECTED_FAILURE, messageDispositionNotificationEntity.getFailureFields(),
            "Unexpected Failure Array value");
    assertArrayEquals(EXPECTED_ERROR, messageDispositionNotificationEntity.getErrorFields(),
            "Unexpected Error Array value");
    assertArrayEquals(EXPECTED_WARNING, messageDispositionNotificationEntity.getWarningFields(),
            "Unexpected Warning Array value");
    assertNotNull(messageDispositionNotificationEntity.getReceivedContentMic(), "Unexpected Null Received Content MIC");
    assertEquals(EXPECTED_ENCODED_MESSAGE_DIGEST,
            messageDispositionNotificationEntity.getReceivedContentMic().getEncodedMessageDigest(),
            "Unexpected Encoded Message Digest");
    assertEquals(EXPECTED_DIGEST_ALGORITHM_ID,
            messageDispositionNotificationEntity.getReceivedContentMic().getDigestAlgorithmId(),
            "Unexpected Digest Algorithm ID");
}
|
/**
 * Renders this service exception as
 * {@code ClassName [HTTP Status:<code>, <optional fields>]: <message>},
 * including only the optional fields that are actually set.
 */
@Override
public String toString()
{
  final StringBuilder builder = new StringBuilder();
  builder.append(getClass().getName())
      .append(" [HTTP Status:")
      .append(_status == null ? "null" : _status.getCode());
  if (_serviceErrorCode != null)
  {
    builder.append(", serviceErrorCode:").append(_serviceErrorCode);
  }
  if (hasCode())
  {
    builder.append(", code:").append(_code);
  }
  if (hasDocUrl())
  {
    builder.append(", docUrl:").append(_docUrl);
  }
  if (hasRequestId())
  {
    builder.append(", requestId:").append(_requestId);
  }
  builder.append("]");
  final String message = getLocalizedMessage();
  if (message != null)
  {
    builder.append(": ").append(message);
  }
  return builder.toString();
}
|
// A null HTTP status must be rendered literally as "null" in toString()
// rather than throwing a NullPointerException.
@Test
public void testNullStatus()
{
    final RestLiServiceException restLiServiceException = new RestLiServiceException((HttpStatus) null);
    Assert.assertTrue(restLiServiceException.toString().contains("[HTTP Status:null]"));
}
|
/**
 * Serializes a Connect value (with optional schema) to JSON bytes. When
 * schemas are enabled the payload is wrapped in a schema envelope; a fully
 * null schema/value pair serializes to null.
 *
 * @throws DataException when the underlying JSON serialization fails
 */
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
    if (schema == null && value == null) {
        return null;
    }
    final JsonNode jsonValue;
    if (config.schemasEnabled()) {
        jsonValue = convertToJsonWithEnvelope(schema, value);
    } else {
        jsonValue = convertToJsonWithoutEnvelope(schema, value);
    }
    try {
        return serializer.serialize(topic, jsonValue);
    } catch (SerializationException e) {
        throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e);
    }
}
|
// A float32 value converts to an enveloped JSON document whose schema node
// declares a non-optional "float" and whose payload round-trips the value.
@Test
public void floatToJson() {
    JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.FLOAT32_SCHEMA, 12.34f));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"float\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertEquals(12.34f, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).floatValue(), 0.001);
}
|
/**
 * Returns a reverse-range iterator that spans every underlying store
 * registered for this store name. Stores that have been migrated away are
 * reported as an InvalidStateStoreException prompting rediscovery.
 */
@Override
public KeyValueIterator<K, V> reverseRange(final K from, final K to) {
    final NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>> nextIteratorFunction = store -> {
        try {
            return store.reverseRange(from, to);
        } catch (final InvalidStateStoreException e) {
            throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.");
        }
    };
    final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
    return new DelegatingPeekingKeyValueIterator<>(
        storeName,
        new CompositeKeyValueIterator<>(stores.iterator(), nextIteratorFunction));
}
|
// Spreads keys across two underlying stores and verifies reverseRange("a","e")
// returns exactly the four in-range entries from both stores combined.
@Test
public void shouldSupportReverseRangeAcrossMultipleKVStores() {
    final KeyValueStore<String, String> cache = newStoreInstance();
    stubProviderTwo.addStore(storeName, cache);
    stubOneUnderlying.put("a", "a");
    stubOneUnderlying.put("b", "b");
    stubOneUnderlying.put("z", "z");
    cache.put("c", "c");
    cache.put("d", "d");
    cache.put("x", "x");
    final List<KeyValue<String, String>> results = toList(theStore.reverseRange("a", "e"));
    assertTrue(results.contains(new KeyValue<>("a", "a")));
    assertTrue(results.contains(new KeyValue<>("b", "b")));
    assertTrue(results.contains(new KeyValue<>("c", "c")));
    assertTrue(results.contains(new KeyValue<>("d", "d")));
    assertEquals(4, results.size());
}
|
/**
 * Resolves the cast rule for converting {@code inputType} to
 * {@code targetType}, or null when no rule applies; delegates to the
 * singleton provider instance.
 */
public static @Nullable CastRule<?, ?> resolve(LogicalType inputType, LogicalType targetType) {
    return INSTANCE.internalResolve(inputType, targetType);
}
|
// Distinct types backed by INT resolve to the identity cast rule in every
// direction, including distinct-to-distinct.
@Test
void testResolveDistinctTypeToIdentityCastRule() {
    assertThat(CastRuleProvider.resolve(DISTINCT_INT, INT)).isSameAs(IdentityCastRule.INSTANCE);
    assertThat(CastRuleProvider.resolve(INT, DISTINCT_INT)).isSameAs(IdentityCastRule.INSTANCE);
    assertThat(CastRuleProvider.resolve(DISTINCT_INT, DISTINCT_INT))
        .isSameAs(IdentityCastRule.INSTANCE);
}
|
/**
 * Returns whether thread affinity is in effect, i.e. at least one
 * allowed-CPU set has been configured.
 */
public boolean isEnabled() {
    return !allowedCpusList.isEmpty();
}
|
// The range spec "1-4" expands to four single-CPU bitsets, one per CPU
// in the inclusive range, and marks affinity as enabled.
@Test
public void whenRange() {
    ThreadAffinity threadAffinity = new ThreadAffinity("1-4");
    assertTrue(threadAffinity.isEnabled());
    assertEquals(4, threadAffinity.allowedCpusList.size());
    assertEquals(threadAffinity.allowedCpusList.get(0), newBitset(1));
    assertEquals(threadAffinity.allowedCpusList.get(1), newBitset(2));
    assertEquals(threadAffinity.allowedCpusList.get(2), newBitset(3));
    assertEquals(threadAffinity.allowedCpusList.get(3), newBitset(4));
}
|
/**
 * Returns the column value at the given index from the merged result set
 * as a plain Object.
 *
 * @throws SQLException when the underlying result set access fails
 */
@Override
public Object getObject(final int columnIndex) throws SQLException {
    return mergeResultSet.getValue(columnIndex, Object.class);
}
|
// getObject with an explicit Time target type must delegate to the merged
// result set and return the mocked Time instance unchanged.
@Test
void assertGetObjectWithTime() throws SQLException {
    Time result = mock(Time.class);
    when(mergeResultSet.getValue(1, Time.class)).thenReturn(result);
    assertThat(shardingSphereResultSet.getObject(1, Time.class), is(result));
}
|
@Udf(description = "Converts a string representation of a date in the given format"
    + " into the number of days since 1970-01-01 00:00:00 UTC/GMT.")
public int stringToDate(
    @UdfParameter(
        description = "The string representation of a date.") final String formattedDate,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.time.format.DateTimeFormatter.") final String formatPattern) {
  // A null argument is not special-cased: there is no sentinel value for a
  // "null" date, so failures surface as a KsqlFunctionException below.
  try {
    // Formatters are cached per pattern; cache lookup may throw ExecutionException.
    final DateTimeFormatter dateFormatter = formatters.get(formatPattern);
    final LocalDate parsed = LocalDate.parse(formattedDate, dateFormatter);
    return (int) parsed.toEpochDay();
  } catch (final ExecutionException | RuntimeException e) {
    throw new KsqlFunctionException("Failed to parse date '" + formattedDate
        + "' with formatter '" + formatPattern
        + "': " + e.getMessage(), e);
  }
}
|
// 2021-12-01 is 18962 days after the 1970-01-01 epoch.
@Test
public void shouldConvertStringToDate() {
    // When:
    final int result = udf.stringToDate("2021-12-01", "yyyy-MM-dd");
    // Then:
    assertThat(result, is(18962));
}
|
/**
 * Stores the way's mapped "surface" value in the edge's encoded value.
 * Ways whose surface tag is absent or unrecognized (MISSING) are left
 * at the encoder's default.
 */
@Override
public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay readerWay, IntsRef relationFlags) {
    Surface mappedSurface = Surface.find(readerWay.getTag("surface"));
    if (mappedSurface != MISSING) {
        surfaceEnc.setEnum(false, edgeId, edgeIntAccess, mappedSurface);
    }
}
|
// Colon-qualified surface subtypes collapse to their parent value:
// "concrete:plates" -> CONCRETE, "cobblestone:flattened" -> COBBLESTONE.
@Test
public void testSubtypes() {
    IntsRef relFlags = new IntsRef(2);
    ReaderWay readerWay = new ReaderWay(1);
    EdgeIntAccess edgeIntAccess = new ArrayEdgeIntAccess(1);
    int edgeId = 0;
    readerWay.setTag("highway", "primary");
    readerWay.setTag("surface", "concrete:plates");
    parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
    assertEquals(Surface.CONCRETE, surfaceEnc.getEnum(false, edgeId, edgeIntAccess));
    readerWay.setTag("surface", "cobblestone:flattened");
    parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
    assertEquals(Surface.COBBLESTONE, surfaceEnc.getEnum(false, edgeId, edgeIntAccess));
}
|
/**
 * Called when the metric set completes: prints a CSV-style header followed
 * by the accumulated metrics to the configured output stream.
 */
@Override
public void done(RouteMetricSet metrics) {
    try {
        output.println("# Time used, num ok, num error, min latency, max latency, average latency");
        printMetrics(output, metrics);
    } catch (Exception e) {
        // NOTE(review): errors are only dumped to stderr via printStackTrace;
        // a logger would be preferable, but changing this alters observable
        // behavior of the CLI output — confirm before replacing.
        e.printStackTrace();
    }
}
|
// Feeds four replies (two ok, two with errors) through the metric set while
// advancing a manual clock, then checks the printed summary line at 62000ms
// matches the expected "time, ok, error, min, max, avg" pattern.
@Test
void testSimple() {
    ByteArrayOutputStream output = new ByteArrayOutputStream();
    ManualTimer timer = new ManualTimer();
    BenchmarkProgressPrinter printer = new BenchmarkProgressPrinter(timer, new PrintStream(output));
    RouteMetricSet metrics = new RouteMetricSet("foobar", timer, printer);
    {
        EmptyReply reply = new EmptyReply();
        reply.setMessage(PutDocumentMessage.createEmpty().setTimeReceived(-1));
        metrics.addReply(reply);
    }
    timer.set(1200);
    {
        EmptyReply reply = new EmptyReply();
        reply.setMessage(PutDocumentMessage.createEmpty().setTimeReceived(-1));
        metrics.addReply(reply);
    }
    {
        EmptyReply reply = new EmptyReply();
        reply.setMessage(UpdateDocumentMessage.createEmpty().setTimeReceived(-1));
        metrics.addReply(reply);
    }
    timer.set(2400);
    {
        // First failing reply (error code 32).
        EmptyReply reply = new EmptyReply();
        reply.setMessage(UpdateDocumentMessage.createEmpty().setTimeReceived(-1));
        reply.addError(new com.yahoo.messagebus.Error(32, "foo"));
        metrics.addReply(reply);
    }
    timer.set(62000);
    {
        // Second failing reply (error code 64).
        EmptyReply reply = new EmptyReply();
        reply.setMessage(UpdateDocumentMessage.createEmpty().setTimeReceived(-1));
        reply.addError(new com.yahoo.messagebus.Error(64, "bar"));
        metrics.addReply(reply);
    }
    metrics.done();
    String val = output.toString().split("\n")[1];
    String correctPattern = "62000, \\d+, \\d+, \\d+, \\d+, \\d+$";
    assertTrue(val.matches(correctPattern), "Value '" + val + "' does not match pattern '" + correctPattern + "'");
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.