focal_method (string, lengths 13–60.9k) | test_case (string, lengths 25–109k)
|---|---|
/**
 * Fences the target by running the user-supplied shell command.
 * The command runs via "bash -e -c" (or "cmd.exe /c" on Windows) with the
 * fencer configuration and target details exported as environment variables.
 *
 * @return true iff the command exits with status 0; false if it could not be
 *         launched or this thread was interrupted while waiting for it.
 */
@Override
public boolean tryFence(HAServiceTarget target, String args) {
    ProcessBuilder builder;
    String cmd = parseArgs(target.getTransitionTargetHAStatus(), args);
    if (!Shell.WINDOWS) {
        // -e makes bash abort on the first failing command.
        builder = new ProcessBuilder("bash", "-e", "-c", cmd);
    } else {
        builder = new ProcessBuilder("cmd.exe", "/c", cmd);
    }
    setConfAsEnvVars(builder.environment());
    addTargetInfoAsEnvVars(target, builder.environment());
    Process p;
    try {
        p = builder.start();
        // The fencing command gets no stdin.
        p.getOutputStream().close();
    } catch (IOException e) {
        LOG.warn("Unable to execute " + cmd, e);
        return false;
    }
    String pid = tryGetPid(p);
    LOG.info("Launched fencing command '" + cmd + "' with "
        + ((pid != null) ? ("pid " + pid) : "unknown pid"));
    // Prefix every relayed output line with the (abbreviated) command and pid.
    String logPrefix = abbreviate(cmd, ABBREV_LENGTH);
    if (pid != null) {
        logPrefix = "[PID " + pid + "] " + logPrefix;
    }
    // Pump logs to stderr
    StreamPumper errPumper = new StreamPumper(
        LOG, logPrefix, p.getErrorStream(),
        StreamPumper.StreamType.STDERR);
    errPumper.start();
    StreamPumper outPumper = new StreamPumper(
        LOG, logPrefix, p.getInputStream(),
        StreamPumper.StreamType.STDOUT);
    outPumper.start();
    int rc;
    try {
        rc = p.waitFor();
        // Join the pumpers so all of the command's output reaches the log
        // before we report the result.
        errPumper.join();
        outPumper.join();
    } catch (InterruptedException ie) {
        // NOTE(review): the interrupt flag is not restored here — confirm intended.
        LOG.warn("Interrupted while waiting for fencing command: " + cmd);
        return false;
    }
    return rc == 0;
}
|
// Verifies that fencer configuration values are visible to the fencing command
// as environment variables; variable-expansion syntax differs per platform.
@Test
public void testConfAsEnvironment() {
    if (!Shell.WINDOWS) {
        fencer.tryFence(TEST_TARGET, "echo $in_fencing_tests");
        Mockito.verify(ShellCommandFencer.LOG).info(
            Mockito.endsWith("echo $in...ing_tests: yessir"));
    } else {
        fencer.tryFence(TEST_TARGET, "echo %in_fencing_tests%");
        Mockito.verify(ShellCommandFencer.LOG).info(
            Mockito.endsWith("echo %in...ng_tests%: yessir"));
    }
}
|
/**
 * Converts a PMML {@code Target} into a {@code KiePMMLTarget}.
 * Nested target values are mapped individually; optional attributes
 * (op type, cast-integer) are translated only when present.
 */
public static KiePMMLTarget getKiePMMLTarget(final Target target) {
    final List<TargetValue> targetValues = target.hasTargetValues() ? target.getTargetValues()
            .stream()
            .map(KiePMMLTargetInstanceFactory::getKieTargetValue)
            .collect(Collectors.toList()) : Collections.emptyList();
    final OP_TYPE opType = target.getOpType() != null ? OP_TYPE.byName(target.getOpType().value()) : null;
    // getField() may legitimately be null; the former
    // "getField() != null ? getField() : null" ternary was a no-op.
    final String field = target.getField();
    final CAST_INTEGER castInteger = target.getCastInteger() != null ?
            CAST_INTEGER.byName(target.getCastInteger().value()) : null;
    final TargetField targetField = new TargetField(targetValues,
                                                   opType,
                                                   field,
                                                   castInteger,
                                                   target.getMin(),
                                                   target.getMax(),
                                                   target.getRescaleConstant(),
                                                   target.getRescaleFactor());
    final KiePMMLTarget.Builder builder = KiePMMLTarget.builder(targetField.getName(), Collections.emptyList(),
                                                                targetField);
    return builder.build();
}
|
// Round-trip check: a random PMML Target converts to a KiePMMLTarget whose
// fields mirror the source (verified by the shared helper).
@Test
void getKiePMMLTarget() {
    final Target toConvert = getRandomTarget();
    KiePMMLTarget retrieved = KiePMMLTargetInstanceFactory.getKiePMMLTarget(toConvert);
    commonVerifyKiePMMLTarget(retrieved, toConvert);
}
|
/**
 * Entry point for a find-files query scoped to the given table.
 *
 * @param table the table whose data files will be searched
 * @return a new {@link Builder} for that table
 */
public static Builder in(Table table) {
    final Builder builder = new Builder(table);
    return builder;
}
|
// With caseInsensitive() enabled, an upper-case column reference ("FILE_PATH")
// in the metadata predicate still matches the lower-case schema column.
@TestTemplate
public void testCaseSensitivity() {
    table
        .newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .appendFile(FILE_C)
        .appendFile(FILE_D)
        .commit();
    Iterable<DataFile> files =
        FindFiles.in(table)
            .caseInsensitive()
            .withMetadataMatching(Expressions.startsWith("FILE_PATH", "/path/to/data-a"))
            .collect();
    assertThat(pathSet(files)).isEqualTo(pathSet(FILE_A));
}
|
/**
 * Decodes a value from a URL-safe, unpadded base64 string using the given
 * coder in the OUTER context (the stream delimits the value).
 *
 * @throws CoderException if the coder fails to decode the bytes
 */
public static <T> T decodeFromBase64(Coder<T> coder, String encodedValue) throws CoderException {
    return decodeFromSafeStream(
        coder,
        new ByteArrayInputStream(BaseEncoding.base64Url().omitPadding().decode(encodedValue)),
        Coder.Context.OUTER);
}
|
// A coder must not close the stream it decodes from; the guarding stream
// rejects the attempt with UnsupportedOperationException.
@Test
public void testClosingCoderFailsWhenDecodingBase64() throws Exception {
    expectedException.expect(UnsupportedOperationException.class);
    expectedException.expectMessage("Caller does not own the underlying");
    CoderUtils.decodeFromBase64(new ClosingCoder(), "test-value");
}
|
// Pure delegation: completion state comes from the wrapped future.
@Override
public boolean isDone() {
    return future.isDone();
}
|
// Wrapping an already-completed future must report done immediately.
@Test
public void test_isDone() {
    Future<Object> future = new DelegatingCompletableFuture<>(serializationService, newCompletedFuture("value"));
    assertTrue(future.isDone());
}
|
/**
 * Allocates a storage file path for the given table from StarMgr.
 *
 * @throws DdlException if the configured storage type is invalid or the
 *         StarMgr client call fails
 */
public FilePathInfo allocateFilePath(long dbId, long tableId) throws DdlException {
    try {
        FileStoreType fsType = getFileStoreType(Config.cloud_native_storage_type);
        if (fsType == null || fsType == FileStoreType.INVALID) {
            throw new DdlException("Invalid cloud native storage type: " + Config.cloud_native_storage_type);
        }
        String suffix = constructTablePath(dbId, tableId);
        FilePathInfo pathInfo = client.allocateFilePath(serviceId, fsType, suffix);
        LOG.debug("Allocate file path from starmgr: {}", pathInfo);
        return pathInfo;
    } catch (StarClientException e) {
        // NOTE(review): only the message survives; the original cause is dropped —
        // confirm whether DdlException supports a cause and attach it if so.
        throw new DdlException("Failed to allocate file path from StarMgr, error: " + e.getMessage());
    }
}
|
// Exercises allocateFilePath via JMockit expectations: service "1" succeeds,
// service "2" raises a StarClientException, and an invalid storage type is
// rejected before any client call.
@Test
public void testAllocateFilePath() throws StarClientException {
    long dbId = 1000;
    long tableId = 123;
    new Expectations() {
        {
            client.allocateFilePath("1", FileStoreType.S3, anyString);
            result = FilePathInfo.newBuilder().build();
            minTimes = 0;
            client.allocateFilePath("2", FileStoreType.S3, anyString);
            result = new StarClientException(StatusCode.INVALID_ARGUMENT, "mocked exception");
        }
    };
    Deencapsulation.setField(starosAgent, "serviceId", "1");
    Config.cloud_native_storage_type = "s3";
    ExceptionChecker.expectThrowsNoException(() -> starosAgent.allocateFilePath(dbId, tableId));
    // Unknown storage type fails fast with a DdlException.
    Config.cloud_native_storage_type = "ss";
    ExceptionChecker.expectThrowsWithMsg(DdlException.class, "Invalid cloud native storage type: ss",
        () -> starosAgent.allocateFilePath(dbId, tableId));
    Config.cloud_native_storage_type = "s3";
    Deencapsulation.setField(starosAgent, "serviceId", "2");
    ExceptionChecker.expectThrowsWithMsg(DdlException.class,
        "Failed to allocate file path from StarMgr, error: INVALID_ARGUMENT:mocked exception",
        () -> starosAgent.allocateFilePath(dbId, tableId));
    // Second round covers the fs-key overload of allocateFilePath.
    new Expectations() {
        {
            client.allocateFilePath("1", "test-fskey", anyString);
            result = FilePathInfo.newBuilder().build();
            minTimes = 0;
            client.allocateFilePath("2", "test-fskey", anyString);
            result = new StarClientException(StatusCode.INVALID_ARGUMENT, "mocked exception");
        }
    };
    Config.cloud_native_storage_type = "s3";
    Deencapsulation.setField(starosAgent, "serviceId", "1");
    ExceptionChecker.expectThrowsNoException(() -> starosAgent.allocateFilePath("test-fskey", dbId, tableId));
    Deencapsulation.setField(starosAgent, "serviceId", "2");
    ExceptionChecker.expectThrowsWithMsg(DdlException.class,
        "Failed to allocate file path from StarMgr, error: INVALID_ARGUMENT:mocked exception",
        () -> starosAgent.allocateFilePath("test-fskey", dbId, tableId));
}
|
/**
 * Pages through the configuration change history for the given
 * dataId / group / tenant triple. A blank tenant is normalized to the
 * empty string so it matches the stored value.
 *
 * @throws DataAccessException re-thrown after being logged at FATAL level
 */
@Override
public Page<ConfigHistoryInfo> findConfigHistory(String dataId, String group, String tenant, int pageNo,
    int pageSize) {
    PaginationHelper<ConfigHistoryInfo> helper = createPaginationHelper();
    String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
    // Offset is (pageNo - 1) * pageSize; pageNo is 1-based.
    MapperContext context = new MapperContext((pageNo - 1) * pageSize, pageSize);
    context.putWhereParameter(FieldConstant.DATA_ID, dataId);
    context.putWhereParameter(FieldConstant.GROUP_ID, group);
    context.putWhereParameter(FieldConstant.TENANT_ID, tenantTmp);
    HistoryConfigInfoMapper historyConfigInfoMapper = mapperManager.findMapper(
        dataSourceService.getDataSourceType(), TableConstant.HIS_CONFIG_INFO);
    String sqlCountRows = historyConfigInfoMapper.count(Arrays.asList("data_id", "group_id", "tenant_id"));
    MapperResult sqlFetchRows = historyConfigInfoMapper.pageFindConfigHistoryFetchRows(context);
    Page<ConfigHistoryInfo> page;
    try {
        page = helper.fetchPage(sqlCountRows, sqlFetchRows.getSql(), sqlFetchRows.getParamList().toArray(), pageNo,
            pageSize, HISTORY_LIST_ROW_MAPPER);
    } catch (DataAccessException e) {
        LogUtil.FATAL_LOG.error("[list-config-history] error, dataId:{}, group:{}", new Object[] {dataId, group},
            e);
        throw e;
    }
    return page;
}
|
// Happy path: mocked count and row queries populate the returned Page;
// failure path: a connection error propagates with its original message.
@Test
void testFindConfigHistory() {
    String dataId = "dataId34567";
    String group = "group34567";
    String tenant = "tenant34567";
    //mock count
    Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}), eq(Integer.class))).thenReturn(300);
    //mock list
    List<ConfigHistoryInfo> mockList = new ArrayList<>();
    mockList.add(createMockConfigHistoryInfo(0));
    mockList.add(createMockConfigHistoryInfo(1));
    mockList.add(createMockConfigHistoryInfo(2));
    Mockito.when(jdbcTemplate.query(anyString(), eq(new Object[] {dataId, group, tenant}), eq(HISTORY_LIST_ROW_MAPPER)))
        .thenReturn(mockList);
    int pageSize = 100;
    int pageNo = 2;
    //execute & verify
    Page<ConfigHistoryInfo> historyReturn = externalHistoryConfigInfoPersistService.findConfigHistory(dataId, group, tenant, pageNo,
        pageSize);
    assertEquals(mockList, historyReturn.getPageItems());
    assertEquals(300, historyReturn.getTotalCount());
    //mock exception
    Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}), eq(Integer.class)))
        .thenThrow(new CannotGetJdbcConnectionException("conn error111"));
    try {
        externalHistoryConfigInfoPersistService.findConfigHistory(dataId, group, tenant, pageNo, pageSize);
        assertTrue(false);
    } catch (Exception e) {
        assertEquals("conn error111", e.getMessage());
    }
}
|
// Differential entropy of the logistic distribution: ln(scale) + 2 (nats).
@Override
public double entropy() {
    return Math.log(scale) + 2;
}
|
// For Logistic(location=2, scale=1): mean = location = 2 and
// entropy = ln(scale) + 2 = 2.
@Test
public void testEntropy() {
    System.out.println("entropy");
    LogisticDistribution instance = new LogisticDistribution(2.0, 1.0);
    instance.rand();
    assertEquals(2.0, instance.mean(), 1E-7);
    // Fix: actually exercise entropy(); the test previously never called it.
    assertEquals(2.0, instance.entropy(), 1E-7);
}
|
/**
 * Final decode pass when the channel input closes; decoding is skipped when
 * the buffer is already drained.
 */
protected void decodeLast(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
    if (in.isReadable()) {
        // Only call decode() if there is something left in the buffer to decode.
        // See https://github.com/netty/netty/issues/4386
        decodeRemovalReentryProtection(ctx, in, out);
    }
}
|
// When the decoder removes itself during decodeLast, the still-undecoded
// bytes must be released to the pipeline as-is instead of being dropped.
@Test
public void testDecodeLast() {
    final AtomicBoolean removeHandler = new AtomicBoolean();
    EmbeddedChannel channel = new EmbeddedChannel(new ByteToMessageDecoder() {
        @Override
        protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) {
            if (removeHandler.get()) {
                ctx.pipeline().remove(this);
            }
        }
    });
    byte[] bytes = new byte[1024];
    PlatformDependent.threadLocalRandom().nextBytes(bytes);
    assertFalse(channel.writeInbound(Unpooled.copiedBuffer(bytes)));
    assertNull(channel.readInbound());
    removeHandler.set(true);
    // This should trigger channelInputClosed(...)
    channel.pipeline().fireUserEventTriggered(ChannelInputShutdownEvent.INSTANCE);
    assertTrue(channel.finish());
    assertBuffer(Unpooled.wrappedBuffer(bytes), (ByteBuf) channel.readInbound());
    assertNull(channel.readInbound());
}
|
/**
 * Registers a recurring job described by build-time metadata, or deletes it
 * when the resolved cron/interval placeholder equals the "disabled" marker.
 * Exactly one of cron or interval must be supplied.
 *
 * @throws IllegalArgumentException if neither or both of cron/interval are given
 */
public void schedule(BeanContainer container, String id, String cron, String interval, String zoneId, String className, String methodName, List<JobParameter> parameterList) {
    JobScheduler scheduler = container.beanInstance(JobScheduler.class);
    String jobId = getId(id);
    String optionalCronExpression = getCronExpression(cron);
    String optionalInterval = getInterval(interval);
    if (StringUtils.isNullOrEmpty(cron) && StringUtils.isNullOrEmpty(optionalInterval))
        throw new IllegalArgumentException("Either cron or interval attribute is required.");
    if (StringUtils.isNotNullOrEmpty(cron) && StringUtils.isNotNullOrEmpty(optionalInterval))
        throw new IllegalArgumentException("Both cron and interval attribute provided. Only one is allowed.");
    if (Recurring.RECURRING_JOB_DISABLED.equals(optionalCronExpression) || Recurring.RECURRING_JOB_DISABLED.equals(optionalInterval)) {
        if (isNullOrEmpty(jobId)) {
            LOGGER.warn("You are trying to disable a recurring job using placeholders but did not define an id.");
        } else {
            scheduler.deleteRecurringJob(jobId);
        }
    } else {
        JobDetails jobDetails = new JobDetails(className, null, methodName, parameterList);
        jobDetails.setCacheable(true);
        // NOTE(review): scheduling passes the raw `id` while deletion uses the
        // resolved `jobId` — confirm this asymmetry is intentional.
        if (isNotNullOrEmpty(optionalCronExpression)) {
            scheduler.scheduleRecurrently(id, jobDetails, CronExpression.create(optionalCronExpression), getZoneId(zoneId));
        } else {
            scheduler.scheduleRecurrently(id, jobDetails, new Interval(optionalInterval), getZoneId(zoneId));
        }
    }
}
|
// An ISO-8601 interval (and no cron) schedules a recurring job with the
// captured JobDetails and the system default time zone.
@Test
void scheduleSchedulesIntervalJobWithJobRunr() {
    final String id = "my-job-id";
    final JobDetails jobDetails = jobDetails().build();
    final String cron = null;
    final String interval = "PT10M";
    final String zoneId = null;
    jobRunrRecurringJobRecorder.schedule(beanContainer, id, cron, interval, zoneId, jobDetails.getClassName(), jobDetails.getMethodName(), jobDetails.getJobParameters());
    verify(jobScheduler).scheduleRecurrently(eq(id), jobDetailsArgumentCaptor.capture(), eq(new Interval("PT10M")), eq(ZoneId.systemDefault()));
    assertThat(jobDetailsArgumentCaptor.getValue())
        .hasClassName(jobDetails.getClassName())
        .hasMethodName(jobDetails.getMethodName())
        .hasArgs(jobDetails.getJobParameterValues());
}
|
/**
 * Loads the highest-versioned mount-table file (name format
 * "mount-table.&lt;versionNumber&gt;.xml") from the given directory into the
 * supplied configuration. Files whose names do not carry a numeric version
 * segment are logged and skipped; if no valid file exists the method returns
 * without modifying {@code conf}.
 */
@Override
public void load(String mountTableConfigPath, Configuration conf)
    throws IOException {
    this.mountTable = new Path(mountTableConfigPath);
    String scheme = mountTable.toUri().getScheme();
    FsGetter fsGetter = new ViewFileSystemOverloadScheme.ChildFsGetter(scheme);
    try (FileSystem fs = fsGetter.getNewInstance(mountTable.toUri(), conf)) {
        RemoteIterator<LocatedFileStatus> listFiles =
            fs.listFiles(mountTable, false);
        LocatedFileStatus lfs = null;
        int higherVersion = -1;
        while (listFiles.hasNext()) {
            LocatedFileStatus curLfs = listFiles.next();
            String cur = curLfs.getPath().getName();
            String[] nameParts = cur.split(REGEX_DOT);
            if (nameParts.length < 2) {
                logInvalidFileNameFormat(cur);
                continue; // invalid file name
            }
            // Version number is the second-to-last dot-separated segment.
            int curVersion = higherVersion;
            try {
                curVersion = Integer.parseInt(nameParts[nameParts.length - 2]);
            } catch (NumberFormatException nfe) {
                logInvalidFileNameFormat(cur);
                continue;
            }
            if (curVersion > higherVersion) {
                higherVersion = curVersion;
                lfs = curLfs;
            }
        }
        if (lfs == null) {
            // No valid mount table file found.
            // TODO: Should we fail? Currently viewfs init will fail if no mount
            // links anyway.
            LOGGER.warn("No valid mount-table file exist at: {}. At least one "
                + "mount-table file should present with the name format: "
                + "mount-table.<versionNumber>.xml", mountTableConfigPath);
            return;
        }
        // Latest version file.
        Path latestVersionMountTable = lfs.getPath();
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Loading the mount-table {} into configuration.",
                latestVersionMountTable);
        }
        try (FSDataInputStream open = fs.open(latestVersionMountTable)) {
            Configuration newConf = new Configuration(false);
            newConf.addResource(open);
            // This will add configuration props as resource, instead of stream
            // itself. So, that stream can be closed now.
            conf.addResource(newConf);
        }
    }
}
|
// Writes mount links into a mount-table file and verifies load() surfaces
// both links in the configuration.
// NOTE(review): the test name says "NonExistent" but it loads an existing
// (old-version) file — confirm the intended scenario.
@Test
public void testLoadWithNonExistentMountFile() throws Exception {
    ViewFsTestSetup.addMountLinksToFile(TABLE_NAME,
        new String[] {SRC_ONE, SRC_TWO },
        new String[] {TARGET_ONE, TARGET_TWO },
        new Path(oldVersionMountTableFile.toURI()), conf);
    loader.load(oldVersionMountTableFile.toURI().toString(), conf);
    Assert.assertEquals(conf.get(MOUNT_LINK_KEY_SRC_TWO), TARGET_TWO);
    Assert.assertEquals(conf.get(MOUNT_LINK_KEY_SRC_ONE), TARGET_ONE);
}
|
// Thin delegation: looks up the dict data row matching the type + label pair.
@Override
public DictDataDO parseDictData(String dictType, String label) {
    return dictDataMapper.selectByDictTypeAndLabel(dictType, label);
}
|
// Inserts two rows sharing a dict type and checks lookup returns only the
// row matching the requested label.
@Test
public void testParseDictData() {
    // mock data
    DictDataDO dictDataDO = randomDictDataDO().setDictType("yunai").setLabel("1");
    dictDataMapper.insert(dictDataDO);
    DictDataDO dictDataDO02 = randomDictDataDO().setDictType("yunai").setLabel("2");
    dictDataMapper.insert(dictDataDO02);
    // prepare arguments
    String dictType = "yunai";
    String label = "1";
    // invoke
    DictDataDO dbDictData = dictDataService.parseDictData(dictType, label);
    // assert
    assertEquals(dictDataDO, dbDictData);
}
|
/**
 * An index is managed by this set iff it is non-empty, is not the
 * write-index alias, and matches this set's index-name pattern.
 */
@Override
public boolean isManagedIndex(String index) {
    if (isNullOrEmpty(index) || isWriteIndexAlias(index)) {
        return false;
    }
    return indexPattern.matcher(index).matches();
}
|
// A '+' in the index prefix must be treated literally (regex-escaped), so
// "some+index_0" is still recognized as managed.
@Test
public void identifiesIndicesWithPlusAsBeingManaged() {
    final IndexSetConfig configWithPlus = config.toBuilder().indexPrefix("some+index").build();
    final String indexName = configWithPlus.indexPrefix() + "_0";
    final MongoIndexSet mongoIndexSet = createIndexSet(configWithPlus);
    assertThat(mongoIndexSet.isManagedIndex(indexName)).isTrue();
}
|
/**
 * Renders a byte count as a human-readable string, e.g. "1.5 MB".
 * Non-positive sizes render as "0".
 *
 * @param size byte count
 * @return formatted size with the largest applicable unit name
 */
public static String format(long size) {
    if (size <= 0) {
        return "0";
    }
    // floor(log_1024(size)), clamped to the largest named unit.
    final int unitIndex = Math.min(DataUnit.UNIT_NAMES.length - 1, (int) (Math.log10(size) / Math.log10(1024)));
    final double scaled = size / Math.pow(1024, unitIndex);
    return new DecimalFormat("#,##0.##").format(scaled) + " " + DataUnit.UNIT_NAMES[unitIndex];
}
|
// Spot-checks unit selection at EB, PB and TB boundaries.
@Test
public void formatTest(){
    String format = DataSizeUtil.format(Long.MAX_VALUE);
    assertEquals("8 EB", format);
    format = DataSizeUtil.format(1024L * 1024 * 1024 * 1024 * 1024);
    assertEquals("1 PB", format);
    format = DataSizeUtil.format(1024L * 1024 * 1024 * 1024);
    assertEquals("1 TB", format);
}
|
/**
 * Builds the master-to-replicas topology of the cluster.
 * Masters are first indexed by node id so each replica resolves its master
 * in O(1), replacing the previous O(masters) scan per node.
 * Masters without replicas do not appear in the result (unchanged behavior).
 *
 * @return map from each master node to the replicas reporting to it
 */
@Override
public Map<RedisClusterNode, Collection<RedisClusterNode>> clusterGetMasterSlaveMap() {
    Iterable<RedisClusterNode> nodes = clusterGetNodes();
    // Index masters by id for constant-time lookup.
    Map<String, RedisClusterNode> mastersById = new HashMap<String, RedisClusterNode>();
    for (RedisClusterNode node : nodes) {
        if (node.isMaster()) {
            mastersById.put(node.getId(), node);
        }
    }
    Map<RedisClusterNode, Collection<RedisClusterNode>> result = new HashMap<RedisClusterNode, Collection<RedisClusterNode>>();
    for (RedisClusterNode node : nodes) {
        if (node.getMasterId() == null) {
            continue; // masters and unassigned nodes carry no master id
        }
        RedisClusterNode master = mastersById.get(node.getMasterId());
        if (master == null) {
            continue; // replica of an unknown master; skip as before
        }
        Collection<RedisClusterNode> replicas = result.get(master);
        if (replicas == null) {
            replicas = new ArrayList<RedisClusterNode>();
            result.put(master, replicas);
        }
        replicas.add(node);
    }
    return result;
}
|
// Test cluster topology: three masters, each with exactly one replica.
@Test
public void testClusterGetMasterSlaveMap() {
    Map<RedisClusterNode, Collection<RedisClusterNode>> map = connection.clusterGetMasterSlaveMap();
    assertThat(map).hasSize(3);
    for (Collection<RedisClusterNode> slaves : map.values()) {
        assertThat(slaves).hasSize(1);
    }
}
|
// Pure delegation to the wrapped window store.
@Override
public WindowStoreIterator<byte[]> backwardFetch(final Bytes key,
                                                 final long timeFrom,
                                                 final long timeTo) {
    return wrapped().backwardFetch(key, timeFrom, timeTo);
}
|
// Instant arguments must be forwarded to the inner store as epoch millis.
@Test
public void shouldDelegateToUnderlyingStoreWhenBackwardFetching() {
    store.backwardFetch(bytesKey, ofEpochMilli(0), ofEpochMilli(10));
    verify(inner).backwardFetch(bytesKey, 0, 10);
}
|
/**
 * Handles a reply-message request: parses the header, runs the before/after
 * send-message hooks around the actual processing, and returns the response.
 *
 * @return the response command, or null when the header cannot be parsed
 */
@Override
public RemotingCommand processRequest(ChannelHandlerContext ctx,
    RemotingCommand request) throws RemotingCommandException {
    SendMessageContext mqtraceContext = null;
    SendMessageRequestHeader requestHeader = parseRequestHeader(request);
    if (requestHeader == null) {
        return null;
    }
    mqtraceContext = buildMsgContext(ctx, requestHeader, request);
    // Hooks bracket the processing so tracing sees both request and response.
    this.executeSendMessageHookBefore(mqtraceContext);
    RemotingCommand response = this.processReplyMessageRequest(ctx, request, mqtraceContext, requestHeader);
    this.executeSendMessageHookAfter(response, mqtraceContext);
    return response;
}
|
// With the store and broker-to-client call mocked to succeed, the processor
// must answer SUCCESS and echo the request opaque.
@Test
public void testProcessRequest_Success() throws RemotingCommandException, InterruptedException, RemotingTimeoutException, RemotingSendRequestException {
    when(messageStore.putMessage(any(MessageExtBrokerInner.class))).thenReturn(new PutMessageResult(PutMessageStatus.PUT_OK, new AppendMessageResult(AppendMessageStatus.PUT_OK)));
    brokerController.getProducerManager().registerProducer(group, clientInfo);
    final RemotingCommand request = createSendMessageRequestHeaderCommand(RequestCode.SEND_REPLY_MESSAGE);
    when(brokerController.getBroker2Client().callClient(any(), any(RemotingCommand.class))).thenReturn(createResponse(ResponseCode.SUCCESS, request));
    RemotingCommand responseToReturn = replyMessageProcessor.processRequest(handlerContext, request);
    assertThat(responseToReturn.getCode()).isEqualTo(ResponseCode.SUCCESS);
    assertThat(responseToReturn.getOpaque()).isEqualTo(request.getOpaque());
}
|
/**
 * Diagnostic snapshot of the address-server lookup state.
 *
 * @return map with the health flag, server URL, env-id URL and failure count
 */
@Override
public Map<String, Object> info() {
    final Map<String, Object> state = new HashMap<>(4);
    state.put("addressServerHealth", isAddressServerHealth);
    state.put("addressServerUrl", addressServerUrl);
    state.put("envIdUrl", envIdUrl);
    state.put("addressServerFailCount", addressServerFailCount);
    return state;
}
|
// info() must expose exactly the four documented keys with the configured URLs.
@Test
void testInfo() {
    Map<String, Object> infos = addressServerMemberLookup.info();
    assertEquals(4, infos.size());
    assertTrue(infos.containsKey("addressServerHealth"));
    assertTrue(infos.containsKey("addressServerUrl"));
    assertTrue(infos.containsKey("envIdUrl"));
    assertTrue(infos.containsKey("addressServerFailCount"));
    assertEquals(addressServerUrl, infos.get("addressServerUrl"));
    assertEquals(envIdUrl, infos.get("envIdUrl"));
}
|
/**
 * Transforms each element of {@code srcList} into an instance of
 * {@code clazz} via {@link #transform}.
 *
 * @return a new list of transformed elements; an empty list for null/empty input
 */
public static <T> List<T> batchTransform(final Class<T> clazz, List<?> srcList) {
    if (CollectionUtils.isEmpty(srcList)) {
        return Collections.emptyList();
    }
    final List<T> transformed = new ArrayList<>(srcList.size());
    for (final Object source : srcList) {
        transformed.add(transform(clazz, source));
    }
    return transformed;
}
|
// An empty source list must yield a non-null (empty) result, never null.
@Test
public void testBatchTransformListIsEmpty() {
    assertNotNull(BeanUtils.batchTransform(String.class, someList));
}
|
// Returns the first configured server address.
// NOTE(review): throws IndexOutOfBoundsException if the list is empty — confirm
// callers guarantee at least one address.
public URI getServerAddress() {
    return serverAddresses.get(0);
}
|
// An http address with no explicit port must be normalized to port 80.
@Test
public void shouldParseHttpAddressWithoutPort() throws Exception {
    // Given:
    final String serverAddress = "http://singleServer";
    final URI serverURI = new URI(serverAddress.concat(":80"));
    // When:
    try (KsqlRestClient ksqlRestClient = clientWithServerAddresses(serverAddress)) {
        // Then:
        assertThat(ksqlRestClient.getServerAddress(), is(serverURI));
    }
}
|
/**
 * Checks whether {@code actualVersion} satisfies {@code acceptableVersionRange}.
 * A bare version like "1.4" is treated as an inclusive lower bound,
 * i.e. "[1.4,)". Otherwise the range must be an interval such as
 * "[1.0,2.0)" with at least one bound; '[' / ']' mark inclusive ends.
 *
 * @throws IllegalArgumentException if the range is malformed or both bounds are empty
 */
public boolean compatibleVersion(String acceptableVersionRange, String actualVersion) {
    V pluginVersion = parseVersion(actualVersion);
    // Treat a single version "1.4" as a left bound, equivalent to "[1.4,)"
    if (acceptableVersionRange.matches(VERSION_REGEX)) {
        return ge(pluginVersion, parseVersion(acceptableVersionRange));
    }
    // Otherwise ensure it is a version range with bounds
    Matcher matcher = INTERVAL_PATTERN.matcher(acceptableVersionRange);
    Preconditions.checkArgument(matcher.matches(), "invalid version range");
    String leftBound = matcher.group("left");
    String rightBound = matcher.group("right");
    Preconditions.checkArgument(
        leftBound != null || rightBound != null, "left and right bounds cannot both be empty");
    // Inclusive brackets use >= / <=; parentheses use strict > / <.
    BiPredicate<V, V> leftComparator =
        acceptableVersionRange.startsWith("[") ? VersionChecker::ge : VersionChecker::gt;
    BiPredicate<V, V> rightComparator =
        acceptableVersionRange.endsWith("]") ? VersionChecker::le : VersionChecker::lt;
    if (leftBound != null && !leftComparator.test(pluginVersion, parseVersion(leftBound))) {
        return false;
    }
    if (rightBound != null && !rightComparator.test(pluginVersion, parseVersion(rightBound))) {
        return false;
    }
    return true;
}
|
// A bare version acts as an inclusive lower bound, so an exact match passes.
@Test
public void testMinimumBound_exact() {
    Assert.assertTrue(checker.compatibleVersion("2.3", "2.3"));
}
|
/**
 * Reads an image file, writes a flipped copy to {@code outFile}, and always
 * releases the in-memory image afterwards.
 *
 * @throws IORuntimeException on read/write failure
 */
public static void flip(File imageFile, File outFile) throws IORuntimeException {
    BufferedImage image = null;
    try {
        image = read(imageFile);
        flip(image, outFile);
    } finally {
        // Release native image resources even if read/flip failed.
        flush(image);
    }
}
|
// Manual smoke test against local files; disabled for CI.
@Test
@Disabled
public void flipTest() {
    ImgUtil.flip(FileUtil.file("d:/logo.png"), FileUtil.file("d:/result.png"));
}
|
// Final result is the union of everything accumulated so far.
@Nonnull
@Override
public Sketch getResult() {
    return unionAll();
}
|
// Two disjoint 1000-element sketches accumulated under threshold 3 must
// union to the sum of their individual estimates.
@Test
public void testThresholdBehavior() {
    UpdateSketch input1 = Sketches.updateSketchBuilder().build();
    IntStream.range(0, 1000).forEach(input1::update);
    Sketch sketch1 = input1.compact();
    UpdateSketch input2 = Sketches.updateSketchBuilder().build();
    IntStream.range(1000, 2000).forEach(input2::update);
    Sketch sketch2 = input2.compact();
    ThetaSketchAccumulator accumulator = new ThetaSketchAccumulator(_setOperationBuilder, 3);
    accumulator.apply(sketch1);
    accumulator.apply(sketch2);
    Assert.assertEquals(accumulator.getResult().getEstimate(), sketch1.getEstimate() + sketch2.getEstimate());
}
|
/**
 * Adds the given providers to this group, de-duplicating against the current
 * members. The member list is rebuilt and swapped in (copy-on-write style)
 * rather than mutated in place.
 *
 * @return this group, for chaining; unchanged if the input is null/empty
 */
public ProviderGroup addAll(Collection<ProviderInfo> providerInfos) {
    if (CommonUtils.isEmpty(providerInfos)) {
        return this;
    }
    ConcurrentHashSet<ProviderInfo> tmp = new ConcurrentHashSet<ProviderInfo>(this.providerInfos);
    tmp.addAll(providerInfos); // de-duplicate
    this.providerInfos = new ArrayList<ProviderInfo>(tmp);
    return this;
}
|
// Covers null, empty, fresh, and overlapping inputs: duplicates must not
// inflate the group size.
@Test
public void addAll() throws Exception {
    ProviderGroup pg = new ProviderGroup("xxx", null);
    Assert.assertTrue(pg.size() == 0);
    pg.addAll(null);
    Assert.assertTrue(pg.size() == 0);
    pg.addAll(new ArrayList<ProviderInfo>());
    Assert.assertTrue(pg.size() == 0);
    pg.addAll(Arrays.asList(ProviderHelper.toProviderInfo("127.0.0.1:12200"),
        ProviderHelper.toProviderInfo("127.0.0.1:12201")));
    Assert.assertTrue(pg.size() == 2);
    pg.addAll(Collections.singleton(ProviderHelper.toProviderInfo("127.0.0.1:12202")));
    Assert.assertTrue(pg.size() == 3);
    // duplicates: only the new address counts
    pg.addAll(Arrays.asList(ProviderHelper.toProviderInfo("127.0.0.1:12201"),
        ProviderHelper.toProviderInfo("127.0.0.1:12203")));
    Assert.assertTrue(pg.size() == 4);
    // all duplicates: size unchanged
    pg.addAll(Arrays.asList(ProviderHelper.toProviderInfo("127.0.0.1:12201"),
        ProviderHelper.toProviderInfo("127.0.0.1:12202")));
    Assert.assertTrue(pg.size() == 4);
}
|
// Convenience overload: strict ordering under the natural ordering.
public void isInStrictOrder() {
    isInStrictOrder(Ordering.natural());
}
|
// Mixing Integer and String under natural ordering must surface the
// ClassCastException from the comparison itself.
@Test
public void isInStrictOrderWithNonComparableElementsFailure() {
    try {
        assertThat(asList((Object) 1, "2", 3, "4")).isInStrictOrder();
        fail("Should have thrown.");
    } catch (ClassCastException expected) {
    }
}
|
/**
 * Registers a recurring job described by build-time metadata, or deletes it
 * when the resolved cron/interval placeholder equals the "disabled" marker.
 * Exactly one of cron or interval must be supplied.
 *
 * @throws IllegalArgumentException if neither or both of cron/interval are given
 */
public void schedule(BeanContainer container, String id, String cron, String interval, String zoneId, String className, String methodName, List<JobParameter> parameterList) {
    final JobScheduler jobScheduler = container.beanInstance(JobScheduler.class);
    final String resolvedId = getId(id);
    final String resolvedCron = getCronExpression(cron);
    final String resolvedInterval = getInterval(interval);
    if (StringUtils.isNullOrEmpty(cron) && StringUtils.isNullOrEmpty(resolvedInterval)) {
        throw new IllegalArgumentException("Either cron or interval attribute is required.");
    }
    if (StringUtils.isNotNullOrEmpty(cron) && StringUtils.isNotNullOrEmpty(resolvedInterval)) {
        throw new IllegalArgumentException("Both cron and interval attribute provided. Only one is allowed.");
    }
    final boolean disabled = Recurring.RECURRING_JOB_DISABLED.equals(resolvedCron)
        || Recurring.RECURRING_JOB_DISABLED.equals(resolvedInterval);
    if (disabled) {
        // A placeholder resolved to the disabled marker: remove instead of schedule.
        if (isNullOrEmpty(resolvedId)) {
            LOGGER.warn("You are trying to disable a recurring job using placeholders but did not define an id.");
        } else {
            jobScheduler.deleteRecurringJob(resolvedId);
        }
        return;
    }
    final JobDetails jobDetails = new JobDetails(className, null, methodName, parameterList);
    jobDetails.setCacheable(true);
    if (isNotNullOrEmpty(resolvedCron)) {
        jobScheduler.scheduleRecurrently(id, jobDetails, CronExpression.create(resolvedCron), getZoneId(zoneId));
    } else {
        jobScheduler.scheduleRecurrently(id, jobDetails, new Interval(resolvedInterval), getZoneId(zoneId));
    }
}
|
// A cron of "-" (the disabled marker) must delete the recurring job rather
// than schedule it.
@Test
void scheduleDeletesJobFromJobRunrIfCronExpressionIsCronDisabled() {
    final String id = "my-job-id";
    final JobDetails jobDetails = jobDetails().build();
    final String cron = "-";
    final String interval = null;
    final String zoneId = null;
    jobRunrRecurringJobRecorder.schedule(beanContainer, id, cron, interval, zoneId, jobDetails.getClassName(), jobDetails.getMethodName(), jobDetails.getJobParameters());
    verify(jobScheduler).deleteRecurringJob(id);
}
|
/**
 * Lists reservations for a queue, optionally filtered by reservation id and
 * a [startTime, endTime] window. A negative/unset end time means "no upper
 * bound". ACLs are checked before querying the plan.
 */
@Override
public ReservationListResponse listReservations(
    ReservationListRequest requestInfo) throws YarnException, IOException {
    // Check if reservation system is enabled
    checkReservationSystem();
    ReservationListResponse response =
        recordFactory.newRecordInstance(ReservationListResponse.class);
    Plan plan = rValidator.validateReservationListRequest(
        reservationSystem, requestInfo);
    boolean includeResourceAllocations = requestInfo
        .getIncludeResourceAllocations();
    ReservationId reservationId = null;
    if (requestInfo.getReservationId() != null && !requestInfo
        .getReservationId().isEmpty()) {
        reservationId = ReservationId.parseReservationId(
            requestInfo.getReservationId());
    }
    checkReservationACLs(requestInfo.getQueue(),
        AuditConstants.LIST_RESERVATION_REQUEST, reservationId);
    // Clamp the search window: negative start becomes 0, unset end becomes +inf.
    long startTime = Math.max(requestInfo.getStartTime(), 0);
    long endTime = requestInfo.getEndTime() <= -1? Long.MAX_VALUE : requestInfo
        .getEndTime();
    Set<ReservationAllocation> reservations;
    reservations = plan.getReservations(reservationId, new ReservationInterval(
        startTime, endTime));
    List<ReservationAllocationState> info =
        ReservationSystemUtil.convertAllocationsToReservationInfo(
            reservations, includeResourceAllocations);
    response.setReservationAllocationState(info);
    return response;
}
|
// Submits one reservation, then queries four time windows that cannot
// contain it; every query must return an empty allocation list.
@Test
public void testListReservationsByTimeIntervalContainingNoReservations() {
    resourceManager = setupResourceManager();
    ClientRMService clientService = resourceManager.getClientRMService();
    Clock clock = new UTCClock();
    long arrival = clock.getTime();
    long duration = 60000;
    long deadline = (long) (arrival + 1.05 * duration);
    ReservationSubmissionRequest sRequest =
        submitReservationTestHelper(clientService, arrival, deadline, duration);
    // List reservations, search by very large start time.
    ReservationListRequest request = ReservationListRequest.newInstance(
        ReservationSystemTestUtil.reservationQ, "", Long.MAX_VALUE, -1, false);
    ReservationListResponse response = null;
    try {
        response = clientService.listReservations(request);
    } catch (Exception e) {
        Assert.fail(e.getMessage());
    }
    // Ensure all reservations are filtered out.
    Assert.assertNotNull(response);
    assertThat(response.getReservationAllocationState()).isEmpty();
    duration = 30000;
    deadline = sRequest.getReservationDefinition().getDeadline();
    // List reservations, search by start time after the reservation
    // end time.
    request = ReservationListRequest.newInstance(
        ReservationSystemTestUtil.reservationQ, "", deadline + duration,
        deadline + 2 * duration, false);
    response = null;
    try {
        response = clientService.listReservations(request);
    } catch (Exception e) {
        Assert.fail(e.getMessage());
    }
    // Ensure all reservations are filtered out.
    Assert.assertNotNull(response);
    assertThat(response.getReservationAllocationState()).isEmpty();
    arrival = clock.getTime();
    // List reservations, search by end time before the reservation start
    // time.
    request = ReservationListRequest.newInstance(
        ReservationSystemTestUtil.reservationQ, "", 0, arrival - duration,
        false);
    response = null;
    try {
        response = clientService.listReservations(request);
    } catch (Exception e) {
        Assert.fail(e.getMessage());
    }
    // Ensure all reservations are filtered out.
    Assert.assertNotNull(response);
    assertThat(response.getReservationAllocationState()).isEmpty();
    // List reservations, search by very small end time.
    request = ReservationListRequest
        .newInstance(ReservationSystemTestUtil.reservationQ, "", 0, 1, false);
    response = null;
    try {
        response = clientService.listReservations(request);
    } catch (Exception e) {
        Assert.fail(e.getMessage());
    }
    // Ensure all reservations are filtered out.
    Assert.assertNotNull(response);
    assertThat(response.getReservationAllocationState()).isEmpty();
}
|
/**
 * Returns a composite range iterator spanning all underlying store instances
 * for this store name. InvalidStateStoreException from any instance is
 * rethrown with a rediscovery hint, since the store may have migrated.
 */
@Override
public KeyValueIterator<K, V> range(final K from, final K to) {
    final NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>> nextIteratorFunction = new NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>>() {
        @Override
        public KeyValueIterator<K, V> apply(final ReadOnlyKeyValueStore<K, V> store) {
            try {
                return store.range(from, to);
            } catch (final InvalidStateStoreException e) {
                throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.");
            }
        }
    };
    final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
    // Peeking wrapper supports hasNext()/peek semantics over the composite.
    return new DelegatingPeekingKeyValueIterator<>(
        storeName,
        new CompositeKeyValueIterator<>(stores.iterator(), nextIteratorFunction));
}
|
// With one entry in range, the second next() must throw NoSuchElementException.
@Test
public void shouldThrowNoSuchElementExceptionWhileNext() {
    stubOneUnderlying.put("a", "1");
    try (final KeyValueIterator<String, String> keyValueIterator = theStore.range("a", "b")) {
        keyValueIterator.next();
        assertThrows(NoSuchElementException.class, keyValueIterator::next);
    }
}
|
/**
 * Builds a {@code RestResult} whose code and message come from the given
 * result code and whose payload is {@code data}.
 */
public static <T> RestResult<T> buildResult(IResultCode resultCode, T data) {
    return RestResult.<T>builder()
            .withCode(resultCode.getCode())
            .withMsg(resultCode.getCodeMsg())
            .withData(data)
            .build();
}
|
@Test
void testBuildResult() {
// Ad-hoc IResultCode so the test does not depend on any predefined enum.
IResultCode mockCode = new IResultCode() {
@Override
public int getCode() {
return 503;
}
@Override
public String getCodeMsg() {
return "limited";
}
};
// The built result must carry the code, message and payload unchanged.
RestResult<String> restResult = RestResultUtils.buildResult(mockCode, "content");
assertRestResult(restResult, 503, "limited", "content", false);
}
|
/**
 * Converts a 32-bit integer into an IPv4 address.
 */
public static IpAddress valueOf(int value) {
    // ByteBuffer defaults to big-endian, which matches network byte order
    // for the four address octets.
    final ByteBuffer buffer = ByteBuffer.allocate(INET_BYTE_LENGTH);
    buffer.putInt(value);
    return new IpAddress(Version.INET, buffer.array());
}
|
// A null byte array must be rejected with a NullPointerException
// (asserted via the @Test(expected = ...) annotation).
@Test(expected = NullPointerException.class)
public void testInvalidValueOfNullArrayIPv4() {
IpAddress ipAddress;
byte[] value;
value = null;
ipAddress = IpAddress.valueOf(IpAddress.Version.INET, value);
}
|
/**
 * Returns the URI advertised to other nodes, falling back to the default
 * HTTP URI when unset or when the configured host is a wildcard address.
 */
public URI getHttpPublishUri() {
    if (httpPublishUri == null) {
        final URI fallback = getDefaultHttpUri();
        LOG.debug("No \"http_publish_uri\" set. Using default <{}>.", fallback);
        return fallback;
    }
    final InetAddress inetAddress = toInetAddress(httpPublishUri.getHost());
    if (Tools.isWildcardInetAddress(inetAddress)) {
        // A wildcard host (e.g. 0.0.0.0) is not reachable by peers; keep the
        // configured path but substitute the default host/port.
        final URI fallback = getDefaultHttpUri(httpPublishUri.getPath());
        LOG.warn("\"{}\" is not a valid setting for \"http_publish_uri\". Using default <{}>.", httpPublishUri, fallback);
        return fallback;
    }
    return Tools.normalizeURI(httpPublishUri, httpPublishUri.getScheme(), GRAYLOG_DEFAULT_PORT, httpPublishUri.getPath());
}
|
@Test
public void testHttpPublishUriWildcardKeepsPath() throws RepositoryException, ValidationException {
// Wildcard publish host must be replaced by the default URI, but the
// configured "/api/" path must survive the substitution.
final Map<String, String> properties = ImmutableMap.of(
"http_bind_address", "0.0.0.0:9000",
"http_publish_uri", "http://0.0.0.0:9000/api/");
jadConfig.setRepository(new InMemoryRepository(properties)).addConfigurationBean(configuration).process();
// isNotEqualTo is intentional: the wildcard URI itself must NOT be returned.
assertThat(configuration.getHttpPublishUri())
.hasPath("/api/")
.isNotEqualTo(URI.create("http://0.0.0.0:9000/api/"));
}
|
/**
 * Fails unless the subject has exactly {@code expectedSize} entries.
 */
public final void hasSize(int expectedSize) {
    // A negative expectation is a caller bug, not an assertion failure.
    checkArgument(expectedSize >= 0, "expectedSize(%s) must be >= 0", expectedSize);
    final int actualSize = checkNotNull(actual).size();
    check("size()").that(actualSize).isEqualTo(expectedSize);
}
|
@Test
public void hasSizeNegative() {
// A negative expected size must be rejected as a precondition violation,
// not reported as an assertion failure.
try {
assertThat(ImmutableMultimap.of(1, 2)).hasSize(-1);
fail();
} catch (IllegalArgumentException expected) {
}
}
|
/**
 * Normalizes a config property into a list of strings.
 *
 * <p>If {@code propertyValue} is already a {@code List}, its elements are
 * copied as-is. If it is a {@code String}, it is split on
 * {@code listSeparator} (interpreted as a regex by {@link String#split}),
 * each token is trimmed, and empty tokens are dropped.
 *
 * @param propertyValue the raw config value; may be {@code null}
 * @param listSeparator separator regex for the String form; must be non-null
 *                      when {@code propertyValue} is a String
 * @return the parsed values; empty when {@code propertyValue} is {@code null}
 * @throws IllegalArgumentException if a String value is given with a null separator
 */
public static List<String> buildList(Object propertyValue, String listSeparator)
{
    List<String> valueList = new ArrayList<>();
    if (propertyValue != null)
    {
        if (propertyValue instanceof List<?>)
        {
            @SuppressWarnings("unchecked")
            List<String> list = (List<String>)propertyValue;
            valueList.addAll(list);
        }
        else
        {
            // list was expressed as a String in the config
            String propertyValueString = (String)propertyValue;
            if (listSeparator == null)
            {
                throw new IllegalArgumentException("The separator cannot be null!");
            }
            for (String value: propertyValueString.split(listSeparator))
            {
                // Fixed: trim BEFORE the emptiness check. Previously a
                // whitespace-only token (e.g. the middle of "a, ,b") passed
                // !value.isEmpty() and was added as "" after trimming.
                String trimmed = value.trim();
                if (!trimmed.isEmpty())
                {
                    valueList.add(trimmed);
                }
            }
        }
    }
    return valueList;
}
|
@Test
public void testNullObject()
{
// A null property value must yield an empty list, never null or an exception.
List<String> emptyList = ConfigValueExtractor.buildList(null, ",");
Assert.assertTrue(emptyList.isEmpty());
}
|
@Override
public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay way, IntsRef relationFlags) {
// Store the numeric max-weight value (in tons) from the standard weight tags.
OSMValueExtractor.extractTons(edgeId, edgeIntAccess, way, weightEncoder, MAX_WEIGHT_TAGS);
// vehicle:conditional no @ (weight > 7.5)
for (String restriction : HGV_RESTRICTIONS) {
String value = way.getTag(restriction, "");
// Only accept short "no"/"none" prefixes before the '@' condition; note
// indexOf returning -1 (no '@' at all) also satisfies "< 6" — presumably
// conditionalWeightToTons then yields NaN and is skipped; TODO confirm.
if (value.startsWith("no") && value.indexOf("@") < 6) { // no,none[ ]@
double dec = OSMValueExtractor.conditionalWeightToTons(value);
if (!Double.isNaN(dec)) weightEncoder.setDecimal(false, edgeId, edgeIntAccess, dec);
}
}
}
|
@Test
public void testSimpleTags() {
// Plain maxweight tags must be stored as-is, and values above the encoder
// maximum must be clamped rather than treated as unlimited.
EdgeIntAccess edgeIntAccess = new ArrayEdgeIntAccess(1);
ReaderWay readerWay = new ReaderWay(1);
readerWay.setTag("highway", "primary");
readerWay.setTag("maxweight", "5");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(5.0, mwEnc.getDecimal(false, edgeId, edgeIntAccess), .01);
// if value is beyond the maximum then do not use infinity instead fallback to more restrictive maximum
edgeIntAccess = new ArrayEdgeIntAccess(1);
readerWay.setTag("maxweight", "54");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(51, mwEnc.getDecimal(false, edgeId, edgeIntAccess), .01);
}
|
/**
 * Executes a LIST QUERIES statement, merging local state with state gathered
 * from the other hosts in the cluster.
 */
public static StatementExecutorResponse execute(
    final ConfiguredStatement<ListQueries> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
    // Fans the request out to remote hosts so the response covers the cluster.
    final RemoteHostExecutor remoteHostExecutor = RemoteHostExecutor.create(
        statement,
        sessionProperties,
        executionContext,
        serviceContext.getKsqlClient()
    );
    // EXTENDED output carries per-query detail; the simple form only summarizes.
    if (statement.getStatement().getShowExtended()) {
        return executeExtended(statement, sessionProperties, executionContext, remoteHostExecutor);
    }
    return executeSimple(statement, executionContext, remoteHostExecutor);
}
|
@Test
public void shouldScatterGatherAndMergeShowQueries() {
// One query id exists both locally (RUNNING) and remotely (ERROR); the
// merged result must report a single RunningQuery with both status counts.
// Given
when(sessionProperties.getInternalRequest()).thenReturn(false);
final ConfiguredStatement<?> showQueries = engine.configure("SHOW QUERIES;");
final PersistentQueryMetadata localMetadata = givenPersistentQuery("id", RUNNING_QUERY_STATE);
final PersistentQueryMetadata remoteMetadata = givenPersistentQuery("id", ERROR_QUERY_STATE);
when(mockKsqlEngine.getAllLiveQueries()).thenReturn(ImmutableList.of(localMetadata));
when(mockKsqlEngine.getPersistentQueries()).thenReturn(ImmutableList.of(localMetadata));
final List<RunningQuery> remoteRunningQueries = Collections.singletonList(persistentQueryMetadataToRunningQuery(
remoteMetadata,
new QueryStatusCount(Collections.singletonMap(KsqlQueryStatus.ERROR, 1))));
when(remoteQueries.getQueries()).thenReturn(remoteRunningQueries);
when(ksqlEntityList.get(anyInt())).thenReturn(remoteQueries);
when(response.getResponse()).thenReturn(ksqlEntityList);
queryStatusCount.updateStatusCount(RUNNING_QUERY_STATE, 1);
queryStatusCount.updateStatusCount(ERROR_QUERY_STATE, 1);
// When
final Queries queries = (Queries) CustomExecutors.LIST_QUERIES.execute(
showQueries,
sessionProperties,
mockKsqlEngine,
serviceContext
).getEntity().orElseThrow(IllegalStateException::new);
// Then
assertThat(queries.getQueries(), containsInAnyOrder(persistentQueryMetadataToRunningQuery(localMetadata, queryStatusCount)));
}
|
/**
 * Renders all recorded operations into one string: either each operation's
 * debug info on its own line, or compact summaries separated by ';',
 * followed by the duration info.
 */
public synchronized String getSummary(boolean showDebugInfo) {
    final StringBuilder buffer = new StringBuilder();
    for (Operation operation : ops) {
        if (operation == null) {
            continue;
        }
        if (showDebugInfo) {
            buffer.append(operation.getDebugInfo()).append("\n");
        } else {
            operation.getSummary(buffer);
            buffer.append(";");
        }
    }
    buffer.append("\n");
    getDurationInfo(buffer);
    return buffer.toString();
}
|
@Test
public void testGetSummary() throws Exception {
// Each recorded operation must be summarized with its short code.
verifySummary("getPrefetched", "GP");
verifySummary("getCached", "GC");
verifySummary("getRead", "GR");
verifySummary("release", "RL");
verifySummary("requestPrefetch", "RP");
verifySummary("prefetch", "PF");
verifySummary("requestCaching", "RC");
verifySummary("addToCache", "C+");
verifySummaryNoArg("cancelPrefetches", "CP");
verifySummaryNoArg("close", "CX");
}
|
/**
 * Rewrites the expression tree, letting the operator plugin replace
 * the nodes it recognizes.
 */
public Expression rewrite(final Expression expression) {
    final OperatorPlugin plugin = new OperatorPlugin();
    return new ExpressionTreeRewriter<>(plugin::process).rewrite(expression, null);
}
|
@Test
public void shouldReplaceComparisonOfRowTimeAndString() {
// A string literal compared against ROWTIME must be rewritten into the
// equivalent epoch-millis literal.
// Given:
final Expression predicate = getPredicate(
"SELECT * FROM orders where ROWTIME > '2017-01-01T00:00:00.000';");
// When:
final Expression rewritten = rewriter.rewrite(predicate);
// Then:
assertThat(rewritten.toString(), is(String.format("(ORDERS.ROWTIME > %d)", A_TIMESTAMP)));
}
|
/**
 * Resolves the function overload matching the given argument types.
 * Exact (cast-free) resolution is always attempted first so that a perfectly
 * matching overload wins over one only reachable via implicit coercion.
 */
T getFunction(final List<SqlArgument> arguments) {
    final Optional<T> exactMatch = findMatchingCandidate(arguments, false);
    if (exactMatch.isPresent()) {
        return exactMatch.get();
    }
    // No exact match: give up immediately when implicit casts are disallowed.
    if (!supportsImplicitCasts) {
        throw createNoMatchingFunctionException(arguments);
    }
    // Retry, this time permitting implicit casts of the argument types.
    return findMatchingCandidate(arguments, true)
        .orElseThrow(() -> createNoMatchingFunctionException(arguments));
}
|
@Test
public void shouldFindTwoDifferentArgs() {
// An overload taking (STRING, INT) must be found for those exact argument types.
// Given:
givenFunctions(
function(EXPECTED, -1, STRING, INT)
);
// When:
final KsqlScalarFunction fun = udfIndex
.getFunction(ImmutableList.of(SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.INTEGER)));
// Then:
assertThat(fun.name(), equalTo(EXPECTED));
}
|
// Resolves the Assertion Consumer Service URL for an AuthnRequest, in order of
// precedence: explicit URL in the request, then the indexed/default/sole
// endpoint from the connection's metadata.
public void resolveAssertionConsumerService(AuthenticationRequest authenticationRequest) throws SamlValidationException {
// set URL if set in authnRequest
final String authnAcsURL = authenticationRequest.getAuthnRequest().getAssertionConsumerServiceURL();
if (authnAcsURL != null) {
authenticationRequest.setAssertionConsumerURL(authnAcsURL);
return;
}
// search url from metadata endpoints
final Integer authnAcsIdx = authenticationRequest.getAuthnRequest().getAssertionConsumerServiceIndex();
List<Endpoint> endpoints = authenticationRequest.getConnectionEntity().getRoleDescriptors().get(0).getEndpoints(AssertionConsumerService.DEFAULT_ELEMENT_NAME);
if (endpoints.isEmpty()) {
throw new SamlValidationException("Authentication: Assertion Consumer Service not found in metadata");
}
// NOTE(review): only the upper bound is validated; a negative index would
// fall through to endpoints.get(authnAcsIdx) below — confirm indexes are
// validated upstream.
if (authnAcsIdx != null && endpoints.size() <= authnAcsIdx) {
throw new SamlValidationException("Authentication: Assertion Consumer Index is out of bounds");
}
// TODO: check if this statement is correct
// NOTE(review): with a single endpoint, any provided index (including a
// non-zero one) is ignored in favor of the sole endpoint.
if (endpoints.size() == 1) {
authenticationRequest.setAssertionConsumerURL(endpoints.get(0).getLocation());
return;
}
// No index given: fall back to the endpoint flagged isDefault in metadata.
if(authnAcsIdx == null) {
AssertionConsumerService defaultAcs = endpoints.stream()
.filter(e -> e instanceof AssertionConsumerService)
.map(acs -> (AssertionConsumerService) acs)
.filter(IndexedEndpoint::isDefault)
.findAny()
.orElse(null);
if (defaultAcs == null) {
throw new SamlValidationException("Authentication: There is no default AssertionConsumerService");
}
authenticationRequest.setAssertionConsumerURL(defaultAcs.getLocation());
return;
}
// Index given and in range: use the endpoint at that list position.
authenticationRequest.setAssertionConsumerURL(endpoints.get(authnAcsIdx).getLocation());
}
|
@Test
void resolveAcsUrlWithIndex0InMultiAcsNoDefaultMetadata() throws SamlValidationException {
// NOTE(review): the method name says "Index0" but the request sets index 1 —
// confirm which endpoint of the no-default metadata this is meant to select.
AuthnRequest authnRequest = OpenSAMLUtils.buildSAMLObject(AuthnRequest.class);
authnRequest.setAssertionConsumerServiceIndex(1);
AuthenticationRequest authenticationRequest = new AuthenticationRequest();
authenticationRequest.setAuthnRequest(authnRequest);
authenticationRequest.setConnectionEntity(MetadataParser.readMetadata(stubsMultiAcsMetadataFileWithoutDefault, CONNECTION_ENTITY_ID));
assertionConsumerServiceUrlService.resolveAssertionConsumerService(authenticationRequest);
assertEquals("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS", authenticationRequest.getAssertionConsumerURL());
}
|
/**
 * Renders one logging event as a single-line JSON object followed by the
 * platform line separator. Emitted fields, in order: optional "nodename",
 * "process", non-excluded MDC entries, "timestamp", "severity", "logger",
 * "message", and an optional "stacktrace" array covering the cause chain.
 */
@Override
public String doLayout(ILoggingEvent event) {
    StringWriter output = new StringWriter();
    try (JsonWriter json = new JsonWriter(output)) {
        json.beginObject();
        if (!"".equals(nodeName)) {
            json.name("nodename").value(nodeName);
        }
        json.name("process").value(processKey);
        // Copy MDC entries, skipping null values and explicitly excluded keys.
        for (Map.Entry<String, String> entry : event.getMDCPropertyMap().entrySet()) {
            if (entry.getValue() != null && !exclusions.contains(entry.getKey())) {
                json.name(entry.getKey()).value(entry.getValue());
            }
        }
        json
            .name("timestamp").value(DATE_FORMATTER.format(Instant.ofEpochMilli(event.getTimeStamp())))
            .name("severity").value(event.getLevel().toString())
            .name("logger").value(event.getLoggerName())
            // Replace newlines so each event stays on a single line.
            .name("message").value(NEWLINE_REGEXP.matcher(event.getFormattedMessage()).replaceAll("\r"));
        IThrowableProxy tp = event.getThrowableProxy();
        if (tp != null) {
            json.name("stacktrace").beginArray();
            int nbOfTabs = 0;
            // Walk the cause chain, indenting one level further per cause.
            while (tp != null) {
                printFirstLine(json, tp, nbOfTabs);
                render(json, tp, nbOfTabs);
                tp = tp.getCause();
                nbOfTabs++;
            }
            json.endArray();
        }
        json.endObject();
    } catch (Exception e) {
        // Fixed: removed e.printStackTrace() — it duplicated the report on
        // stderr and bypassed logging; the cause is preserved in the rethrow.
        throw new IllegalStateException("BUG - fail to create JSON", e);
    }
    output.write(System.lineSeparator());
    return output.toString();
}
|
@Test
public void test_log_with_throwable_and_no_cause() {
// An event carrying a cause-less exception must produce JSON with a
// "stacktrace" array whose first entry is the exception's own line.
Throwable exception = new IllegalStateException("BOOM");
LoggingEvent event = new LoggingEvent("org.foundation.Caller", (Logger) LoggerFactory.getLogger("the.logger"), Level.WARN, "the message", exception, new Object[0]);
String log = underTest.doLayout(event);
JsonLog json = new Gson().fromJson(log, JsonLog.class);
assertThat(json.process).isEqualTo("web");
assertThat(json.timestamp).isEqualTo(DATE_FORMATTER.format(Instant.ofEpochMilli(event.getTimeStamp())));
assertThat(json.severity).isEqualTo("WARN");
assertThat(json.logger).isEqualTo("the.logger");
assertThat(json.message).isEqualTo("the message");
assertThat(json.stacktrace).hasSizeGreaterThan(5);
assertThat(json.stacktrace[0]).isEqualTo("java.lang.IllegalStateException: BOOM");
assertThat(json.stacktrace[1]).startsWith("at ").contains("LogbackJsonLayoutTest.test_log_with_throwable");
assertThat(json.fromMdc).isNull();
}
|
@Override
public int intValue()
{
// Narrowing cast: values outside the int range are truncated, consistent
// with the Number.intValue() contract.
return (int)value;
}
|
@Override
@Test
void testIntValue()
{
// Spot-check a range of negative and positive values round-trip through
// the COSInteger cache/factory unchanged.
for (int i = -1000; i < 3000; i += 200)
{
assertEquals(i, COSInteger.get(i).intValue());
}
}
|
// Resolves the output schema of an execution step by dispatching to the
// handler registered for the step's concrete class; an unregistered class
// is a programming error.
public LogicalSchema resolve(final ExecutionStep<?> step, final LogicalSchema schema) {
return Optional.ofNullable(HANDLERS.get(step.getClass()))
.map(h -> h.handle(this, schema, step))
.orElseThrow(() -> new IllegalStateException("Unhandled step class: " + step.getClass()));
}
|
@Test
public void shouldResolveSchemaForStreamWindowedAggregate() {
// A windowed aggregate must add the aggregate column plus the
// WINDOWSTART/WINDOWEND bound columns to the resolved schema.
// Given:
givenAggregateFunction("COUNT");
final StreamWindowedAggregate step = new StreamWindowedAggregate(
PROPERTIES,
groupedStreamSource,
formats,
ImmutableList.of(ColumnName.of("ORANGE")),
ImmutableList.of(functionCall("COUNT", "APPLE")),
new TumblingWindowExpression(new WindowTimeClause(10, TimeUnit.SECONDS))
);
// When:
final LogicalSchema result = resolver.resolve(step, SCHEMA);
// Then:
assertThat(result, is(
LogicalSchema.builder()
.keyColumn(ColumnName.of("K0"), SqlTypes.INTEGER)
.valueColumn(ColumnName.of("ORANGE"), SqlTypes.INTEGER)
.valueColumn(ColumnNames.aggregateColumn(0), SqlTypes.BIGINT)
.valueColumn(SystemColumns.WINDOWSTART_NAME, SystemColumns.WINDOWBOUND_TYPE)
.valueColumn(SystemColumns.WINDOWEND_NAME, SystemColumns.WINDOWBOUND_TYPE)
.build())
);
}
|
/**
 * Signs the given claims into a compact JWT (a JWS) using the RSA private
 * key configured in the security config.
 *
 * @param claims the claims to sign
 * @return the compact serialization: Header.Payload.Signature, base64url-encoded
 * @throws JoseException if signing fails
 * @deprecated retained for backward compatibility
 */
@Deprecated
public static String getJwt(JwtClaims claims) throws JoseException {
    RSAPrivateKey privateKey = (RSAPrivateKey) getPrivateKey(
            jwtConfig.getKey().getFilename(),jwtConfig.getKey().getPassword(), jwtConfig.getKey().getKeyName());
    // A JWT is a JWS and/or a JWE with JSON claims as the payload; here we
    // build a JWS whose payload is the JSON form of the claims, signed with
    // the sender's private key.
    JsonWebSignature jws = new JsonWebSignature();
    jws.setPayload(claims.toJson());
    jws.setKey(privateKey);
    // The provider id from the security config is normalized to two digits and
    // prefixes the key id in the token header (e.g. "05100"); when no provider
    // id is configured the kid is used without a prefix.
    String providerId = "";
    if (jwtConfig.getProviderId() != null) {
        providerId = jwtConfig.getProviderId();
        if (providerId.length() == 1) {
            providerId = "0" + providerId;
        } else if (providerId.length() > 2) {
            logger.error("provider_id defined in the security.yml file is invalid; the length should be 2");
            providerId = providerId.substring(0, 2);
        }
    }
    jws.setKeyIdHeaderValue(providerId + jwtConfig.getKey().getKid());
    // RS256 integrity-protects the claims.
    jws.setAlgorithmHeaderValue(AlgorithmIdentifiers.RSA_USING_SHA256);
    return jws.getCompactSerialization();
}
|
@Test
public void longlivedCcLocalAdminScope() throws Exception {
// Token-generation helper rather than an assertion-based test: prints a
// ~10-year client-credentials token with the "admin" scope for manual use.
JwtClaims claims = ClaimsUtil.getTestCcClaimsScope("f7d42348-c647-4efb-a52d-4c5787421e73", "admin");
claims.setExpirationTimeMinutesInTheFuture(5256000);
String jwt = JwtIssuer.getJwt(claims, long_kid, KeyUtil.deserializePrivateKey(long_key, KeyUtil.RSA));
System.out.println("***Long lived token for admin endpoints***: " + jwt);
}
|
// Convenience overload: executes the DDL command with the "restore in
// progress" flag defaulted to false.
public DdlCommandResult execute(
final String sql,
final DdlCommand ddlCommand,
final boolean withQuery,
final Set<SourceName> withQuerySources
) {
return execute(sql, ddlCommand, withQuery, withQuerySources, false);
}
|
@Test
public void shouldThrowOnMismatchedDatasourceType() {
// ALTER TABLE against an existing STREAM must be rejected with a clear error.
// Given:
alterSource = new AlterSourceCommand(EXISTING_STREAM, DataSourceType.KTABLE.getKsqlType(), NEW_COLUMNS);
// When:
final KsqlException e = assertThrows(KsqlException.class,
() -> cmdExec.execute(SQL_TEXT, alterSource, false, NO_QUERY_SOURCES));
// Then:
assertThat(e.getMessage(), is("Incompatible data source type is STREAM, but statement was ALTER TABLE"));
}
|
// Sends each item of the chunk to the configured Camel endpoint, one
// sendBody call per item, in chunk order.
@Override
public void write(Chunk<? extends I> chunk) throws Exception {
for (I item : chunk) {
LOG.debug("writing item [{}]...", item);
template.sendBody(endpointUri, item);
LOG.debug("wrote item");
}
}
|
@Test
public void shouldReadMessage() throws Exception {
// Writing a chunk must deliver its item to the seda:queue endpoint unchanged.
// When
camelItemWriter.write(Chunk.of(message));
// Then
assertEquals(message, consumer().receiveBody("seda:queue"));
}
|
/**
 * Validates that every parameter declared by the template is present (and
 * non-null) in the supplied parameter map; throws the
 * NOTIFY_SEND_TEMPLATE_PARAM_MISS service exception for the first missing key.
 */
@VisibleForTesting
public void validateTemplateParams(NotifyTemplateDO template, Map<String, Object> templateParams) {
    for (String paramKey : template.getParams()) {
        if (templateParams.get(paramKey) == null) {
            throw exception(NOTIFY_SEND_TEMPLATE_PARAM_MISS, paramKey);
        }
    }
}
|
@Test
public void testCheckTemplateParams_paramMiss() {
// Prepare parameters: a template requiring "code" and an empty param map.
NotifyTemplateDO template = randomPojo(NotifyTemplateDO.class,
o -> o.setParams(Lists.newArrayList("code")));
Map<String, Object> templateParams = new HashMap<>();
// No mocking needed.
// Invoke and assert the expected missing-parameter service exception.
assertServiceException(() -> notifySendService.validateTemplateParams(template, templateParams),
NOTIFY_SEND_TEMPLATE_PARAM_MISS, "code");
}
|
/**
 * Selects the SDKs to run against: the sorted set derived from the config
 * and manifest, optionally restricted to the globally enabled SDKs.
 */
@Override
@Nonnull
public List<Sdk> selectSdks(Configuration configuration, UsesSdk usesSdk) {
    Config config = configuration.get(Config.class);
    final Set<Sdk> candidates = new TreeSet<>(configuredSdks(config, usesSdk));
    final Set<Sdk> selected =
        (enabledSdks == null) ? candidates : Sets.intersection(candidates, enabledSdks);
    return new ArrayList<>(selected);
}
|
@Test
public void withExplicitSdk_selectSdks() throws Exception {
// Manifest declares min=19, target=21, max=22; explicit numeric and the
// OLDEST/TARGET/NEWEST aliases must each resolve to the matching SDK.
when(usesSdk.getTargetSdkVersion()).thenReturn(21);
when(usesSdk.getMinSdkVersion()).thenReturn(19);
when(usesSdk.getMaxSdkVersion()).thenReturn(22);
assertThat(sdkPicker.selectSdks(buildConfig(new Config.Builder().setSdk(21)), usesSdk))
.containsExactly(sdkCollection.getSdk(21));
assertThat(
sdkPicker.selectSdks(
buildConfig(new Config.Builder().setSdk(Config.OLDEST_SDK)), usesSdk))
.containsExactly(sdkCollection.getSdk(19));
assertThat(
sdkPicker.selectSdks(
buildConfig(new Config.Builder().setSdk(Config.TARGET_SDK)), usesSdk))
.containsExactly(sdkCollection.getSdk(21));
assertThat(
sdkPicker.selectSdks(
buildConfig(new Config.Builder().setSdk(Config.NEWEST_SDK)), usesSdk))
.containsExactly(sdkCollection.getSdk(22));
// Explicit SDKs outside the min/max range are still honored when requested.
assertThat(sdkPicker.selectSdks(buildConfig(new Config.Builder().setSdk(16)), usesSdk))
.containsExactly(sdkCollection.getSdk(16));
assertThat(sdkPicker.selectSdks(buildConfig(new Config.Builder().setSdk(23)), usesSdk))
.containsExactly(sdkCollection.getSdk(23));
}
|
/**
 * Combines two partition stats field-by-field (row count and total file
 * bytes) using the given reduce operator.
 */
public static HivePartitionStats reduce(HivePartitionStats first, HivePartitionStats second, ReduceOperator operator) {
    final long reducedRowNums =
        reduce(first.getCommonStats().getRowNums(), second.getCommonStats().getRowNums(), operator);
    final long reducedFileBytes =
        reduce(first.getCommonStats().getTotalFileBytes(), second.getCommonStats().getTotalFileBytes(), operator);
    return HivePartitionStats.fromCommonStats(reducedRowNums, reducedFileBytes);
}
|
@Test
public void testReduce() {
// Each operator must combine the two scalar values as named.
Assert.assertEquals(10, HivePartitionStats.reduce(5, 5, HivePartitionStats.ReduceOperator.ADD));
Assert.assertEquals(0, HivePartitionStats.reduce(5, 5, HivePartitionStats.ReduceOperator.SUBTRACT));
Assert.assertEquals(5, HivePartitionStats.reduce(5, 6, HivePartitionStats.ReduceOperator.MIN));
Assert.assertEquals(6, HivePartitionStats.reduce(5, 6, HivePartitionStats.ReduceOperator.MAX));
}
|
/**
 * Decorates a reactive return value (Flux or Mono) with the bulkhead so
 * permits are acquired/released around subscription; any other return type
 * is rejected.
 *
 * @throws IllegalArgumentException if the join point returns null or a
 *         non-reactive type
 */
@Override
public Object handle(ProceedingJoinPoint proceedingJoinPoint, Bulkhead bulkhead,
    String methodName) throws Throwable {
    Object returnValue = proceedingJoinPoint.proceed();
    // Robustness fix: a null return previously surfaced as a bare NPE on
    // returnValue.getClass(); report it like any other unsupported value.
    if (returnValue == null) {
        logger.error("Unsupported type for Reactor BulkHead: null return value");
        throw new IllegalArgumentException(
            "Not Supported type for the BulkHead in Reactor :null");
    }
    if (Flux.class.isAssignableFrom(returnValue.getClass())) {
        Flux<?> fluxReturnValue = (Flux<?>) returnValue;
        return fluxReturnValue.transformDeferred(BulkheadOperator.of(bulkhead));
    } else if (Mono.class.isAssignableFrom(returnValue.getClass())) {
        Mono<?> monoReturnValue = (Mono<?>) returnValue;
        return monoReturnValue.transformDeferred(BulkheadOperator.of(bulkhead));
    } else {
        logger.error("Unsupported type for Reactor BulkHead {}",
            returnValue.getClass().getTypeName());
        throw new IllegalArgumentException(
            "Not Supported type for the BulkHead in Reactor :" + returnValue.getClass()
                .getName());
    }
}
|
@Test
public void testReactorTypes() throws Throwable {
// Both Mono and Flux return values must be accepted and decorated (non-null result).
Bulkhead bulkhead = Bulkhead.ofDefaults("test");
when(proceedingJoinPoint.proceed()).thenReturn(Mono.just("Test"));
assertThat(reactorBulkheadAspectExt.handle(proceedingJoinPoint, bulkhead, "testMethod"))
.isNotNull();
when(proceedingJoinPoint.proceed()).thenReturn(Flux.just("Test"));
assertThat(reactorBulkheadAspectExt.handle(proceedingJoinPoint, bulkhead, "testMethod"))
.isNotNull();
}
|
/**
 * Open-addressing probe: starting at the key's hash slot, scan forward
 * (wrapping via the power-of-two mask) until the key is found or an empty
 * slot (missingValue) ends the probe sequence.
 */
@Override
public boolean containsKey(final Object key)
{
    final int mask = values.length - 1;
    int slot = Hashing.hash(key, mask);
    while (missingValue != values[slot])
    {
        if (key.equals(keys[slot]))
        {
            return true;
        }
        slot = (slot + 1) & mask;
    }
    return false;
}
|
@Test
public void shouldNotContainKeyOfAMissingKey() {
// An empty map must report absence without probing errors.
assertFalse(map.containsKey("1"));
}
|
// Creates an encrypted wallet file using the standard (stronger, slower)
// scrypt parameters N_STANDARD/P_STANDARD.
public static WalletFile createStandard(String password, ECKeyPair ecKeyPair)
throws CipherException {
return create(password, ecKeyPair, N_STANDARD, P_STANDARD);
}
|
@Test
public void testCreateStandard() throws Exception {
// Delegates to the shared round-trip check with the standard scrypt params.
testCreate(Wallet.createStandard(SampleKeys.PASSWORD, SampleKeys.KEY_PAIR));
}
|
// Updates the active farming contract (null clears it), persists it, and
// recomputes the derived contract state.
public void setContract(@Nullable Produce contract)
{
this.contract = contract;
setStoredContract(contract);
handleContractState();
}
|
@Test
public void cabbageContractCabbageDiseasedAndCabbageGrowing()
{
// With one diseased and one healthy growing cabbage patch, the contract
// summary must prefer the healthy patch's state and completion time.
final long unixNow = Instant.now().getEpochSecond();
final long expectedTime = unixNow + 60;
// Get the two allotment patches
final FarmingPatch patch1 = farmingGuildPatches.get(Varbits.FARMING_4773);
final FarmingPatch patch2 = farmingGuildPatches.get(Varbits.FARMING_4774);
assertNotNull(patch1);
assertNotNull(patch2);
// Specify the two allotment patches
when(farmingTracker.predictPatch(patch1))
.thenReturn(new PatchPrediction(Produce.CABBAGE, CropState.GROWING, expectedTime, 2, 3));
when(farmingTracker.predictPatch(patch2))
.thenReturn(new PatchPrediction(Produce.CABBAGE, CropState.DISEASED, 0, 2, 3));
farmingContractManager.setContract(Produce.CABBAGE);
assertEquals(SummaryState.IN_PROGRESS, farmingContractManager.getSummary());
// Prefer healthy cabbages
assertEquals(CropState.GROWING, farmingContractManager.getContractCropState());
assertEquals(expectedTime, farmingContractManager.getCompletionTime());
}
|
// Detaches a remote source task from this exchange client: records the
// removal, then (if a page buffer client exists for the task's location)
// closes it and moves it to the removed/completed sets. No-op if the client
// is already closed or the task/location is unknown.
public synchronized void removeRemoteSource(TaskId sourceTaskId)
{
requireNonNull(sourceTaskId, "sourceTaskId is null");
// Ignore removeRemoteSource call if exchange client is already closed
if (closed.get()) {
return;
}
// Recorded before the location lookup so late-arriving addLocation calls
// for this task can also be ignored.
removedRemoteSourceTaskIds.add(sourceTaskId);
URI location = taskIdToLocationMap.get(sourceTaskId);
if (location == null) {
return;
}
PageBufferClient client = allClients.get(location);
if (client == null) {
return;
}
// Close first, then mark removed and completed so no further requests are scheduled.
closeQuietly(client);
removedClients.add(client);
completedClients.add(client);
}
|
// End-to-end check that removing a remote source stops fetching from it:
// already-buffered pages are still delivered, no further pages arrive from
// the removed source, and the other source continues to be drained normally.
@Test
public void testRemoveRemoteSource()
throws Exception
{
DataSize bufferCapacity = new DataSize(1, BYTE);
DataSize maxResponseSize = new DataSize(1, BYTE);
MockExchangeRequestProcessor processor = new MockExchangeRequestProcessor(maxResponseSize);
URI location1 = URI.create("http://localhost:8081/foo.0.0.0.0");
TaskId taskId1 = TaskId.valueOf("foo.0.0.0.0");
URI location2 = URI.create("http://localhost:8082/bar.0.0.0.0");
TaskId taskId2 = TaskId.valueOf("bar.0.0.0.0");
processor.addPage(location1, createPage(1));
processor.addPage(location1, createPage(2));
processor.addPage(location1, createPage(3));
ExchangeClient exchangeClient = createExchangeClient(processor, bufferCapacity, maxResponseSize);
exchangeClient.addLocation(location1, taskId1);
exchangeClient.addLocation(location2, taskId2);
assertFalse(exchangeClient.isClosed());
// Wait until exactly one page is buffered:
// * We cannot call ExchangeClient#pollPage() directly, since it will schedule the next request to buffer data,
// and this request to buffer data could win the race against the request to remove remote source.
// * Buffer capacity is set to 1 byte, so only one page can be buffered.
waitUntilEquals(() -> exchangeClient.getStatus().getBufferedPages(), 1, new Duration(5, SECONDS));
assertEquals(exchangeClient.getStatus().getBufferedPages(), 1);
// remove remote source
exchangeClient.removeRemoteSource(taskId1);
// the previously buffered page will still be read out
assertPageEquals(getNextPage(exchangeClient), createPage(1));
// client should not receive any further pages from removed remote source
assertNull(exchangeClient.pollPage());
assertEquals(exchangeClient.getStatus().getBufferedPages(), 0);
// add pages to another source
processor.addPage(location2, createPage(4));
processor.addPage(location2, createPage(5));
processor.addPage(location2, createPage(6));
processor.setComplete(location2);
assertFalse(exchangeClient.isClosed());
assertPageEquals(getNextPage(exchangeClient), createPage(4));
assertFalse(exchangeClient.isClosed());
assertPageEquals(getNextPage(exchangeClient), createPage(5));
assertFalse(exchangeClient.isClosed());
assertPageEquals(getNextPage(exchangeClient), createPage(6));
assertFalse(tryGetFutureValue(exchangeClient.isBlocked(), 10, MILLISECONDS).isPresent());
assertFalse(exchangeClient.isClosed());
exchangeClient.noMoreLocations();
// The transition to closed may happen asynchronously, since it requires that all the HTTP clients
// receive a final GONE response, so just spin until it's closed or the test times out.
while (!exchangeClient.isClosed()) {
Thread.sleep(1);
}
// Both page buffer clients must end up closed and unscheduled.
ExchangeClientStatus exchangeClientStatus = exchangeClient.getStatus();
Optional<PageBufferClientStatus> clientStatusOptional1 = exchangeClientStatus.getPageBufferClientStatuses()
.stream()
.filter(pageBufferClientStatus -> pageBufferClientStatus.getUri().equals(location1)).findFirst();
assertTrue(clientStatusOptional1.isPresent());
assertStatus(clientStatusOptional1.get(), "closed", "not scheduled");
Optional<PageBufferClientStatus> clientStatusOptional2 = exchangeClientStatus.getPageBufferClientStatuses()
.stream()
.filter(pageBufferClientStatus -> pageBufferClientStatus.getUri().equals(location2)).findFirst();
assertTrue(clientStatusOptional2.isPresent());
assertStatus(clientStatusOptional2.get(), "closed", "not scheduled");
}
|
// Completes a SendRequest: selects inputs to cover the outputs plus fee,
// optionally empties the wallet, adds a change output, shuffles outputs,
// signs, size-checks, and marks the request completed. Holds the wallet
// lock for the whole operation.
public void completeTx(SendRequest req) throws InsufficientMoneyException, CompletionException {
lock.lock();
try {
checkArgument(!req.completed, () ->
"given SendRequest has already been completed");
log.info("Completing send tx with {} outputs totalling {} and a fee of {}/vkB", req.tx.getOutputs().size(),
req.tx.getOutputSum().toFriendlyString(), req.feePerKb.toFriendlyString());
// Calculate a list of ALL potential candidates for spending and then ask a coin selector to provide us
// with the actual outputs that'll be used to gather the required amount of value. In this way, users
// can customize coin selection policies. The call below will ignore immature coinbases and outputs
// we don't have the keys for.
List<TransactionOutput> prelimCandidates = calculateAllSpendCandidates(true, req.missingSigsMode == MissingSigsMode.THROW);
// Connect (add a value amount) unconnected inputs
List<TransactionInput> inputs = connectInputs(prelimCandidates, req.tx.getInputs());
req.tx.clearInputs();
inputs.forEach(req.tx::addInput);
// Warn if there are remaining unconnected inputs whose value we do not know
// TODO: Consider throwing if there are inputs that we don't have a value for
if (req.tx.getInputs().stream()
.map(TransactionInput::getValue)
.anyMatch(Objects::isNull))
log.warn("SendRequest transaction already has inputs but we don't know how much they are worth - they will be added to fee.");
// If any inputs have already been added, we don't need to get their value from wallet
Coin totalInput = req.tx.getInputSum();
// Calculate the amount of value we need to import.
Coin valueNeeded = req.tx.getOutputSum().subtract(totalInput);
// Enforce the OP_RETURN limit
if (req.tx.getOutputs().stream()
.filter(o -> ScriptPattern.isOpReturn(o.getScriptPubKey()))
.count() > 1) // Only 1 OP_RETURN per transaction allowed.
throw new MultipleOpReturnRequested();
// Check for dusty sends
if (req.ensureMinRequiredFee && !req.emptyWallet) { // Min fee checking is handled later for emptyWallet.
if (req.tx.getOutputs().stream().anyMatch(TransactionOutput::isDust))
throw new DustySendRequested();
}
// Filter out candidates that are already included in the transaction inputs
// NOTE(review): the filter KEEPS outputs for which alreadyIncluded(...) is
// true, which reads as the opposite of the comment above — verify the
// semantics of alreadyIncluded (its definition is outside this view).
List<TransactionOutput> candidates = prelimCandidates.stream()
.filter(output -> alreadyIncluded(req.tx.getInputs(), output))
.collect(StreamUtils.toUnmodifiableList());
CoinSelection bestCoinSelection;
TransactionOutput bestChangeOutput = null;
List<Coin> updatedOutputValues = null;
if (!req.emptyWallet) {
// This can throw InsufficientMoneyException.
FeeCalculation feeCalculation = calculateFee(req, valueNeeded, req.ensureMinRequiredFee, candidates);
bestCoinSelection = feeCalculation.bestCoinSelection;
bestChangeOutput = feeCalculation.bestChangeOutput;
updatedOutputValues = feeCalculation.updatedOutputValues;
} else {
// We're being asked to empty the wallet. What this means is ensuring "tx" has only a single output
// of the total value we can currently spend as determined by the selector, and then subtracting the fee.
checkState(req.tx.getOutputs().size() == 1, () ->
"empty wallet TX must have a single output only");
CoinSelector selector = req.coinSelector == null ? coinSelector : req.coinSelector;
bestCoinSelection = selector.select((Coin) network.maxMoney(), candidates);
candidates = null; // Selector took ownership and might have changed candidates. Don't access again.
req.tx.getOutput(0).setValue(bestCoinSelection.totalValue());
log.info("  emptying {}", bestCoinSelection.totalValue().toFriendlyString());
}
bestCoinSelection.outputs()
.forEach(req.tx::addInput);
if (req.emptyWallet) {
// The single output must shrink to leave room for the fee; fail if it
// would drop below the dust/min-fee floor.
if (!adjustOutputDownwardsForFee(req.tx, bestCoinSelection, req.feePerKb, req.ensureMinRequiredFee))
throw new CouldNotAdjustDownwards();
}
if (updatedOutputValues != null) {
for (int i = 0; i < updatedOutputValues.size(); i++) {
req.tx.getOutput(i).setValue(updatedOutputValues.get(i));
}
}
if (bestChangeOutput != null) {
req.tx.addOutput(bestChangeOutput);
log.info("  with {} change", bestChangeOutput.getValue().toFriendlyString());
}
// Now shuffle the outputs to obfuscate which is the change.
if (req.shuffleOutputs)
req.tx.shuffleOutputs();
// Now sign the inputs, thus proving that we are entitled to redeem the connected outputs.
if (req.signInputs)
signTransaction(req);
// Check size.
final int size = req.tx.messageSize();
if (size > Transaction.MAX_STANDARD_TX_SIZE)
throw new ExceededMaxTransactionSize();
// Label the transaction as being self created. We can use this later to spend its change output even before
// the transaction is confirmed. We deliberately won't bother notifying listeners here as there's not much
// point - the user isn't interested in a confidence transition they made themselves.
getConfidence(req.tx).setSource(TransactionConfidence.Source.SELF);
// Label the transaction as being a user requested payment. This can be used to render GUI wallet
// transaction lists more appropriately, especially when the wallet starts to generate transactions itself
// for internal purposes.
req.tx.setPurpose(Transaction.Purpose.USER_PAYMENT);
// Record the exchange rate that was valid when the transaction was completed.
req.tx.setExchangeRate(req.exchangeRate);
req.tx.setMemo(req.memo);
req.completed = true;
log.info("  completed: {}", req.tx);
} finally {
lock.unlock();
}
}
|
@Test
public void sendRequestMemo() throws Exception {
    // Fund the wallet so completeTx can select inputs.
    receiveATransaction(wallet, myAddress);

    // Build a send request carrying a memo and complete it.
    SendRequest request = SendRequest.to(myAddress, Coin.COIN);
    request.memo = "memo";
    wallet.completeTx(request);

    // completeTx must copy the request's memo onto the resulting transaction.
    assertEquals(request.memo, request.tx.getMemo());
}
|
/**
 * Dissects an encoded network-frame log event into human-readable text.
 *
 * <p>The event payload is laid out as: log header, peer socket address, then
 * the raw frame bytes. The frame type field selects which flyweight decodes
 * the remainder; unknown types are rendered as {@code type=UNKNOWN(n)}.
 *
 * <p>NOTE(review): the flyweights used here (DATA_HEADER, SM_HEADER, ...) are
 * shared statics, so this method does not look safe for concurrent use —
 * presumably dissection runs on a single agent thread; confirm with callers.
 *
 * @param eventCode code identifying the logged event (e.g. frame in/out)
 * @param buffer    buffer containing the encoded event
 * @param offset    offset within {@code buffer} at which the event begins
 * @param builder   destination for the textual representation
 */
static void dissectFrame(
    final DriverEventCode eventCode,
    final MutableDirectBuffer buffer,
    final int offset,
    final StringBuilder builder)
{
    // Decode the common log header first; it returns how many bytes it consumed.
    int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder);
    builder.append(": address=");
    encodedLength += dissectSocketAddress(buffer, offset + encodedLength, builder);
    builder.append(" ");
    // The raw frame starts immediately after the header and address.
    final int frameOffset = offset + encodedLength;
    final int frameType = frameType(buffer, frameOffset);
    switch (frameType)
    {
        // PAD frames share the data-frame header layout.
        case HeaderFlyweight.HDR_TYPE_PAD:
        case HeaderFlyweight.HDR_TYPE_DATA:
            DATA_HEADER.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
            dissectDataFrame(builder);
            break;
        case HeaderFlyweight.HDR_TYPE_SM:
            SM_HEADER.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
            dissectStatusFrame(builder);
            break;
        case HeaderFlyweight.HDR_TYPE_NAK:
            NAK_HEADER.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
            dissectNakFrame(builder);
            break;
        case HeaderFlyweight.HDR_TYPE_SETUP:
            SETUP_HEADER.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
            dissectSetupFrame(builder);
            break;
        case HeaderFlyweight.HDR_TYPE_RTTM:
            RTT_MEASUREMENT.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
            dissectRttFrame(builder);
            break;
        case HeaderFlyweight.HDR_TYPE_RES:
            dissectResFrame(buffer, frameOffset, builder);
            break;
        case HeaderFlyweight.HDR_TYPE_RSP_SETUP:
            RSP_SETUP.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
            dissectRspSetupFrame(builder);
            break;
        default:
            builder.append("type=UNKNOWN(").append(frameType).append(")");
            break;
    }
}
|
// Verifies that a PAD frame is dissected with the data-frame header layout
// and rendered with all its header fields in the expected textual form.
@Test
void dissectFrameTypePad()
{
    // Encode the log header (1s timestamp) followed by the peer address.
    internalEncodeLogHeader(buffer, 0, 5, 5, () -> 1_000_000_000);
    final int socketAddressOffset = encodeSocketAddress(
        buffer, LOG_HEADER_LENGTH, new InetSocketAddress("localhost", 8080));
    // Write a PAD frame directly after the address using the data-header layout.
    final DataHeaderFlyweight flyweight = new DataHeaderFlyweight();
    flyweight.wrap(buffer, LOG_HEADER_LENGTH + socketAddressOffset, 300);
    flyweight.headerType(HDR_TYPE_PAD);
    flyweight.flags((short)13);
    flyweight.frameLength(100);
    flyweight.sessionId(42);
    flyweight.streamId(5);
    flyweight.termId(16);
    flyweight.termOffset(1045);
    dissectFrame(FRAME_IN, buffer, 0, builder);
    // flags=00001101 is the binary rendering of the value 13 set above.
    assertEquals("[1.000000000] " + CONTEXT + ": " + FRAME_IN.name() + " [5/5]: " +
        "address=127.0.0.1:8080 type=PAD flags=00001101 frameLength=100 sessionId=42 streamId=5 termId=16 " +
        "termOffset=1045",
        builder.toString());
}
|
/**
 * {@inheritDoc}
 *
 * <p>Always reports {@code 0}; a real database major version is not provided
 * by this implementation.
 */
@Override
public int getDatabaseMajorVersion() {
    return 0;
}
|
// The metadata implementation hard-codes the database major version to 0.
@Test
void assertGetDatabaseMajorVersion() {
    assertThat(metaData.getDatabaseMajorVersion(), is(0));
}
|
/**
 * Static factory returning a new, empty {@code Inner} instance.
 *
 * @param <T> element type carried by the returned instance
 * @return a freshly constructed {@code Inner}
 */
public static <T> Inner<T> create() {
    return new Inner<>();
}
|
// Verifies that AddFields can add fields inside a nested row using dotted
// field names, and that the added fields become nullable with null values.
@Test
@Category(NeedsRunner.class)
public void addNestedField() {
    Schema nested = Schema.builder().addStringField("field1").build();
    Schema schema = Schema.builder().addRowField("nested", nested).build();
    Row subRow = Row.withSchema(nested).addValue("value").build();
    Row row = Row.withSchema(schema).addValue(subRow).build();
    // Add three new fields of different types under the "nested" row field.
    PCollection<Row> added =
        pipeline
            .apply(Create.of(row).withRowSchema(schema))
            .apply(
                AddFields.<Row>create()
                    .field("nested.field2", Schema.FieldType.INT32)
                    .field("nested.field3", Schema.FieldType.array(Schema.FieldType.STRING))
                    .field("nested.field4", Schema.FieldType.iterable(Schema.FieldType.STRING)));
    // Added fields are expected to be registered as nullable in the new schema.
    Schema expectedNestedSchema =
        Schema.builder()
            .addStringField("field1")
            .addNullableField("field2", Schema.FieldType.INT32)
            .addNullableField("field3", Schema.FieldType.array(Schema.FieldType.STRING))
            .addNullableField("field4", Schema.FieldType.iterable(Schema.FieldType.STRING))
            .build();
    Schema expectedSchema = Schema.builder().addRowField("nested", expectedNestedSchema).build();
    assertEquals(expectedSchema, added.getSchema());
    // Existing data is preserved; the new fields are populated with nulls.
    Row expectedNested =
        Row.withSchema(expectedNestedSchema).addValues("value", null, null, null).build();
    Row expected = Row.withSchema(expectedSchema).addValue(expectedNested).build();
    PAssert.that(added).containsInAnyOrder(expected);
    pipeline.run();
}
|
/**
 * Plans the next batch of splits, refreshing the table first so newly
 * committed snapshots are visible.
 *
 * @param lastPosition enumerator position from the previous cycle, or
 *                     {@code null} on the very first discovery
 * @return the discovery result for this planning cycle
 */
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
    table.refresh();
    // No prior position means this is the initial discovery cycle.
    return lastPosition == null
        ? discoverInitialSplits()
        : discoverIncrementalSplits(lastPosition);
}
|
// Verifies that maxPlanningSnapshotCount(1) limits each discovery cycle to a
// single snapshot, so snapshots are consumed one per planSplits() call.
@Test
public void testMaxPlanningSnapshotCount() throws Exception {
    appendTwoSnapshots();
    // append 3 more snapshots
    for (int i = 2; i < 5; ++i) {
        appendSnapshot(i, 2);
    }
    ScanContext scanContext =
        ScanContext.builder()
            .startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_EARLIEST_SNAPSHOT)
            // limit to 1 snapshot per discovery
            .maxPlanningSnapshotCount(1)
            .build();
    ContinuousSplitPlannerImpl splitPlanner =
        new ContinuousSplitPlannerImpl(TABLE_RESOURCE.tableLoader().clone(), scanContext, null);
    ContinuousEnumerationResult initialResult = splitPlanner.planSplits(null);
    assertThat(initialResult.fromPosition()).isNull();
    // For inclusive behavior, the initial result should point to snapshot1's parent,
    // which leads to null snapshotId and snapshotTimestampMs.
    assertThat(initialResult.toPosition().snapshotId()).isNull();
    assertThat(initialResult.toPosition().snapshotTimestampMs()).isNull();
    assertThat(initialResult.splits()).isEmpty();
    ContinuousEnumerationResult secondResult = splitPlanner.planSplits(initialResult.toPosition());
    // should discover dataFile1 appended in snapshot1
    verifyMaxPlanningSnapshotCountResult(
        secondResult, null, snapshot1, ImmutableSet.of(dataFile1.path().toString()));
    ContinuousEnumerationResult thirdResult = splitPlanner.planSplits(secondResult.toPosition());
    // should discover dataFile2 appended in snapshot2
    verifyMaxPlanningSnapshotCountResult(
        thirdResult, snapshot1, snapshot2, ImmutableSet.of(dataFile2.path().toString()));
}
|
/**
 * Materializes a state store using the given {@link DslStoreSuppliers}.
 *
 * @param storeSuppliers the suppliers defining the store type; must not be null
 * @param <K> key type of the store
 * @param <V> value type of the store
 * @param <S> type of the {@link StateStore}
 * @return a new {@code Materialized} instance backed by {@code storeSuppliers}
 * @throws NullPointerException if {@code storeSuppliers} is null
 */
public static <K, V, S extends StateStore> Materialized<K, V, S> as(final DslStoreSuppliers storeSuppliers) {
    Objects.requireNonNull(storeSuppliers, "store type can't be null");
    return new Materialized<>(storeSuppliers);
}
|
// Store names that are also valid topic names (dashes, dots, underscores)
// must be accepted without throwing.
@Test
public void shouldAllowValidTopicNamesAsStoreName() {
    Materialized.as("valid-name");
    Materialized.as("valid.name");
    Materialized.as("valid_name");
}
|
/**
 * Confirms a previously reserved username hash for the authenticated account.
 *
 * <p>Validates the request (non-empty hash of the exact expected length,
 * non-empty proof, non-empty ciphertext within the size limit), verifies the
 * zero-knowledge proof, applies rate limiting, and then confirms the
 * reservation via the accounts manager.
 *
 * @param request the confirmation request
 * @return the response carrying the confirmed hash and the username link handle
 */
@Override
public Mono<ConfirmUsernameHashResponse> confirmUsernameHash(final ConfirmUsernameHashRequest request) {
  final AuthenticatedDevice authenticatedDevice = AuthenticationUtil.requireAuthenticatedDevice();

  if (request.getUsernameHash().isEmpty()) {
    throw Status.INVALID_ARGUMENT
        .withDescription("Username hash must not be empty")
        .asRuntimeException();
  }

  if (request.getUsernameHash().size() != AccountController.USERNAME_HASH_LENGTH) {
    throw Status.INVALID_ARGUMENT
        .withDescription(String.format("Username hash length must be %d bytes, but was actually %d",
            AccountController.USERNAME_HASH_LENGTH, request.getUsernameHash().size()))
        .asRuntimeException();
  }

  if (request.getZkProof().isEmpty()) {
    throw Status.INVALID_ARGUMENT
        .withDescription("Zero-knowledge proof must not be empty")
        .asRuntimeException();
  }

  if (request.getUsernameCiphertext().isEmpty()) {
    throw Status.INVALID_ARGUMENT
        .withDescription("Username ciphertext must not be empty")
        .asRuntimeException();
  }

  if (request.getUsernameCiphertext().size() > AccountController.MAXIMUM_USERNAME_CIPHERTEXT_LENGTH) {
    // Fixed message: this check is about the ciphertext, not the hash.
    throw Status.INVALID_ARGUMENT
        .withDescription(String.format("Username ciphertext length must be at most %d bytes, but was actually %d",
            AccountController.MAXIMUM_USERNAME_CIPHERTEXT_LENGTH, request.getUsernameCiphertext().size()))
        .asRuntimeException();
  }

  try {
    usernameHashZkProofVerifier.verifyProof(request.getZkProof().toByteArray(), request.getUsernameHash().toByteArray());
  } catch (final BaseUsernameException e) {
    // Preserve the original exception as the cause for server-side diagnostics;
    // gRPC does not transmit causes to the client.
    throw Status.INVALID_ARGUMENT.withDescription("Could not verify proof").withCause(e).asRuntimeException();
  }

  return rateLimiters.getUsernameSetLimiter().validateReactive(authenticatedDevice.accountIdentifier())
      .then(Mono.fromFuture(() -> accountsManager.getByAccountIdentifierAsync(authenticatedDevice.accountIdentifier())))
      .map(maybeAccount -> maybeAccount.orElseThrow(Status.UNAUTHENTICATED::asRuntimeException))
      .flatMap(account -> Mono.fromFuture(() -> accountsManager.confirmReservedUsernameHash(account, request.getUsernameHash().toByteArray(), request.getUsernameCiphertext().toByteArray())))
      .map(updatedAccount -> ConfirmUsernameHashResponse.newBuilder()
          .setUsernameHash(ByteString.copyFrom(updatedAccount.getUsernameHash().orElseThrow()))
          .setUsernameLinkHandle(UUIDUtil.toByteString(updatedAccount.getUsernameLinkHandle()))
          .build())
      .onErrorMap(UsernameReservationNotFoundException.class, throwable -> Status.FAILED_PRECONDITION.asRuntimeException())
      .onErrorMap(UsernameHashNotAvailableException.class, throwable -> Status.NOT_FOUND.asRuntimeException());
}
|
// Happy-path: a valid confirmation request returns the confirmed hash and the
// account's username link handle.
@Test
void confirmUsernameHash() {
    final byte[] usernameHash = TestRandomUtil.nextBytes(AccountController.USERNAME_HASH_LENGTH);
    final byte[] usernameCiphertext = TestRandomUtil.nextBytes(32);
    final byte[] zkProof = TestRandomUtil.nextBytes(32);
    final UUID linkHandle = UUID.randomUUID();

    final Account account = mock(Account.class);
    when(accountsManager.getByAccountIdentifierAsync(AUTHENTICATED_ACI))
        .thenReturn(CompletableFuture.completedFuture(Optional.of(account)));

    // Simulate a successful reservation confirmation returning an updated account.
    when(accountsManager.confirmReservedUsernameHash(account, usernameHash, usernameCiphertext))
        .thenAnswer(invocation -> {
          final Account updatedAccount = mock(Account.class);
          when(updatedAccount.getUsernameHash()).thenReturn(Optional.of(usernameHash));
          when(updatedAccount.getUsernameLinkHandle()).thenReturn(linkHandle);

          return CompletableFuture.completedFuture(updatedAccount);
        });

    final ConfirmUsernameHashResponse expectedResponse = ConfirmUsernameHashResponse.newBuilder()
        .setUsernameHash(ByteString.copyFrom(usernameHash))
        .setUsernameLinkHandle(UUIDUtil.toByteString(linkHandle))
        .build();

    assertEquals(expectedResponse,
        authenticatedServiceStub().confirmUsernameHash(ConfirmUsernameHashRequest.newBuilder()
            .setUsernameHash(ByteString.copyFrom(usernameHash))
            .setUsernameCiphertext(ByteString.copyFrom(usernameCiphertext))
            .setZkProof(ByteString.copyFrom(zkProof))
            .build()));
}
|
public static boolean areExceptionsPresentInChain(Throwable error, Class ... types) {
while (error != null) {
for (Class type : types) {
if (type.isInstance(error)) {
return true;
}
}
error = error.getCause();
}
return false;
}
|
// A type matching a nested cause (IllegalStateException) must be detected even
// though the head of the chain is a different exception type.
@Test
public void testAreExceptionsPresentInChain4() {
    assertTrue(Exceptions.areExceptionsPresentInChain(new IllegalArgumentException(new IllegalStateException()), UnsupportedOperationException.class, IllegalStateException.class));
}
|
/**
 * Runs the named scheduled task. Currently only {@code RE_CHECK_DOCUMENTS} is
 * handled: documents older than the configured interval are re-checked against
 * the BV/BSN service, logged when the check reports NOK, and then deleted.
 *
 * @param name identifier of the scheduled task to run; other names are ignored
 * @throws SharedServiceClientException if reading the interval config fails
 */
public void processScheduledTask(String name) throws SharedServiceClientException {
    if(RE_CHECK_DOCUMENTS.equals(name)) {
        // Cut-off age (in days) comes from shared-service configuration.
        var daysAgo = sharedServiceClient.getSSConfigInt("interval_lost_stolen_check");
        for (IdCheckDocument document : idCheckDocumentRepository.findAllWithCreationDateTimeBefore(ZonedDateTime.now().minusDays(daysAgo))) {
            var response = dwsClient.checkBvBsn(document.getDocumentType(), document.getDocumentNumber());
            // Only NOK results are logged remotely; OK documents are silently removed.
            if (response.get(STATUS).equals(NOK)) {
                digidClient.remoteLog("1577", Map.of(
                        "document_type", document.getDocumentType(),
                        "document_number", document.getDocumentNumber(),
                        "user_app", document.getUserAppId(),
                        "account_id", document.getAccountId(),
                        "created_at", document.getCreatedAt().format(DateTimeFormatter.ISO_LOCAL_DATE),
                        HIDDEN, true
                ));
            }
            // The document is deleted regardless of the check outcome.
            idCheckDocumentRepository.delete(document);
        }
    }
}
|
// A document whose BV/BSN check returns OK must not trigger a remote log entry.
@Test
void processesScheduledTaskValidDocument() throws SharedServiceClientException {
    when(dwsClient.checkBvBsn("document_type", "document_number")).thenReturn(Map.of("status", "OK"));
    when(sharedServiceClient.getSSConfigInt("interval_lost_stolen_check")).thenReturn(7);

    service.processScheduledTask("re_check_documents");

    verify(digidClient, times(0)).remoteLog("1577", Map.of("document_type", "document_type", "document_number", "document_number","user_app", "user_app_id", "account_id", 1L, "created_at", "2022-10-07", "hidden", true));
}
|
/**
 * Removes a dynamic-routing subscription by its ID.
 *
 * @param subscribeChannel channel the subscription was registered on
 * @param subscriptionId   ID of the subscription (filter) to remove
 * @return {@code true} if the underlying filter was found and removed
 */
@ManagedOperation(description = "Unsubscribe for dynamic routing on a channel by subscription ID")
public boolean removeSubscription(
        String subscribeChannel,
        String subscriptionId) {
    // Delegates directly to the filter service; note the argument order swap.
    return filterService.removeFilterById(subscriptionId, subscribeChannel);
}
|
// removeSubscription must delegate to the filter service exactly once,
// forwarding the ID and channel (in that order).
@Test
void removeSubscription() {
    service.removeSubscription(subscribeChannel, subscriptionId);
    Mockito.verify(filterService, Mockito.times(1))
            .removeFilterById(subscriptionId, subscribeChannel);
}
|
/**
 * Initializes the memo from an expression tree, copying every operator into
 * its own group. May only be called once, on an empty memo.
 *
 * @param originExpression root of the logical expression tree
 * @return the group expression created for the root operator
 */
public GroupExpression init(OptExpression originExpression) {
    // Guard against double initialization: both containers must still be empty.
    Preconditions.checkState(groups.isEmpty());
    Preconditions.checkState(groupExpressions.isEmpty());

    // copyIn recursively inserts the tree; the pair's second element is the
    // group expression built for the root operator.
    GroupExpression root = copyIn(null, originExpression).second;
    rootGroup = root.getGroup();
    return root;
}
|
// Initializing the memo with a 4-operator tree (project -> join -> two scans)
// must create one group and one group expression per operator, with group IDs
// assigned bottom-up starting at 0.
@Test
public void testInit(@Mocked OlapTable olapTable1,
                     @Mocked OlapTable olapTable2) {
    new Expectations() {
        {
            olapTable1.getId();
            result = 0;
            minTimes = 0;

            olapTable2.getId();
            result = 1;
            minTimes = 0;
        }
    };

    OptExpression expr = OptExpression.create(new LogicalProjectOperator(Maps.newHashMap()),
            OptExpression.create(new LogicalJoinOperator(),
                    OptExpression.create(new LogicalOlapScanOperator(olapTable1)),
                    OptExpression.create(new LogicalOlapScanOperator(olapTable2))));

    Memo memo = new Memo();
    GroupExpression groupExpression = memo.init(expr);

    // The returned root group expression corresponds to the project operator.
    assertEquals(OperatorType.LOGICAL_PROJECT, groupExpression.getOp().getOpType());
    assertEquals(OperatorType.LOGICAL_JOIN,
            groupExpression.inputAt(0).getFirstLogicalExpression().getOp().getOpType());
    assertEquals(OperatorType.LOGICAL_OLAP_SCAN,
            groupExpression.inputAt(0).getFirstLogicalExpression().inputAt(0)
                    .getFirstLogicalExpression().getOp()
                    .getOpType());
    assertEquals(OperatorType.LOGICAL_OLAP_SCAN,
            groupExpression.inputAt(0).getFirstLogicalExpression().inputAt(1)
                    .getFirstLogicalExpression().getOp()
                    .getOpType());

    // Four operators -> four groups and four group expressions, IDs 0..3.
    assertEquals(memo.getGroups().size(), 4);
    assertEquals(memo.getGroupExpressions().size(), 4);

    assertEquals(memo.getGroups().get(0).getId(), 0);
    assertEquals(memo.getGroups().get(1).getId(), 1);
    assertEquals(memo.getGroups().get(2).getId(), 2);
    assertEquals(memo.getGroups().get(3).getId(), 3);
}
|
/**
 * {@inheritDoc}
 *
 * <p>Returns the currently accumulated value as-is; {@code null} until a
 * value has been accumulated.
 */
@Override
public Object collect() {
    return value;
}
|
// A freshly constructed aggregation has no accumulated value, so collect()
// must return null.
@Test
public void test_default() {
    ValueSqlAggregation aggregation = new ValueSqlAggregation();
    assertThat(aggregation.collect()).isNull();
}
|
/**
 * FEEL {@code list contains(list, element)}: reports whether {@code element}
 * occurs in {@code list}, using number coercion so numerically equal values
 * of different types compare equal.
 *
 * @param list    the list to search; {@code null} yields an error result
 * @param element the element to look for; may be {@code null}
 * @return a boolean result, or an error when {@code list} is {@code null}
 */
public FEELFnResult<Boolean> invoke(@ParameterName("list") List list, @ParameterName("element") Object element) {
    if ( list == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
    }
    // A null element can be searched for directly, no coercion needed.
    if (element == null) {
        return FEELFnResult.ofResult(list.contains(element));
    }
    // Coerce numbers so that e.g. 1 and 1.0 compare equal under FEEL semantics.
    final Object target = NumberEvalHelper.coerceNumber(element);
    for (Object candidate : (List<?>) list) {
        if (itemEqualsSC(target, NumberEvalHelper.coerceNumber(candidate))) {
            return FEELFnResult.ofResult(true);
        }
    }
    return FEELFnResult.ofResult(false);
}
|
// A null element must be found wherever the list actually contains null,
// regardless of its position.
@Test
void invokeContainsNull() {
    FunctionTestUtil.assertResult(listContainsFunction.invoke(Collections.singletonList(null), null), true);
    FunctionTestUtil.assertResult(listContainsFunction.invoke(Arrays.asList(1, null), null), true);
    FunctionTestUtil.assertResult(listContainsFunction.invoke(Arrays.asList(null, 1), null), true);
}
|
/**
 * Returns the two-factor status for the account bound to the given
 * Mijn DigiD session.
 *
 * @param mijnDigiDsessionId session ID taken from the request header
 * @return the two-factor status for the session's account/device/app
 */
@GetMapping("/status")
public TwoFactorStatusResult getTwoFactor(@RequestHeader(MijnDigidSession.MIJN_DIGID_SESSION_HEADER) String mijnDigiDsessionId){
    // Resolve the session first; account, device and app details come from it.
    MijnDigidSession mijnDigiDSession = retrieveMijnDigiDSession(mijnDigiDsessionId);
    return this.accountService.getTwoFactorStatus(mijnDigiDSession.getAccountId(), mijnDigiDSession.getDeviceName(), mijnDigiDSession.getAppCode());
}
|
// The controller must pass through both the status and the error field from
// the account service unchanged.
@Test
public void validTwoFactorStatus() {
    TwoFactorStatusResult result = new TwoFactorStatusResult();
    result.setStatus(Status.OK);
    result.setError("error");
    when(accountService.getTwoFactorStatus(eq(1L), any(), any())).thenReturn(result);
    TwoFactorStatusResult twoFactor = twoFactorController.getTwoFactor(mijnDigiDSession.getId());
    assertEquals(Status.OK, twoFactor.getStatus());
    assertEquals("error", twoFactor.getError());
}
|
/**
 * Escapes the given value for safe inclusion as a CSV field.
 *
 * <p>Delegates to the two-argument overload with {@code false} for its flag —
 * presumably disabling optional whitespace trimming; confirm against the
 * overload's contract.
 *
 * @param value the value to escape
 * @return the CSV-escaped value
 */
public static CharSequence escapeCsv(CharSequence value) {
    return escapeCsv(value, false);
}
|
// An already-quoted value must be left unchanged by escaping (the two-argument
// escapeCsv helper here asserts input against expected output).
@Test
public void escapeCsvQuoted() {
    CharSequence value = "\"foo,goo\"";
    escapeCsv(value, value);
}
|
/**
 * Converts a Kingbase column definition to a SeaTunnel {@link Column}.
 *
 * <p>First tries the generic (PostgreSQL-compatible) conversion in the super
 * class; types the super class cannot handle fall through to the
 * Kingbase-specific mappings below.
 *
 * @param typeDefine the source column definition
 * @return the converted SeaTunnel column
 * @throws SeaTunnelRuntimeException if the data type is unsupported
 */
@Override
public Column convert(BasicTypeDefine typeDefine) {
    try {
        return super.convert(typeDefine);
    } catch (SeaTunnelRuntimeException e) {
        PhysicalColumn.PhysicalColumnBuilder builder =
                PhysicalColumn.builder()
                        .name(typeDefine.getName())
                        .sourceType(typeDefine.getColumnType())
                        .nullable(typeDefine.isNullable())
                        .defaultValue(typeDefine.getDefaultValue())
                        .comment(typeDefine.getComment());
        String kingbaseDataType = typeDefine.getDataType().toUpperCase();
        switch (kingbaseDataType) {
            case KB_TINYINT:
                builder.dataType(BasicType.BYTE_TYPE);
                break;
            case KB_MONEY:
                builder.dataType(new DecimalType(38, 18));
                builder.columnLength(38L);
                builder.scale(18);
                break;
            case KB_BLOB:
                builder.dataType(PrimitiveByteArrayType.INSTANCE);
                builder.columnLength((long) (1024 * 1024 * 1024));
                break;
            case KB_CLOB:
                builder.dataType(BasicType.STRING_TYPE);
                // Fixed: previously columnLength was set twice here, with the
                // declared length immediately overwritten; only the 1 GiB cap
                // (matching the BLOB case) ever took effect.
                builder.columnLength((long) (1024 * 1024 * 1024));
                break;
            case KB_BIT:
                builder.dataType(PrimitiveByteArrayType.INSTANCE);
                // BIT(M) -> BYTE(M/8), rounding up for any remainder bits.
                long byteLength = typeDefine.getLength() / 8;
                byteLength += typeDefine.getLength() % 8 > 0 ? 1 : 0;
                builder.columnLength(byteLength);
                break;
            default:
                throw CommonError.convertToSeaTunnelTypeError(
                        DatabaseIdentifier.KINGBASE,
                        typeDefine.getDataType(),
                        typeDefine.getName());
        }
        return builder.build();
    }
}
|
// A "date" column must map to the local-date type and keep its name and
// source type intact.
@Test
public void testConvertDate() {
    BasicTypeDefine<Object> typeDefine =
            BasicTypeDefine.builder().name("test").columnType("date").dataType("date").build();
    Column column = KingbaseTypeConverter.INSTANCE.convert(typeDefine);
    Assertions.assertEquals(typeDefine.getName(), column.getName());
    Assertions.assertEquals(LocalTimeType.LOCAL_DATE_TYPE, column.getDataType());
    Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType().toLowerCase());
}
|
/**
 * Removes a sensor (if it exists), its metrics, and recursively all of its
 * child sensors, also detaching it from any parent sensors.
 *
 * @param name name of the sensor to remove; a no-op if no such sensor exists
 */
public void removeSensor(String name) {
    Sensor sensor = sensors.get(name);
    if (sensor != null) {
        List<Sensor> childSensors = null;
        // Lock ordering: the sensor's own monitor is taken before the registry
        // ("this") monitor — presumably to match the ordering used elsewhere in
        // this class and avoid deadlock; confirm against the other sensor paths.
        synchronized (sensor) {
            synchronized (this) {
                // remove(name, sensor) guards against a concurrent re-registration
                // of a different sensor under the same name.
                if (sensors.remove(name, sensor)) {
                    for (KafkaMetric metric : sensor.metrics())
                        removeMetric(metric.metricName());
                    log.trace("Removed sensor with name {}", name);
                    childSensors = childrenSensors.remove(sensor);
                    // Detach this sensor from each parent's child list.
                    for (final Sensor parent : sensor.parents()) {
                        childrenSensors.getOrDefault(parent, emptyList()).remove(sensor);
                    }
                }
            }
        }
        // Recurse outside the locks to remove children (and their children).
        if (childSensors != null) {
            for (Sensor childSensor : childSensors)
                removeSensor(childSensor.name());
        }
    }
}
|
// Builds a small sensor hierarchy (two parents, children, a grandchild) and
// verifies that removing sensors removes their metrics and child links, and
// that the registry ends up back at its original size.
@Test
public void testRemoveSensor() {
    int size = metrics.metrics().size();
    Sensor parent1 = metrics.sensor("test.parent1");
    parent1.add(metrics.metricName("test.parent1.count", "grp1"), new WindowedCount());
    Sensor parent2 = metrics.sensor("test.parent2");
    parent2.add(metrics.metricName("test.parent2.count", "grp1"), new WindowedCount());
    Sensor child1 = metrics.sensor("test.child1", parent1, parent2);
    child1.add(metrics.metricName("test.child1.count", "grp1"), new WindowedCount());
    Sensor child2 = metrics.sensor("test.child2", parent2);
    child2.add(metrics.metricName("test.child2.count", "grp1"), new WindowedCount());
    Sensor grandChild1 = metrics.sensor("test.gchild2", child2);
    grandChild1.add(metrics.metricName("test.gchild2.count", "grp1"), new WindowedCount());

    // Removing parent1 must cascade to child1 (its only child) and drop metrics.
    Sensor sensor = metrics.getSensor("test.parent1");
    assertNotNull(sensor);
    metrics.removeSensor("test.parent1");
    assertNull(metrics.getSensor("test.parent1"));
    assertNull(metrics.metrics().get(metrics.metricName("test.parent1.count", "grp1")));
    assertNull(metrics.getSensor("test.child1"));
    assertNull(metrics.childrenSensors().get(sensor));
    assertNull(metrics.metrics().get(metrics.metricName("test.child1.count", "grp1")));

    // Removing a leaf (grandchild) only removes itself.
    sensor = metrics.getSensor("test.gchild2");
    assertNotNull(sensor);
    metrics.removeSensor("test.gchild2");
    assertNull(metrics.getSensor("test.gchild2"));
    assertNull(metrics.childrenSensors().get(sensor));
    assertNull(metrics.metrics().get(metrics.metricName("test.gchild2.count", "grp1")));

    sensor = metrics.getSensor("test.child2");
    assertNotNull(sensor);
    metrics.removeSensor("test.child2");
    assertNull(metrics.getSensor("test.child2"));
    assertNull(metrics.childrenSensors().get(sensor));
    assertNull(metrics.metrics().get(metrics.metricName("test.child2.count", "grp1")));

    sensor = metrics.getSensor("test.parent2");
    assertNotNull(sensor);
    metrics.removeSensor("test.parent2");
    assertNull(metrics.getSensor("test.parent2"));
    assertNull(metrics.childrenSensors().get(sensor));
    assertNull(metrics.metrics().get(metrics.metricName("test.parent2.count", "grp1")));

    // Everything added by this test has been removed again.
    assertEquals(size, metrics.metrics().size());
}
|
/**
 * Static factory for a {@link FlagSet} over the given enum class.
 *
 * @param enumClass class of the enum the flags belong to
 * @param prefix    configuration-key prefix associated with the flags
 * @param flags     initial set of enabled flags
 * @param <E>       enum type of the flags
 * @return a new {@code FlagSet} wrapping the given flags
 */
public static <E extends Enum<E>> FlagSet<E> createFlagSet(
        final Class<E> enumClass,
        final String prefix,
        final EnumSet<E> flags) {
    return new FlagSet<>(enumClass, prefix, flags);
}
|
// An empty flag set and a non-empty flag set with the same prefix must not
// compare equal.
@Test
public void testInequality() {
    final FlagSet<SimpleEnum> s1 =
            createFlagSet(SimpleEnum.class, KEYDOT, noneOf(SimpleEnum.class));
    final FlagSet<SimpleEnum> s2 =
            createFlagSet(SimpleEnum.class, KEYDOT, SimpleEnum.a, SimpleEnum.b);
    Assertions.assertThat(s1)
            .describedAs("s1 == s2")
            .isNotEqualTo(s2);
}
|
/**
 * Resolves a unit extension by name. For "SergeantExtension" this unit
 * supplies (and lazily creates) a {@link Sergeant}; every other name is
 * delegated to the superclass.
 *
 * @param extensionName name of the requested extension
 * @return the matching extension, or whatever the superclass resolves
 */
@Override
public UnitExtension getUnitExtension(String extensionName) {
    if (extensionName.equals("SergeantExtension")) {
        // Reuse the assigned extension if present, otherwise create one lazily.
        return unitExtension == null ? new Sergeant(this) : unitExtension;
    }
    return super.getUnitExtension(extensionName);
}
|
// A sergeant unit must resolve only its own extension; soldier and commander
// extension lookups fall through to the superclass and yield null.
@Test
void getUnitExtension() {
    final var unit = new SergeantUnit("SergeantUnitName");
    assertNull(unit.getUnitExtension("SoldierExtension"));
    assertNotNull(unit.getUnitExtension("SergeantExtension"));
    assertNull(unit.getUnitExtension("CommanderExtension"));
}
|
/**
 * Blocks until the future completes (result set, exception set, or
 * cancelled), then returns the result or throws.
 *
 * <p>Synchronized on this future; {@code futureWait()} presumably releases
 * the monitor while waiting (wait/notify style) — confirm in its definition.
 *
 * @return the result of the future
 * @throws InterruptedException if interrupted while waiting
 * @throws ExecutionException if the future completed exceptionally
 */
@Override
public final synchronized V get() throws InterruptedException, ExecutionException {
    // Loop guards against spurious wakeups: re-check completion each time.
    while (result == null && exception == null && !cancelled) {
        futureWait();
    }
    return getOrThrowExecutionException();
}
|
// Processing a stanza that sets the result must complete the future, so a
// subsequent get() returns without blocking.
@Test
public void simpleSmackFutureSuccessTest() throws InterruptedException, ExecutionException {
    InternalProcessStanzaSmackFuture<Boolean, Exception> future = new SimpleInternalProcessStanzaSmackFuture<Boolean, Exception>() {
        @Override
        protected void handleStanza(Stanza stanza) {
            setResult(true);
        }
    };

    future.processStanza(null);

    assertTrue(future.get());
}
|
/**
 * Converts markdown text to HTML, HTML-escaping the input first so raw
 * markup in {@code input} cannot be injected into the output.
 *
 * @param input the markdown source text
 * @return the rendered HTML
 */
public static String convertToHtml(String input) {
    return new Markdown().convert(StringEscapeUtils.escapeHtml4(input));
}
|
// All three line-ending styles (\r, \r\n, \n) must be rendered as <br/>.
@Test
public void shouldDecorateEndOfLine() {
    assertThat(Markdown.convertToHtml("1\r2\r\n3\n")).isEqualTo("1<br/>2<br/>3<br/>");
}
|
/**
 * Reads a single byte from the underlying UFS stream, advancing the stream
 * position and recording the byte in the UFS-read metric.
 *
 * @return the byte read, or {@code -1} at end of file
 * @throws IOException if the underlying stream read fails
 */
@Override
public int read() throws IOException {
    if (mPosition == mLength) { // at end of file
        return -1;
    }
    // Make sure the UFS stream is open and positioned correctly.
    updateStreamIfNeeded();
    int res = mUfsInStream.get().read();
    if (res == -1) {
        return -1;
    }
    // Only count the position and the metric for successful reads.
    mPosition++;
    Metrics.BYTES_READ_FROM_UFS.inc(1);
    return res;
}
|
// A two-byte file (bytes 0 and 1) must yield exactly those values from two
// consecutive single-byte reads.
@Test
public void twoBytesRead() throws IOException, AlluxioException {
    AlluxioURI ufsPath = getUfsPath();
    createFile(ufsPath, 2);
    try (FileInStream inStream = getStream(ufsPath)) {
        assertEquals(0, inStream.read());
        assertEquals(1, inStream.read());
    }
}
|
/**
 * Initializes this metered store wrapper: captures the processor context,
 * sets up serdes and metrics, then delegates to the wrapped store while
 * measuring restore latency.
 *
 * @param context the processor context (cast to internal form if possible)
 * @param root    the root state store being registered
 * @deprecated see the overload taking {@code StateStoreContext}
 */
@Deprecated
@Override
public void init(final ProcessorContext context,
                 final StateStore root) {
    // Keep the internal context when available; null otherwise.
    this.context = context instanceof InternalProcessorContext ? (InternalProcessorContext) context : null;
    taskId = context.taskId();
    initStoreSerde(context);
    streamsMetrics = (StreamsMetricsImpl) context.metrics();

    registerMetrics();
    final Sensor restoreSensor =
        StateStoreMetrics.restoreSensor(taskId.toString(), metricsScope, name(), streamsMetrics);

    // register and possibly restore the state from the logs
    maybeMeasureLatency(() -> super.init(context, root), time, restoreSensor);
}
|
// init() on the metered wrapper must delegate to the inner store with the
// same context and the wrapper itself as the root store.
@Test
public void shouldDelegateInit() {
    setUp();
    final MeteredKeyValueStore<String, String> outer = new MeteredKeyValueStore<>(
        inner,
        STORE_TYPE,
        new MockTime(),
        Serdes.String(),
        Serdes.String()
    );
    doNothing().when(inner).init((StateStoreContext) context, outer);
    outer.init((StateStoreContext) context, outer);
}
|
/**
 * Looks up a global state store by name and wraps it in a read-write facade.
 *
 * @param name name of the global store
 * @param <S>  expected store type (unchecked cast; caller must know the type)
 * @return the wrapped store
 */
@SuppressWarnings("unchecked")
@Override
public <S extends StateStore> S getStateStore(final String name) {
    final StateStore store = stateManager.getGlobalStore(name);
    return (S) getReadWriteStore(store);
}
|
// The read-write facade returned for a global window store must reject
// init() with UnsupportedOperationException.
@Test
public void shouldNotAllowInitForWindowStore() {
    when(stateManager.getGlobalStore(GLOBAL_WINDOW_STORE_NAME)).thenReturn(mock(WindowStore.class));
    final StateStore store = globalContext.getStateStore(GLOBAL_WINDOW_STORE_NAME);
    try {
        store.init((StateStoreContext) null, null);
        fail("Should have thrown UnsupportedOperationException.");
    } catch (final UnsupportedOperationException expected) { }
}
|
/**
 * Resolves a SpEL expression attached to an annotated method.
 *
 * <p>Supported forms, tried in order: {@code ${...}} property placeholders
 * (resolved via the string value resolver), method-based {@code #{...}}
 * expressions, and bean-referencing expressions (which additionally get a
 * bean resolver). Anything else is returned unchanged as a literal.
 *
 * @param method         the intercepted method
 * @param arguments      the actual invocation arguments
 * @param spelExpression the raw expression; may be null/empty
 * @return the resolved value, or {@code spelExpression} itself when it is
 *         empty or matches no supported SpEL form
 */
@Override
public String resolve(Method method, Object[] arguments, String spelExpression) {
    if (StringUtils.isEmpty(spelExpression)) {
        return spelExpression;
    }

    // ${...} placeholders are resolved against the environment, when available.
    if (spelExpression.matches(PLACEHOLDER_SPEL_REGEX) && stringValueResolver != null) {
        return stringValueResolver.resolveStringValue(spelExpression);
    }

    if (spelExpression.matches(METHOD_SPEL_REGEX)) {
        return evaluateExpression(method, arguments, spelExpression, false);
    }

    if (spelExpression.matches(BEAN_SPEL_REGEX)) {
        return evaluateExpression(method, arguments, spelExpression, true);
    }

    // Not a recognized SpEL form: treat the value as a literal.
    return spelExpression;
}

/**
 * Evaluates {@code spelExpression} against a method-based evaluation context.
 *
 * @param withBeanResolver whether to install a {@link BeanFactoryResolver} so
 *                         the expression can reference Spring beans
 */
private String evaluateExpression(Method method, Object[] arguments, String spelExpression, boolean withBeanResolver) {
    SpelRootObject rootObject = new SpelRootObject(method, arguments);
    MethodBasedEvaluationContext evaluationContext =
        new MethodBasedEvaluationContext(rootObject, method, arguments, parameterNameDiscoverer);
    if (withBeanResolver) {
        evaluationContext.setBeanResolver(new BeanFactoryResolver(this.beanFactory));
    }
    Object evaluated = expressionParser.parseExpression(spelExpression).getValue(evaluationContext);
    return (String) evaluated;
}
|
// A plain string that matches no SpEL form must be returned unchanged.
@Test
public void givenNonSpelExpression_whenParse_returnsItself() throws Exception {
    String testExpression = "backendA";
    DefaultSpelResolverTest target = new DefaultSpelResolverTest();
    Method testMethod = target.getClass().getMethod("testMethod", String.class);

    String result = sut.resolve(testMethod, new Object[]{}, testExpression);
    assertThat(result).isEqualTo(testExpression);
}
|
/**
 * Returns a sink that writes items to an {@code IList} in a remote Hazelcast
 * cluster identified by the given client configuration.
 *
 * @param listName     name of the remote list to write to
 * @param clientConfig configuration for connecting to the remote cluster
 * @param <T>          type of the items the sink accepts
 * @return the remote-list sink
 */
@Nonnull
public static <T> Sink<T> remoteList(@Nonnull String listName, @Nonnull ClientConfig clientConfig) {
    return fromProcessor("remoteListSink(" + listName + ')', writeRemoteListP(listName, clientConfig));
}
|
// Writing a populated local list through the remote-list sink must land all
// items in the remote cluster's list.
@Test
public void remoteList() {
    // Given
    populateList(srcList);

    // When
    Sink<Object> sink = Sinks.remoteList(sinkName, clientConfig);

    // Then
    p.readFrom(Sources.list(srcName)).writeTo(sink);
    execute();
    assertEquals(itemCount, remoteHz.getList(sinkName).size());
}
|
/**
 * Validates the GitHub App settings stored in the given ALM setting and
 * returns the resulting configuration.
 *
 * @param almSettingDto the stored GitHub ALM settings to validate
 * @return the validated GitHub App configuration
 */
public GithubAppConfiguration validate(AlmSettingDto almSettingDto) {
    // Unpack the DTO and delegate to the field-level validation overload.
    return validate(almSettingDto.getAppId(), almSettingDto.getClientId(), almSettingDto.getClientSecret(), almSettingDto.getPrivateKey(), almSettingDto.getUrl());
}
|
// A non-numeric appId ("abc") must be rejected with an explanatory
// IllegalArgumentException.
@Test
public void github_validation_checks_invalid_appId() {
    AlmSettingDto almSettingDto = createNewGithubDto("clientId", "clientSecret", "abc", null);

    assertThatThrownBy(() -> underTest.validate(almSettingDto))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Invalid appId; For input string: \"abc\"");
}
|
/**
 * CLI "server" command: with no argument, prints the current server address;
 * with one argument, switches the REST client to that address, resets CLI
 * state for the new server, and validates connectivity.
 *
 * @param args     zero or one server address
 * @param terminal output writer for the CLI
 */
@Override
public void execute(final List<String> args, final PrintWriter terminal) {
    CliCmdUtil.ensureArgCountBounds(args, 0, 1, HELP);

    // No argument: just report the current address and skip validation.
    if (args.isEmpty()) {
        terminal.println(restClient.getServerAddress());
        return;
    }

    final String serverAddress = args.get(0);
    restClient.setServerAddress(serverAddress);
    terminal.println("Server now: " + serverAddress);
    resetCliForNewServer.fire();

    validateClient(terminal, restClient);
}
|
// When connecting to a server fails with an SSL-related issue, the command
// must print guidance pointing at the CLI SSL documentation.
@Test
public void shouldReportErrorIfRemoteKsqlServerIsUsingSSL() {
    // Given:
    reset(restClient);
    givenServerAddressHandling();
    when(restClient.getServerInfo()).thenThrow(sslConnectionIssue());

    // When:
    command.execute(ImmutableList.of(VALID_SERVER_ADDRESS), terminal);

    // Then:
    assertThat(out.toString(), containsString(
        "Remote server at " + VALID_SERVER_ADDRESS + " looks to be configured to use HTTPS /"
            + System.lineSeparator()
            + "SSL. Please refer to the KSQL documentation on how to configure the CLI for SSL:"
            + System.lineSeparator()
            + DocumentationLinks.SECURITY_CLI_SSL_DOC_URL));
}
|
/**
 * Parses a single SQL statement into a {@code ParsedCommand}, substituting
 * session variables first.
 *
 * <p>The statement is parsed twice: once to apply variable substitution on
 * the raw text, then again on the substituted text to classify it. Plain
 * statements carry no AST; other commands carry the built AST node.
 *
 * @param sql       the statement text (must be a supported statement type)
 * @param variables session variables for substitution
 * @return the parsed command with substituted text and optional AST
 * @throws MigrationException if the statement cannot be parsed
 */
public static ParsedCommand parse(
    // CHECKSTYLE_RULES.ON: CyclomaticComplexity
    final String sql, final Map<String, String> variables) {
  validateSupportedStatementType(sql);
  final String substituted;
  try {
    substituted = VariableSubstitutor.substitute(KSQL_PARSER.parse(sql).get(0), variables);
  } catch (ParseFailedException e) {
    throw new MigrationException(String.format(
        "Failed to parse the statement. Statement: %s. Reason: %s",
        sql, e.getMessage()));
  }
  // Re-parse the substituted text to classify the statement.
  final SqlBaseParser.SingleStatementContext statementContext = KSQL_PARSER.parse(substituted)
      .get(0).getStatement();
  final boolean isStatement = StatementType.get(statementContext.statement().getClass())
      == StatementType.STATEMENT;
  // Plain statements are executed as-is; everything else gets an AST.
  return new ParsedCommand(substituted,
      isStatement ? Optional.empty() : Optional.of(new AstBuilder(TypeRegistry.EMPTY)
          .buildStatement(statementContext)));
}
|
// An UNDEFINE statement must parse into a single command carrying an
// UndefineVariable AST node with the right variable name.
@Test
public void shouldParseUndefineStatement() {
    // Given:
    final String undefineVar = "UNDEFINE var;";

    // When:
    List<CommandParser.ParsedCommand> commands = parse(undefineVar);

    // Then:
    assertThat(commands.size(), is(1));
    assertThat(commands.get(0).getCommand(), is(undefineVar));
    assertThat(commands.get(0).getStatement().isPresent(), is (true));
    assertThat(commands.get(0).getStatement().get(), instanceOf(UndefineVariable.class));
    assertThat(((UndefineVariable) commands.get(0).getStatement().get()).getVariableName(), is("var"));
}
|
/**
 * Initializes the aggregation state from a raw value.
 *
 * <p>The cast is unchecked by design: callers are expected to supply a value
 * already of the aggregator's state type (or {@code null}); subclasses may
 * override to build the state from other representations.
 *
 * @param value raw initial value; may be {@code null}
 * @return the value cast to the aggregation state type
 */
@SuppressWarnings("unchecked")
T init(Object value) {
    return (T) value;
}
|
// BitmapUnionAggregator.init must handle null (empty bitmap), a plain int
// (single-value bitmap), and a serialized byte[] representation.
@Test
public void testBitmapUnionAggregator() {
    // init null
    BitmapValue value = aggregator.init(null);
    Assert.assertEquals(BitmapValue.EMPTY, value.getBitmapType());

    // init normal value 1
    aggregator = new BitmapUnionAggregator();
    value = aggregator.init(1);
    Assert.assertEquals(BitmapValue.SINGLE_VALUE, value.getBitmapType());
    Assert.assertEquals("{1}", value.toString());

    // init byte[]
    byte[] bytes = new byte[] {1, 1, 0, 0, 0};
    value = aggregator.init(bytes);
    Assert.assertEquals(BitmapValue.SINGLE_VALUE, value.getBitmapType());
    Assert.assertEquals("{1}", value.toString());
}
|
/**
 * {@inheritDoc}
 *
 * <p>Always throws: plugins running through this wrapper are not allowed to
 * query the plugins root directory.
 *
 * @throws IllegalAccessError always, identifying the offending plugin
 */
@Override
public Path getPluginsRoot() {
    throw new IllegalAccessError(PLUGIN_PREFIX + currentPluginId + " tried to execute getPluginsRoot!");
}
|
// The wrapped plugin manager must forbid access to the plugins root.
@Test
public void getPluginsRoot() {
    assertThrows(IllegalAccessError.class, () -> wrappedPluginManager.getPluginsRoot());
}
|
/**
 * Determines whether this wildcard permission implies the given permission.
 *
 * <p>Permissions are compared part by part (parts are colon-separated sets of
 * tokens). Every token of the other permission's part must be matched by at
 * least one token of this permission's corresponding part, where the literal
 * wildcard token matches anything. Missing trailing parts on this side imply
 * everything; extra trailing parts on this side must all be wildcards.
 *
 * @param p the permission to test; non-wildcard permissions are never implied
 * @return {@code true} if this permission implies {@code p}
 */
@Override
public boolean implies(Permission p) {
    // By default only supports comparisons with other WildcardPermissions
    if (!(p instanceof WildcardPermission)) {
        return false;
    }

    WildcardPermission wp = (WildcardPermission) p;

    List<Set<String>> otherParts = getParts(wp);

    int i = 0;
    for (Set<String> otherPart : otherParts) {
        // If this permission has less parts than the other permission, everything after the number of parts contained
        // in this permission is automatically implied, so return true
        if (getParts().size() - 1 < i) {
            return true;
        } else {
            Set<String> thisPart = getParts().get(i);
            // all tokens from otherPart must pass at least one token from thisPart
            for (String otherToken : otherPart) {
                // Case-insensitive mode lowercases the other token; this side's
                // tokens are matched via matches(), which handles the casing.
                if (!caseSensitive) {
                    otherToken = otherToken.toLowerCase();
                }

                boolean otherIsMatched = false;

                for (String token : thisPart) {
                    // A literal wildcard token on this side matches any token.
                    if (token.equals(WILDCARD_TOKEN)) {
                        otherIsMatched = true;
                        break;
                    }
                    if (matches(token, otherToken)) {
                        otherIsMatched = true;
                        break;
                    }
                }
                if (!otherIsMatched) {
                    return false;
                }

            }
            i++;
        }
    }

    // If this permission has more parts than the other parts, only imply it if all of the other parts are wildcards
    for (; i < getParts().size(); i++) {
        Set<String> part = getParts().get(i);
        if (!part.contains(WILDCARD_TOKEN)) {
            return false;
        }
    }

    return true;
}
|
@Test
public void testIntrapartWildcard() {
    // A trailing wildcard inside a part implies any concrete suffix — but not the reverse.
    ActiveMQWildcardPermission wildcarded = new ActiveMQWildcardPermission("topic:ActiveMQ.Advisory.*:read");
    ActiveMQWildcardPermission concrete = new ActiveMQWildcardPermission("topic:ActiveMQ.Advisory.Topic:read");

    assertTrue(wildcarded.implies(concrete));
    assertFalse(concrete.implies(wildcarded));
}
|
/**
 * Exports the sidecar collector identified by the descriptor, or empty if no
 * such collector exists (a missing collector is logged but not an error).
 */
@Override
public Optional<Entity> exportEntity(EntityDescriptor entityDescriptor, EntityDescriptorIds entityDescriptorIds) {
    final Collector collector = collectorService.find(entityDescriptor.id().id());
    if (isNull(collector)) {
        LOG.debug("Couldn't find collector {}", entityDescriptor);
        return Optional.empty();
    }
    return Optional.of(exportNativeEntity(collector, entityDescriptorIds));
}
|
@Test
@MongoDBFixtures("SidecarCollectorFacadeTest.json")
public void exportEntity() {
    // Export the collector seeded by the MongoDB fixture.
    final EntityDescriptor descriptor = EntityDescriptor.create("5b4c920b4b900a0024af0001", ModelTypes.SIDECAR_COLLECTOR_V1);
    final EntityDescriptorIds entityDescriptorIds = EntityDescriptorIds.of(descriptor);
    final Entity entity = facade.exportEntity(descriptor, entityDescriptorIds).orElseThrow(AssertionError::new);

    // The entity id must be the generated content-pack id, not the database id.
    assertThat(entity).isInstanceOf(EntityV1.class);
    assertThat(entity.id()).isEqualTo(ModelId.of(entityDescriptorIds.get(descriptor).orElse(null)));
    assertThat(entity.type()).isEqualTo(ModelTypes.SIDECAR_COLLECTOR_V1);

    // Every field of the exported payload must match the fixture's filebeat collector.
    final EntityV1 entityV1 = (EntityV1) entity;
    final SidecarCollectorEntity collectorEntity = objectMapper.convertValue(entityV1.data(), SidecarCollectorEntity.class);
    assertThat(collectorEntity.name()).isEqualTo(ValueReference.of("filebeat"));
    assertThat(collectorEntity.serviceType()).isEqualTo(ValueReference.of("exec"));
    assertThat(collectorEntity.nodeOperatingSystem()).isEqualTo(ValueReference.of("linux"));
    assertThat(collectorEntity.executablePath()).isEqualTo(ValueReference.of("/usr/lib/graylog-sidecar/filebeat"));
    assertThat(collectorEntity.executeParameters()).isEqualTo(ValueReference.of("-c %s"));
    assertThat(collectorEntity.validationParameters()).isEqualTo(ValueReference.of("test config -c %s"));
    assertThat(collectorEntity.defaultTemplate()).isEqualTo(ValueReference.of(""));
}
|
/**
 * Reads a complete JSON object from the reader and deserializes it into a Node.
 * Delegates to the {@code JsonObject} overload after materializing the input.
 */
public Node deserializeObject(JsonReader reader) {
    Log.info("Deserializing JSON to Node.");
    final JsonObject json = reader.readObject();
    return deserializeObject(json);
}
|
@Test
void testPrimitiveType() {
    // Round-trip a primitive type through serialize -> deserialize.
    Type type = parseType("int");
    String serialized = serialize(type, false);
    Node deserialized = deserializer.deserializeObject(Json.createReader(new StringReader(serialized)));

    // Both textual form and hash must survive the round trip.
    assertEqualsStringIgnoringEol("int", deserialized.toString());
    assertEquals(type.hashCode(), deserialized.hashCode());
}
|
/**
 * Keeps tracked item-charge counts in sync by parsing game chat messages, and
 * fires notifications when a chargeable item breaks or activates.
 *
 * <p>Only GAMEMESSAGE and SPAM chat types are inspected; formatting tags are
 * stripped before matching. Branches are mutually exclusive: the first pattern
 * that matches the message wins.
 */
@Subscribe
public void onChatMessage(ChatMessage event)
{
    if (event.getType() == ChatMessageType.GAMEMESSAGE || event.getType() == ChatMessageType.SPAM)
    {
        String message = Text.removeTags(event.getMessage());
        Matcher dodgyCheckMatcher = DODGY_CHECK_PATTERN.matcher(message);
        Matcher dodgyProtectMatcher = DODGY_PROTECT_PATTERN.matcher(message);
        Matcher dodgyBreakMatcher = DODGY_BREAK_PATTERN.matcher(message);
        Matcher bindingNecklaceCheckMatcher = BINDING_CHECK_PATTERN.matcher(message);
        Matcher bindingNecklaceUsedMatcher = BINDING_USED_PATTERN.matcher(message);
        Matcher ringOfForgingCheckMatcher = RING_OF_FORGING_CHECK_PATTERN.matcher(message);
        Matcher amuletOfChemistryCheckMatcher = AMULET_OF_CHEMISTRY_CHECK_PATTERN.matcher(message);
        Matcher amuletOfChemistryUsedMatcher = AMULET_OF_CHEMISTRY_USED_PATTERN.matcher(message);
        Matcher amuletOfChemistryBreakMatcher = AMULET_OF_CHEMISTRY_BREAK_PATTERN.matcher(message);
        Matcher amuletOfBountyCheckMatcher = AMULET_OF_BOUNTY_CHECK_PATTERN.matcher(message);
        Matcher amuletOfBountyUsedMatcher = AMULET_OF_BOUNTY_USED_PATTERN.matcher(message);
        Matcher chronicleAddMatcher = CHRONICLE_ADD_PATTERN.matcher(message);
        Matcher chronicleUseAndCheckMatcher = CHRONICLE_USE_AND_CHECK_PATTERN.matcher(message);
        Matcher slaughterActivateMatcher = BRACELET_OF_SLAUGHTER_ACTIVATE_PATTERN.matcher(message);
        Matcher slaughterCheckMatcher = BRACELET_OF_SLAUGHTER_CHECK_PATTERN.matcher(message);
        Matcher expeditiousActivateMatcher = EXPEDITIOUS_BRACELET_ACTIVATE_PATTERN.matcher(message);
        Matcher expeditiousCheckMatcher = EXPEDITIOUS_BRACELET_CHECK_PATTERN.matcher(message);
        Matcher bloodEssenceCheckMatcher = BLOOD_ESSENCE_CHECK_PATTERN.matcher(message);
        Matcher bloodEssenceExtractMatcher = BLOOD_ESSENCE_EXTRACT_PATTERN.matcher(message);
        Matcher braceletOfClayCheckMatcher = BRACELET_OF_CLAY_CHECK_PATTERN.matcher(message);

        if (message.contains(RING_OF_RECOIL_BREAK_MESSAGE))
        {
            notifier.notify(config.recoilNotification(), "Your Ring of Recoil has shattered");
        }
        else if (dodgyBreakMatcher.find())
        {
            notifier.notify(config.dodgyNotification(), "Your dodgy necklace has crumbled to dust.");
            // A new necklace starts at full charges.
            updateDodgyNecklaceCharges(MAX_DODGY_CHARGES);
        }
        else if (dodgyCheckMatcher.find())
        {
            updateDodgyNecklaceCharges(Integer.parseInt(dodgyCheckMatcher.group(1)));
        }
        else if (dodgyProtectMatcher.find())
        {
            updateDodgyNecklaceCharges(Integer.parseInt(dodgyProtectMatcher.group(1)));
        }
        else if (amuletOfChemistryCheckMatcher.find())
        {
            updateAmuletOfChemistryCharges(Integer.parseInt(amuletOfChemistryCheckMatcher.group(1)));
        }
        else if (amuletOfChemistryUsedMatcher.find())
        {
            // The game spells out "one" instead of the digit for a single charge.
            final String match = amuletOfChemistryUsedMatcher.group(1);
            int charges = 1;
            if (!match.equals("one"))
            {
                charges = Integer.parseInt(match);
            }
            updateAmuletOfChemistryCharges(charges);
        }
        else if (amuletOfChemistryBreakMatcher.find())
        {
            notifier.notify(config.amuletOfChemistryNotification(), "Your amulet of chemistry has crumbled to dust.");
            updateAmuletOfChemistryCharges(MAX_AMULET_OF_CHEMISTRY_CHARGES);
        }
        else if (amuletOfBountyCheckMatcher.find())
        {
            updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyCheckMatcher.group(1)));
        }
        else if (amuletOfBountyUsedMatcher.find())
        {
            updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyUsedMatcher.group(1)));
        }
        else if (message.equals(AMULET_OF_BOUNTY_BREAK_TEXT))
        {
            updateAmuletOfBountyCharges(MAX_AMULET_OF_BOUNTY_CHARGES);
        }
        else if (message.contains(BINDING_BREAK_TEXT))
        {
            notifier.notify(config.bindingNotification(), BINDING_BREAK_TEXT);

            // This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
            updateBindingNecklaceCharges(MAX_BINDING_CHARGES + 1);
        }
        else if (bindingNecklaceUsedMatcher.find())
        {
            final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
            // Guard against a null container (other branches already do this);
            // dereferencing it unconditionally risked an NPE.
            if (equipment != null && equipment.contains(ItemID.BINDING_NECKLACE))
            {
                updateBindingNecklaceCharges(getItemCharges(ItemChargeConfig.KEY_BINDING_NECKLACE) - 1);
            }
        }
        else if (bindingNecklaceCheckMatcher.find())
        {
            final String match = bindingNecklaceCheckMatcher.group(1);

            int charges = 1;
            if (!match.equals("one"))
            {
                charges = Integer.parseInt(match);
            }

            updateBindingNecklaceCharges(charges);
        }
        else if (ringOfForgingCheckMatcher.find())
        {
            final String match = ringOfForgingCheckMatcher.group(1);

            int charges = 1;
            if (!match.equals("one"))
            {
                charges = Integer.parseInt(match);
            }

            updateRingOfForgingCharges(charges);
        }
        else if (message.equals(RING_OF_FORGING_USED_TEXT) || message.equals(RING_OF_FORGING_VARROCK_PLATEBODY))
        {
            final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
            final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);

            // Determine if the player smelted with a Ring of Forging equipped.
            if (equipment == null)
            {
                return;
            }
            // Also guard inventory: it is only dereferenced for the Varrock
            // platebody message, and may be null like any other container.
            if (equipment.contains(ItemID.RING_OF_FORGING) && (message.equals(RING_OF_FORGING_USED_TEXT) || (inventory != null && inventory.count(ItemID.IRON_ORE) > 1)))
            {
                int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_RING_OF_FORGING) - 1, 0, MAX_RING_OF_FORGING_CHARGES);
                updateRingOfForgingCharges(charges);
            }
        }
        else if (message.equals(RING_OF_FORGING_BREAK_TEXT))
        {
            notifier.notify(config.ringOfForgingNotification(), "Your ring of forging has melted.");

            // This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
            updateRingOfForgingCharges(MAX_RING_OF_FORGING_CHARGES + 1);
        }
        else if (chronicleAddMatcher.find())
        {
            final String match = chronicleAddMatcher.group(1);

            if (match.equals("one"))
            {
                setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
            }
            else
            {
                setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(match));
            }
        }
        else if (chronicleUseAndCheckMatcher.find())
        {
            setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(chronicleUseAndCheckMatcher.group(1)));
        }
        else if (message.equals(CHRONICLE_ONE_CHARGE_TEXT))
        {
            setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
        }
        else if (message.equals(CHRONICLE_EMPTY_TEXT) || message.equals(CHRONICLE_NO_CHARGES_TEXT))
        {
            setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 0);
        }
        else if (message.equals(CHRONICLE_FULL_TEXT))
        {
            setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1000);
        }
        else if (slaughterActivateMatcher.find())
        {
            // Group 1 is absent when the bracelet breaks rather than activates.
            final String found = slaughterActivateMatcher.group(1);
            if (found == null)
            {
                updateBraceletOfSlaughterCharges(MAX_SLAYER_BRACELET_CHARGES);
                notifier.notify(config.slaughterNotification(), BRACELET_OF_SLAUGHTER_BREAK_TEXT);
            }
            else
            {
                updateBraceletOfSlaughterCharges(Integer.parseInt(found));
            }
        }
        else if (slaughterCheckMatcher.find())
        {
            updateBraceletOfSlaughterCharges(Integer.parseInt(slaughterCheckMatcher.group(1)));
        }
        else if (expeditiousActivateMatcher.find())
        {
            // Same break-vs-activate distinction as the bracelet of slaughter.
            final String found = expeditiousActivateMatcher.group(1);
            if (found == null)
            {
                updateExpeditiousBraceletCharges(MAX_SLAYER_BRACELET_CHARGES);
                notifier.notify(config.expeditiousNotification(), EXPEDITIOUS_BRACELET_BREAK_TEXT);
            }
            else
            {
                updateExpeditiousBraceletCharges(Integer.parseInt(found));
            }
        }
        else if (expeditiousCheckMatcher.find())
        {
            updateExpeditiousBraceletCharges(Integer.parseInt(expeditiousCheckMatcher.group(1)));
        }
        else if (bloodEssenceCheckMatcher.find())
        {
            updateBloodEssenceCharges(Integer.parseInt(bloodEssenceCheckMatcher.group(1)));
        }
        else if (bloodEssenceExtractMatcher.find())
        {
            updateBloodEssenceCharges(getItemCharges(ItemChargeConfig.KEY_BLOOD_ESSENCE) - Integer.parseInt(bloodEssenceExtractMatcher.group(1)));
        }
        else if (message.contains(BLOOD_ESSENCE_ACTIVATE_TEXT))
        {
            updateBloodEssenceCharges(MAX_BLOOD_ESSENCE_CHARGES);
        }
        else if (braceletOfClayCheckMatcher.find())
        {
            updateBraceletOfClayCharges(Integer.parseInt(braceletOfClayCheckMatcher.group(1)));
        }
        else if (message.equals(BRACELET_OF_CLAY_USE_TEXT) || message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN))
        {
            final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);

            // Determine if the player mined with a Bracelet of Clay equipped.
            if (equipment != null && equipment.contains(ItemID.BRACELET_OF_CLAY))
            {
                final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);

                // Charge is not used if only 1 inventory slot is available when mining in Prifddinas
                boolean ignore = inventory != null
                    && inventory.count() == 27
                    && message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN);

                if (!ignore)
                {
                    int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_BRACELET_OF_CLAY) - 1, 0, MAX_BRACELET_OF_CLAY_CHARGES);
                    updateBraceletOfClayCharges(charges);
                }
            }
        }
        else if (message.equals(BRACELET_OF_CLAY_BREAK_TEXT))
        {
            notifier.notify(config.braceletOfClayNotification(), "Your bracelet of clay has crumbled to dust");
            updateBraceletOfClayCharges(MAX_BRACELET_OF_CLAY_CHARGES);
        }
    }
}
|
@Test
public void testChronicleTeleport()
{
    // A chronicle teleport message should decrement the stored charge count to 999.
    ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.GAMEMESSAGE, "", CHRONICLE_TELEPORT, "", 0);
    itemChargePlugin.onChatMessage(chatMessage);
    verify(configManager).setRSProfileConfiguration(ItemChargeConfig.GROUP, ItemChargeConfig.KEY_CHRONICLE, 999);
}
|
/**
 * Creates an unbounded MQTT read with defaults: no record cap and no read-time limit.
 */
public static Read read() {
    return new AutoValue_MqttIO_Read.Builder()
        .setMaxNumRecords(Long.MAX_VALUE)
        .setMaxReadTime(null)
        .build();
}
|
@Test(timeout = 30 * 1000)
public void testReceiveWithTimeoutAndNoData() throws Exception {
    // With no publisher on READ_TOPIC, withMaxReadTime(2s) must bound the read
    // so the pipeline terminates on its own.
    pipeline.apply(
        MqttIO.read()
            .withConnectionConfiguration(
                MqttIO.ConnectionConfiguration.create("tcp://localhost:" + port, "READ_TOPIC")
                    .withClientId("READ_PIPELINE"))
            .withMaxReadTime(Duration.standardSeconds(2)));

    // should stop before the test timeout
    pipeline.run();
}
|
/**
 * Releases a lock previously obtained through this factory and closes its
 * underlying channel, then clears the held-path bookkeeping.
 *
 * @param lock the lock to release; must have been obtained via this factory
 * @throws LockException if the lock was not obtained through this factory, or
 *         if its path was not recorded as held
 * @throws IOException if releasing the lock or closing its channel fails
 */
public static void releaseLock(FileLock lock) throws IOException {
    String lockPath = LOCK_MAP.remove(lock);
    if (lockPath == null) {
        throw new LockException("Cannot release unobtained lock");
    }
    try {
        lock.release();
    } finally {
        // Close the channel even if release() throws, so the file descriptor
        // is never leaked.
        lock.channel().close();
    }
    boolean removed = LOCK_HELD.remove(lockPath);
    if (!removed) {
        throw new LockException("Lock path was not marked as held: " + lockPath);
    }
}
|
@Test(expected = LockException.class)
public void ReleaseUnobtainedLock() throws IOException {
    // First call releases the (obtained) lock and removes its bookkeeping;
    // the second call then targets an unobtained lock and must throw.
    FileLockFactory.releaseLock(lock);
    FileLockFactory.releaseLock(lock);
}
|
/**
 * Deletes the named connector via the Kafka Connect REST API.
 *
 * <p>Both 204 (No Content) and 200 are accepted as success; on success the
 * response datum is the connector name. Errors reported by Connect are logged
 * and returned in the response; transport-level failures are wrapped in
 * {@link KsqlServerException}.
 */
@Override
public ConnectResponse<String> delete(final String connector) {
    try {
        LOG.debug("Issuing request to Kafka Connect at URI {} to delete {}",
            connectUri, connector);

        // withRetries re-issues the request on transient failures.
        final ConnectResponse<String> connectResponse = withRetries(() -> Request
            .delete(resolveUri(String.format("%s/%s", CONNECTORS, connector)))
            .setHeaders(requestHeaders)
            .responseTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
            .connectTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
            .execute(httpClient)
            .handleResponse(
                createHandler(
                    ImmutableList.of(HttpStatus.SC_NO_CONTENT, HttpStatus.SC_OK),
                    new TypeReference<Object>() {},
                    // DELETE returns no body; map success to the connector name.
                    foo -> connector)));

        connectResponse.error()
            .ifPresent(error -> LOG.warn("Could not delete connector: {}.", error));

        return connectResponse;
    } catch (final Exception e) {
        throw new KsqlServerException(e);
    }
}
|
@Test
public void testDeleteWithStatusNoContentResponse() throws JsonProcessingException {
    // Given: Connect answers the DELETE with 204 No Content (no body).
    WireMock.stubFor(
        WireMock.delete(WireMock.urlEqualTo(pathPrefix + "/connectors/foo"))
            .withHeader(AUTHORIZATION.toString(), new EqualToPattern(AUTH_HEADER))
            .withHeader(CUSTOM_HEADER_NAME, new EqualToPattern(CUSTOM_HEADER_VALUE))
            .willReturn(WireMock.aResponse()
                .withStatus(HttpStatus.SC_NO_CONTENT))
    );

    // When:
    final ConnectResponse<String> response = client.delete("foo");

    // Then: 204 counts as success and the datum is the connector name.
    assertThat(response.datum(), OptionalMatchers.of(is("foo")));
    assertThat("Expected no error!", !response.error().isPresent());
}
|
/**
 * Runs a single formatter invocation over the given command-line arguments.
 *
 * @param args the command-line arguments to parse
 * @return the process exit code (0 on success)
 * @throws UsageException if --help was requested or the arguments are unusable;
 *         the caller is expected to render usage text
 */
public int format(String... args) throws UsageException {
    CommandLineOptions parameters = processArgs(args);

    if (parameters.version()) {
        errWriter.println(versionString());
        return 0;
    }
    if (parameters.help()) {
        // Usage text is rendered by whoever catches UsageException.
        throw new UsageException();
    }

    JavaFormatterOptions options =
        JavaFormatterOptions.builder()
            .style(parameters.aosp() ? Style.AOSP : Style.GOOGLE)
            .formatJavadoc(parameters.formatJavadoc())
            .build();

    // "-" on the command line selects stdin mode.
    if (parameters.stdin()) {
        return formatStdin(parameters, options);
    } else {
        return formatFiles(parameters, options);
    }
}
|
@Test
public void noFormatJavadoc() throws Exception {
    // Javadoc that reflowing would mangle: blank lines, a long @param, and a
    // short method comment that must stay multi-line.
    String[] input = {
        "/**",
        " * graph",
        " *",
        " * graph",
        " *",
        " * @param foo lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do"
            + " eiusmod tempor incididunt ut labore et dolore magna aliqua",
        " */",
        "class Test {",
        "  /**",
        "   * creates entropy",
        "   */",
        "  public static void main(String... args) {}",
        "}",
        "",
    };
    InputStream in = new ByteArrayInputStream(joiner.join(input).getBytes(UTF_8));
    StringWriter out = new StringWriter();
    Main main =
        new Main(
            new PrintWriter(out, true),
            new PrintWriter(new BufferedWriter(new OutputStreamWriter(System.err, UTF_8)), true),
            in);
    // With --skip-javadoc-formatting the output must equal the input verbatim.
    assertThat(main.format("--skip-javadoc-formatting", "-")).isEqualTo(0);
    assertThat(out.toString()).isEqualTo(joiner.join(input));
}
|
/**
 * Writes a single message to this output by wrapping it as a filtered message
 * routed to {@code FILTER_KEY} and delegating to the batch entry writer.
 */
@Override
public void write(Message message) throws Exception {
    if (LOG.isTraceEnabled()) {
        LOG.trace("Writing message id to [{}]: <{}>", NAME, message.getId());
    }
    // Single-element batch: each write() call produces its own entry list.
    writeMessageEntries(List.of(DefaultFilteredMessage.forDestinationKeys(message, Set.of(FILTER_KEY))));
}
|
@Test
public void write() throws Exception {
    final List<Message> messageList = buildMessages(3);

    for (final var message : messageList) {
        output.write(message);
    }

    // Each message must have been indexed individually, in its own bulk request.
    for (final Message message : messageList) {
        verify(messages, times(1)).bulkIndex(eq(List.of(
            new MessageWithIndex(wrap(message), defaultIndexSet)
        )));
    }
    verifyNoMoreInteractions(messages);
}
|
/**
 * Converts this column data to a BIGINT block. A null {@code longs} array is
 * treated as all-zero values; {@code nulls} may be null meaning no nulls.
 */
@Override
public Block toBlock(Type desiredType)
{
    checkArgument(BIGINT.equals(desiredType), "type doesn't match: %s", desiredType);
    final int positions = numberOfRecords();
    final long[] values = (longs != null) ? longs : new long[positions];
    return new LongArrayBlock(positions, Optional.ofNullable(nulls), values);
}
|
@Test
public void testReadBlockAllNonNullOption1()
{
    // No nulls array: every position must be read back as a non-null long.
    PrestoThriftBlock columnsData = longColumn(
        null,
        new long[] {2, 7, 1, 3, 8, 4, 5});
    Block actual = columnsData.toBlock(BIGINT);
    assertBlockEquals(actual, list(2L, 7L, 1L, 3L, 8L, 4L, 5L));
}
|
/**
 * Sends this JSON-RPC request synchronously and deserializes the reply.
 *
 * @return the response parsed as {@code responseType}
 * @throws IOException if the underlying service call fails
 */
public T send() throws IOException {
    return web3jService.send(this, responseType);
}
|
@Test
public void testEthGetBlockByHash() throws Exception {
    // NOTE(review): the fixture hash is 63 hex chars (a real block hash has 64),
    // but the same literal is asserted in the serialized request below, so the
    // test is self-consistent — confirm before reusing the hash elsewhere.
    web3j.ethGetBlockByHash(
        "0xe670ec64341771606e55d6b4ca35a1a6b75ee3d5145a99d05921026d1527331", true)
        .send();

    // Verify the exact JSON-RPC payload produced for eth_getBlockByHash.
    verifyResult(
        "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getBlockByHash\",\"params\":["
            + "\"0xe670ec64341771606e55d6b4ca35a1a6b75ee3d5145a99d05921026d1527331\""
            + ",true],\"id\":1}");
}
|
/**
 * Removes any pending background APNs notification for the given account/device
 * from the Redis scheduling queue (a sorted set keyed by account+device).
 *
 * @return a future that completes when the removal has been issued
 */
@VisibleForTesting
CompletableFuture<Void> cancelBackgroundApnsNotifications(final Account account, final Device device) {
    return pushSchedulingCluster.withCluster(connection -> connection.async()
            .zrem(getPendingBackgroundApnsNotificationQueueKey(account, device), encodeAciAndDeviceId(account, device)))
        // Discard the ZREM count; callers only care about completion.
        .thenRun(Util.NOOP)
        .toCompletableFuture();
}
|
@Test
void testCancelBackgroundApnsNotifications() {
    final Instant now = Instant.now().truncatedTo(ChronoUnit.MILLIS);
    clock.pin(now);

    // Schedule, then cancel: both the last-sent timestamp and the next
    // scheduled timestamp must be cleared afterwards.
    pushNotificationScheduler.scheduleBackgroundApnsNotification(account, device).toCompletableFuture().join();
    pushNotificationScheduler.cancelBackgroundApnsNotifications(account, device).join();

    assertEquals(Optional.empty(),
        pushNotificationScheduler.getLastBackgroundApnsNotificationTimestamp(account, device));
    assertEquals(Optional.empty(),
        pushNotificationScheduler.getNextScheduledBackgroundApnsNotificationTimestamp(account, device));
}
|
/**
 * Returns the cached issues for the given component.
 *
 * <p>Directories always yield an empty list. For any other component type the
 * cache must already be initialized, and only the currently-cached component
 * may be queried.
 *
 * @throws IllegalStateException if issues have not been initialized
 * @throws IllegalArgumentException if {@code component} is not the cached one
 */
@Override
public List<DefaultIssue> getIssues(Component component) {
    if (component.getType() == Component.Type.DIRECTORY) {
        // No issues on directories
        return Collections.emptyList();
    }
    checkState(this.component != null && this.issues != null, "Issues have not been initialized");
    checkArgument(component.equals(this.component),
        "Only issues from component '%s' are available, but wanted component is '%s'.",
        this.component.getReportAttributes().getRef(), component.getReportAttributes().getRef());
    return issues;
}
|
@Test
public void fail_with_ISE_when_getting_issues_but_issues_are_null() {
    // Querying before initialization must fail fast with a clear message.
    assertThatThrownBy(() -> {
        sut.getIssues(FILE_1);
    })
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("Issues have not been initialized");
}
|
/** Builds the header row for the format by joining its header columns with its delimiter. */
static String headerLine(CSVFormat csvFormat) {
    final String delimiter = Character.toString(csvFormat.getDelimiter());
    return String.join(delimiter, csvFormat.getHeader());
}
|
@Test
public void givenQuoteModeMinimal_isNoop() {
    CSVFormat csvFormat = csvFormat().withQuoteMode(QuoteMode.MINIMAL);
    // First record has a quoted field containing the delimiter ("a,"); MINIMAL
    // quoting must unwrap it without splitting on the embedded comma.
    PCollection<String> input =
        pipeline.apply(Create.of(headerLine(csvFormat), "\"a,\",1,1.1", "b,2,2.2", "c,3,3.3"));
    CsvIOStringToCsvRecord underTest = new CsvIOStringToCsvRecord(csvFormat);
    CsvIOParseResult<List<String>> result = input.apply(underTest);
    PAssert.that(result.getOutput())
        .containsInAnyOrder(
            Arrays.asList(
                Arrays.asList("a,", "1", "1.1"),
                Arrays.asList("b", "2", "2.2"),
                Arrays.asList("c", "3", "3.3")));
    // No rows may be routed to the error output.
    PAssert.that(result.getErrors()).empty();
    pipeline.run();
}
|
/**
 * Initializes this store and registers the eviction listener.
 *
 * @deprecated presumably superseded by the {@code StateStoreContext} overload —
 *             confirm against the enclosing class before removal.
 */
@Deprecated
@Override
public void init(final ProcessorContext context,
                 final StateStore root) {
    this.context = asInternalProcessorContext(context);
    super.init(context, root);
    // Listener can only be attached after the wrapped store is initialized.
    maybeSetEvictionListener();
}
|
@Test
public void shouldDelegateInit() {
    final InternalMockProcessorContext context = mockContext();
    final KeyValueStore<Bytes, byte[]> innerMock = mock(InMemoryKeyValueStore.class);
    final StateStore outer = new ChangeLoggingKeyValueBytesStore(innerMock);
    // init on the wrapper must be forwarded to the wrapped store with the
    // same context and the wrapper itself as root.
    outer.init((StateStoreContext) context, outer);
    verify(innerMock).init((StateStoreContext) context, outer);
}
|
/**
 * Removes the given server instance from OOM-error dispatch tracking.
 * Delegates to the shared deregister helper with the server-instances registry.
 */
public static void deregisterServer(HazelcastInstance instance) {
    deregister(SERVER_INSTANCES_REF, instance);
}
|
@Test(expected = IllegalArgumentException.class)
public void deregister_null() {
    // Deregistering a null instance must be rejected with IllegalArgumentException.
    OutOfMemoryErrorDispatcher.deregisterServer(null);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.