focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
/**
 * Recursively prunes hash-distribution buckets by binding a value to each
 * distribution column and hashing every fully-bound key.
 *
 * @param columnId index of the distribution column being bound in this frame
 * @param hashKey  partially-built hash key holding values for columns [0, columnId)
 * @param complex  product of IN-list sizes bound so far; bounds recursion cost
 * @return the bucket ids that may contain matching rows (all buckets when
 *         pruning is not possible)
 */
public Collection<Long> prune(int columnId, HashDistributionKey hashKey, int complex) {
    if (columnId == distributionColumns.size()) {
        // All distribution columns are bound: hash the key and pick its bucket.
        long hashValue = hashKey.getHashValue();
        // NOTE(review): 0xffffffff is an int literal (-1); widened to long it is
        // all ones, so this mask is a no-op. If getHashValue() can be negative the
        // modulo result goes negative — confirm whether 0xffffffffL was intended.
        return Lists.newArrayList(bucketsList.get((int) ((hashValue & 0xffffffff) % hashMod)));
    }
    Column keyColumn = distributionColumns.get(columnId);
    PartitionColumnFilter filter = distributionColumnFilters.get(keyColumn.getName());
    if (null == filter) {
        // no filter in this column, no partition Key
        // return all subPartition
        return bucketsList;
    }
    List<LiteralExpr> inPredicateLiterals = filter.getInPredicateLiterals();
    // No IN list, or expanding it would exceed the recursion budget: only an
    // exact equality range (lower == upper, both inclusive) can still prune.
    if (null == inPredicateLiterals ||
            inPredicateLiterals.size() * complex > Config.max_distribution_pruner_recursion_depth) {
        LiteralExpr lowerBound = filter.getLowerBound();
        LiteralExpr upperBound = filter.getUpperBound();
        // equal one value
        if (filter.lowerBoundInclusive && filter.upperBoundInclusive
                && lowerBound != null && upperBound != null
                && 0 == lowerBound.compareLiteral(upperBound)) {
            try {
                boolean isConvertToDate = PartitionUtil.isConvertToDate(keyColumn.getType(), lowerBound.getType());
                hashKey.pushColumn(filter.getLowerBound(isConvertToDate), keyColumn.getType());
                Collection<Long> result = prune(columnId + 1, hashKey, complex);
                hashKey.popColumn();
                return result;
            } catch (Exception e) {
                // Best effort: fall back to scanning every bucket on any failure.
                LOG.warn("Prune distribution key {} with predicate {} failed:", keyColumn, filter, e);
                return bucketsList;
            }
        }
        // return all SubPartition
        return bucketsList;
    }
    InPredicate inPredicate = filter.getInPredicate();
    // Only a bare column reference on the left side of IN can be hashed directly.
    if (null != inPredicate && !(inPredicate.getChild(0) instanceof SlotRef)) {
        // return all SubPartition
        return bucketsList;
    }
    Set<Long> resultSet = Sets.newHashSet();
    int inElementNum = inPredicateLiterals.size();
    int newComplex = inElementNum * complex;
    // Try each IN value in turn, unioning the buckets reachable with it bound.
    for (LiteralExpr expr : inPredicateLiterals) {
        hashKey.pushColumn(expr, keyColumn.getType());
        Collection<Long> subList = prune(columnId + 1, hashKey, newComplex);
        resultSet.addAll(subList);
        hashKey.popColumn();
        // Already covers every bucket — further enumeration cannot prune more.
        if (resultSet.size() >= bucketsList.size()) {
            break;
        }
    }
    return resultSet;
}
|
// When the IN predicate's left child is a function call rather than a bare
// column reference, the pruner must give up and return every tablet.
@Test
public void test2() {
    // 300 tablets with ids 0..299.
    List<Long> tabletIds = Lists.newArrayListWithExpectedSize(300);
    for (long id = 0; id < 300; id++) {
        tabletIds.add(id);
    }
    // Distribution columns of the table under test.
    List<Column> distributionColumns = Lists.newArrayList(
            new Column("dealDate", Type.DATE, false),
            new Column("main_brand_id", Type.CHAR, false),
            new Column("item_third_cate_id", Type.CHAR, false),
            new Column("channel", Type.CHAR, false),
            new Column("shop_type", Type.CHAR, false));
    // Filter: abs(main_brand_id) IN (...) — the left child is a FunctionCallExpr,
    // not a SlotRef, so it cannot be used for bucket pruning.
    List<Expr> brandIdLiterals = Lists.newArrayList();
    for (String brandId : new String[] {"1323", "2528", "9610", "3893", "6121"}) {
        brandIdLiterals.add(new StringLiteral(brandId));
    }
    PartitionColumnFilter mainBrandFilter = new PartitionColumnFilter();
    mainBrandFilter.setInPredicate(
            new InPredicate(new FunctionCallExpr("abs", Lists.newArrayList(new SlotRef(null, "main_brand_id"))),
                    brandIdLiterals, false));
    Map<String, PartitionColumnFilter> filters = Maps.newHashMap();
    filters.put("main_brand_id", mainBrandFilter);
    HashDistributionPruner pruner =
            new HashDistributionPruner(tabletIds, distributionColumns, filters, tabletIds.size());
    Collection<Long> prunedTablets = pruner.prune();
    // No pruning possible — every tablet must be kept.
    Assert.assertEquals(tabletIds.size(), prunedTablets.size());
}
|
/**
 * Reads all distinct lines from the given file, preserving first-seen order.
 *
 * @param path           file to read; a missing or non-regular file yields an empty list
 * @param ignoreComments when {@code true}, lines starting with {@code #} are skipped
 * @return de-duplicated lines in the order they first appear
 * @throws IOException if reading the file fails
 */
public static List<String> readLines(Path path, boolean ignoreComments) throws IOException {
    File file = path.toFile();
    if (!file.isFile()) {
        return new ArrayList<>();
    }
    // LinkedHashSet keeps insertion order while making the duplicate check O(1),
    // replacing the original O(n) List#contains scan per line (O(n^2) overall).
    Set<String> lines = new LinkedHashSet<>();
    // NOTE(review): FileReader uses the platform default charset (pre-Java 18);
    // kept as-is to preserve behavior — consider passing an explicit charset.
    try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
        String line;
        while ((line = reader.readLine()) != null) {
            if (!(ignoreComments && line.startsWith("#"))) {
                lines.add(line);
            }
        }
    }
    return new ArrayList<>(lines);
}
|
// Verifies both modes of FileUtils.readLines against the same sample file:
// with ignoreComments, '#' lines are dropped and duplicates collapsed;
// without it, comment lines are kept (duplicates still collapsed).
@Test
public void readLinesIgnoreCommentTest() throws IOException {
    File file = createSampleFile("test");
    // ignoreComments = true
    List<String> ignoreCommentsLines = FileUtils.readLines(file.toPath(), true);
    assertEquals("1 content", ignoreCommentsLines.get(0));
    assertEquals(2, ignoreCommentsLines.size());
    // ignoreComments = false
    List<String> lines = FileUtils.readLines(file.toPath(), false);
    assertEquals("# 1 comment", lines.get(0));
    assertEquals(4, lines.size());
    // Clean up the sample file once the JVM exits.
    file.deleteOnExit();
}
|
/**
 * Logs one completed query at INFO level, subject to rate limiting.
 * Queries that exceed the rate limit are counted as dropped unless
 * {@code shouldForceLog} says they must be logged anyway; the dropped
 * count is itself reported periodically under a second rate limiter.
 */
public void log(QueryLogParams params) {
    _logger.debug("Broker Response: {}", params._response);
    // Rate-limit the INFO log; forced logs bypass the limiter.
    if (!(_logRateLimiter.tryAcquire() || shouldForceLog(params))) {
        _numDroppedLogs.incrementAndGet();
        return;
    }
    // Render every configured log entry as "key=value," fragments.
    final StringBuilder queryLogBuilder = new StringBuilder();
    for (QueryLogEntry value : QUERY_LOG_ENTRY_VALUES) {
        value.format(queryLogBuilder, this, params);
        queryLogBuilder.append(',');
    }
    // always log the query last - don't add this to the QueryLogEntry enum
    queryLogBuilder.append("query=")
        .append(StringUtils.substring(params._requestContext.getQuery(), 0, _maxQueryLengthToLog));
    _logger.info(queryLogBuilder.toString());
    if (_droppedLogRateLimiter.tryAcquire()) {
        // use getAndSet to 0 so that there will be no race condition between
        // loggers that increment this counter and this thread
        long numDroppedLogsSinceLastLog = _numDroppedLogs.getAndSet(0);
        if (numDroppedLogsSinceLastLog > 0) {
            _logger.warn("{} logs were dropped. (log max rate per second: {})", numDroppedLogsSinceLastLog,
                _logRateLimiter.getRate());
        }
    }
}
|
// Even with the rate limiter exhausted, a query that hit the num-groups limit
// must still be logged (forced log path).
@Test
public void shouldForceLogWhenNumGroupsLimitIsReached() {
    // Given: rate limiter denies, params flag the groups limit as reached.
    Mockito.when(_logRateLimiter.tryAcquire()).thenReturn(false);
    QueryLogger.QueryLogParams params = generateParams(true, 0, 456);
    QueryLogger queryLogger = new QueryLogger(_logRateLimiter, 100, true, _logger, _droppedRateLimiter);
    // When:
    queryLogger.log(params);
    // Then: exactly one INFO line was emitted despite the limiter.
    Assert.assertEquals(_infoLog.size(), 1);
}
|
/**
 * Decodes a raw syslog message into a {@link Message}, timing the parse and
 * passing along the sender's address when the transport captured one.
 */
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
    final String msg = new String(rawMessage.getPayload(), charset);
    try (Timer.Context ignored = this.decodeTime.time()) {
        // Resolve the remote socket address, if any, without forcing a DNS lookup.
        final ResolvableInetSocketAddress address = rawMessage.getRemoteAddress();
        final InetSocketAddress remoteAddress =
                address == null ? null : address.getInetSocketAddress();
        return parse(msg,
                remoteAddress == null ? null : remoteAddress.getAddress(),
                rawMessage.getTimestamp());
    }
}
|
// With CK_EXPAND_STRUCTURED_DATA enabled, each RFC 5424 structured-data
// element must be expanded into per-key message fields prefixed by its SD-ID
// (here "mdc@18060_*"), alongside the standard syslog fields.
@Test
public void testDecodeStructuredIssue845WithExpandStructuredData() throws Exception {
    when(configuration.getBoolean(SyslogCodec.CK_EXPAND_STRUCTURED_DATA)).thenReturn(true);
    final SyslogCodec codec = new SyslogCodec(configuration, metricRegistry, messageFactory);
    final Message message = codec.decode(buildRawMessage(STRUCTURED_ISSUE_845));
    assertNotNull(message);
    assertEquals("User page 13 requested", message.getMessage());
    assertEquals(new DateTime("2015-01-06T20:56:33.287Z", DateTimeZone.UTC), ((DateTime) message.getField("timestamp")).withZone(DateTimeZone.UTC));
    assertEquals("app-1", message.getField("source"));
    assertEquals(6, message.getField("level"));
    assertEquals("local7", message.getField("facility"));
    // Structured-data fields expanded under the SD-ID prefix.
    assertEquals("::ffff:132.123.15.30", message.getField("mdc@18060_ip"));
    assertEquals("{c.corp.Handler}", message.getField("mdc@18060_logger"));
    assertEquals("4ot7", message.getField("mdc@18060_session"));
    assertEquals("user@example.com", message.getField("mdc@18060_user"));
    assertEquals("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.2.5 (KHTML, like Gecko) Version/7.1.2 Safari/537.85.11", message.getField("mdc@18060_user-agent"));
    assertEquals("app", message.getField("application_name"));
    assertEquals(23, message.getField("facility_num"));
}
|
/**
 * Computes the configured measure formulas for every component by crawling
 * the report tree from its root with a formula-executing visitor.
 */
public void execute() {
    new PathAwareCrawler<>(
        FormulaExecutorComponentVisitor.newBuilder(metricRepository, measureRepository).buildFor(formulas))
        .visit(treeRootHolder.getReportTreeRoot());
}
|
// Cross-project duplicates must not inflate the duplicated-lines measure:
// only the original one-line block is counted.
@Test
public void compute_duplicated_lines_counts_lines_from_original_and_ignores_CrossProjectDuplicate() {
    TextBlock original = new TextBlock(1, 1);
    duplicationRepository.addCrossProjectDuplication(FILE_1_REF, original, SOME_FILE_KEY, new TextBlock(2, 2));
    underTest.execute();
    assertRawMeasureValue(FILE_1_REF, DUPLICATED_LINES_KEY, 1);
}
|
/**
 * Pushes service data to one subscriber, first rewriting the service name for
 * that subscriber and recording the rewritten info on the callback so the
 * callback can report what was actually sent.
 */
@Override
public void doPushWithCallback(String clientId, Subscriber subscriber, PushDataWrapper data,
                               NamingPushCallback callBack) {
    final ServiceInfo serviceInfo = replaceServiceInfoName(data, subscriber);
    callBack.setActualServiceInfo(serviceInfo);
    // Filter by the subscriber's clusters before pushing.
    pushService.pushDataWithCallback(subscriber, handleClusterData(serviceInfo, subscriber), callBack);
}
|
// A successful push through the push service must propagate to the caller's
// callback via onSuccess.
@Test
void testDoPushWithCallback() {
    doAnswer(new CallbackAnswer()).when(pushService).pushDataWithCallback(eq(subscriber), any(ServiceInfo.class), eq(pushCallBack));
    pushExecutor.doPushWithCallback(rpcClientId, subscriber, pushData, pushCallBack);
    verify(pushCallBack).onSuccess();
}
|
/**
 * Finds namespace bundles to unload (and, when transfer mode is enabled, the
 * broker to transfer them to) so that per-broker load converges toward the
 * cluster average.
 *
 * <p>Repeatedly pairs the most-loaded broker with the least-loaded one and marks
 * top bundles of the max broker for unload until roughly half the load gap is
 * offloaded. May also "swap" small bundles from the min broker to the max broker
 * when that produces a better balance.
 *
 * @param recentlyUnloadedBundles bundles unloaded recently; skipped to avoid churn
 * @param recentlyUnloadedBrokers brokers unloaded recently; excluded from stats
 * @return cached set of unload decisions for this shedding round
 */
@Override
public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context,
                                                   Map<String, Long> recentlyUnloadedBundles,
                                                   Map<String, Long> recentlyUnloadedBrokers) {
    final var conf = context.brokerConfiguration();
    decisionCache.clear();
    stats.clear();
    Map<String, BrokerLookupData> availableBrokers;
    try {
        availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync()
                .get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS);
    } catch (ExecutionException | InterruptedException | TimeoutException e) {
        if (e instanceof InterruptedException) {
            // Restore the interrupt status so upstream callers can observe it.
            Thread.currentThread().interrupt();
        }
        counter.update(Failure, Unknown);
        log.warn("Failed to fetch available brokers. Stop unloading.", e);
        return decisionCache;
    }
    try {
        final var loadStore = context.brokerLoadDataStore();
        stats.setLoadDataStore(loadStore);
        boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log);
        var skipReason = stats.update(
                context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf);
        if (skipReason.isPresent()) {
            if (debugMode) {
                log.warn(CANNOT_CONTINUE_UNLOAD_MSG
                                + " Skipped the load stat update. Reason:{}.",
                        skipReason.get());
            }
            counter.update(Skip, skipReason.get());
            return decisionCache;
        }
        counter.updateLoadData(stats.avg, stats.std);
        if (debugMode) {
            log.info("brokers' load stats:{}", stats);
        }
        // skip metrics
        int numOfBrokersWithEmptyLoadData = 0;
        int numOfBrokersWithFewBundles = 0;
        final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd();
        boolean transfer = conf.isLoadBalancerTransferEnabled();
        // Only shed after the imbalance condition has held for enough rounds,
        // to avoid reacting to transient load spikes.
        if (stats.std() > targetStd
                || isUnderLoaded(context, stats.peekMinBroker(), stats)
                || isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
            unloadConditionHitCount++;
        } else {
            unloadConditionHitCount = 0;
        }
        if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) {
            if (debugMode) {
                log.info(CANNOT_CONTINUE_UNLOAD_MSG
                                + " Shedding condition hit count:{} is less than or equal to the threshold:{}.",
                        unloadConditionHitCount, conf.getLoadBalancerSheddingConditionHitCountThreshold());
            }
            counter.update(Skip, HitCount);
            return decisionCache;
        }
        // Pair max/min brokers until no transferable pair remains or balance is met.
        while (true) {
            if (!stats.hasTransferableBrokers()) {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG
                            + " Exhausted target transfer brokers.");
                }
                break;
            }
            UnloadDecision.Reason reason;
            if (stats.std() > targetStd) {
                reason = Overloaded;
            } else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) {
                reason = Underloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is underloaded:%s although "
                                    + "load std:%.2f <= targetStd:%.2f. "
                                    + "Continuing unload for this underloaded broker.",
                            stats.peekMinBroker(),
                            context.brokerLoadDataStore().get(stats.peekMinBroker()).get(),
                            stats.std(), targetStd));
                }
            } else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
                reason = Overloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is overloaded:%s although "
                                    + "load std:%.2f <= targetStd:%.2f. "
                                    + "Continuing unload for this overloaded broker.",
                            stats.peekMaxBroker(),
                            context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(),
                            stats.std(), targetStd));
                }
            } else {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG
                                    + "The overall cluster load meets the target, std:{} <= targetStd:{}."
                                    + "minBroker:{} is not underloaded. maxBroker:{} is not overloaded.",
                            stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker());
                }
                break;
            }
            String maxBroker = stats.pollMaxBroker();
            String minBroker = stats.peekMinBroker();
            Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker);
            Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker);
            if (maxBrokerLoadData.isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " MaxBrokerLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            if (minBrokerLoadData.isEmpty()) {
                log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker);
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            // Aim to offload half of the load gap between max and min broker.
            double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA();
            double minLoad = minBrokerLoadData.get().getWeightedMaxEMA();
            double offload = (maxLoad - minLoad) / 2;
            BrokerLoadData brokerLoadData = maxBrokerLoadData.get();
            double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn()
                    + brokerLoadData.getMsgThroughputOut();
            double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn()
                    + minBrokerLoadData.get().getMsgThroughputOut();
            double offloadThroughput = maxBrokerThroughput * offload / maxLoad;
            if (debugMode) {
                log.info(String.format(
                        "Attempting to shed load from broker:%s%s, which has the max resource "
                                + "usage:%.2f%%, targetStd:%.2f,"
                                + " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.",
                        maxBroker, transfer ? " to broker:" + minBroker : "",
                        maxLoad * 100,
                        targetStd,
                        offload * 100,
                        offloadThroughput / KB
                ));
            }
            double trafficMarkedToOffload = 0;
            double trafficMarkedToGain = 0;
            Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker);
            if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " TopBundlesLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData();
            if (maxBrokerTopBundlesLoadData.size() == 1) {
                // A single bundle cannot be split across brokers by unloading.
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                                + " Sole namespace bundle:%s is overloading the broker. ",
                        maxBroker, maxBrokerTopBundlesLoadData.iterator().next()));
                continue;
            }
            Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker);
            var minBrokerTopBundlesLoadDataIter =
                    minBundlesLoadData.isPresent() ? minBundlesLoadData.get().getTopBundlesLoadData().iterator() :
                            null;
            if (maxBrokerTopBundlesLoadData.isEmpty()) {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " Broker overloaded despite having no bundles", maxBroker));
                continue;
            }
            int remainingTopBundles = maxBrokerTopBundlesLoadData.size();
            for (var e : maxBrokerTopBundlesLoadData) {
                String bundle = e.bundleName();
                if (channel != null && !channel.isOwner(bundle, maxBroker)) {
                    if (debugMode) {
                        log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " MaxBroker:%s is not the owner.", bundle, maxBroker));
                    }
                    continue;
                }
                if (recentlyUnloadedBundles.containsKey(bundle)) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                        + " Bundle has been recently unloaded at ts:%d.",
                                bundle, recentlyUnloadedBundles.get(bundle)));
                    }
                    continue;
                }
                if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " This unload can't meet "
                                + "affinity(isolation) or anti-affinity group policies.", bundle));
                    }
                    continue;
                }
                if (remainingTopBundles <= 1) {
                    // Always leave at least one top bundle on the max broker.
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                        + " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is"
                                        + " less than or equal to 1.",
                                bundle, maxBroker));
                    }
                    break;
                }
                var bundleData = e.stats();
                double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut;
                boolean swap = false;
                List<Unload> minToMaxUnloads = new ArrayList<>();
                double minBrokerBundleSwapThroughput = 0.0;
                if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) {
                    // see if we can swap bundles from min to max broker to balance better.
                    if (transfer && minBrokerTopBundlesLoadDataIter != null) {
                        var maxBrokerNewThroughput =
                                maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain
                                        - maxBrokerBundleThroughput;
                        var minBrokerNewThroughput =
                                minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain
                                        + maxBrokerBundleThroughput;
                        while (minBrokerTopBundlesLoadDataIter.hasNext()) {
                            var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next();
                            if (!isTransferable(context, availableBrokers,
                                    minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) {
                                continue;
                            }
                            var minBrokerBundleThroughput =
                                    minBrokerBundleData.stats().msgThroughputIn
                                            + minBrokerBundleData.stats().msgThroughputOut;
                            var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput;
                            var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput;
                            if (maxBrokerNewThroughputTmp < maxBrokerThroughput
                                    && minBrokerNewThroughputTmp < maxBrokerThroughput) {
                                minToMaxUnloads.add(new Unload(minBroker,
                                        minBrokerBundleData.bundleName(), Optional.of(maxBroker)));
                                maxBrokerNewThroughput = maxBrokerNewThroughputTmp;
                                minBrokerNewThroughput = minBrokerNewThroughputTmp;
                                minBrokerBundleSwapThroughput += minBrokerBundleThroughput;
                                if (minBrokerNewThroughput <= maxBrokerNewThroughput
                                        && maxBrokerNewThroughput < maxBrokerThroughput * 0.75) {
                                    swap = true;
                                    break;
                                }
                            }
                        }
                    }
                    if (!swap) {
                        if (debugMode) {
                            log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                            + " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is "
                                            + "greater than the target :%.2f KByte/s.",
                                    bundle,
                                    (trafficMarkedToOffload + maxBrokerBundleThroughput) / KB,
                                    trafficMarkedToGain / KB,
                                    (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB,
                                    offloadThroughput / KB));
                        }
                        break;
                    }
                }
                Unload unload;
                if (transfer) {
                    if (swap) {
                        minToMaxUnloads.forEach(minToMaxUnload -> {
                            if (debugMode) {
                                log.info("Decided to gain bundle:{} from min broker:{}",
                                        minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker());
                            }
                            var decision = new UnloadDecision();
                            decision.setUnload(minToMaxUnload);
                            decision.succeed(reason);
                            decisionCache.add(decision);
                        });
                        if (debugMode) {
                            log.info(String.format(
                                    "Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.",
                                    minBrokerBundleSwapThroughput / KB, minBroker, maxBroker));
                        }
                        // BUG FIX: the swapped-in traffic must be accounted for regardless of
                        // debug logging. Previously this accumulation ran only when debugMode
                        // was on, so offload accounting diverged between debug and normal runs.
                        trafficMarkedToGain += minBrokerBundleSwapThroughput;
                    }
                    unload = new Unload(maxBroker, bundle, Optional.of(minBroker));
                } else {
                    unload = new Unload(maxBroker, bundle);
                }
                var decision = new UnloadDecision();
                decision.setUnload(unload);
                decision.succeed(reason);
                decisionCache.add(decision);
                trafficMarkedToOffload += maxBrokerBundleThroughput;
                remainingTopBundles--;
                if (debugMode) {
                    log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s."
                                    + " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s."
                                    + " Target:%.2f KByte/s.",
                            bundle, maxBrokerBundleThroughput / KB,
                            trafficMarkedToOffload / KB,
                            trafficMarkedToGain / KB,
                            (trafficMarkedToOffload - trafficMarkedToGain) / KB,
                            offloadThroughput / KB));
                }
            }
            if (trafficMarkedToOffload > 0) {
                // Fold the net offloaded traffic back into the stats so the next
                // iteration pairs brokers against the projected load.
                var adjustedOffload =
                        (trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput;
                stats.offload(maxLoad, minLoad, adjustedOffload);
                if (debugMode) {
                    log.info(
                            String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}",
                                    stats, maxLoad, minLoad, adjustedOffload));
                }
            } else {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " There is no bundle that can be unloaded in top bundles load data. "
                        + "Consider splitting bundles owned by the broker "
                        + "to make each bundle serve less traffic "
                        + "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport"
                        + " to report more bundles in the top bundles load data.", maxBroker));
            }
        } // while end
        if (debugMode) {
            log.info("decisionCache:{}", decisionCache);
        }
        if (decisionCache.isEmpty()) {
            UnloadDecision.Reason reason;
            if (numOfBrokersWithEmptyLoadData > 0) {
                reason = NoLoadData;
            } else if (numOfBrokersWithFewBundles > 0) {
                reason = NoBundles;
            } else {
                reason = HitCount;
            }
            counter.update(Skip, reason);
        } else {
            unloadConditionHitCount = 0;
        }
    } catch (Throwable e) {
        log.error("Failed to process unloading. ", e);
        this.counter.update(Failure, Unknown);
    }
    return decisionCache;
}
|
// With one heavily skewed overloaded broker, the shedder must decide to move
// the top bundle off broker99 to one of the acceptable underloaded targets.
@Test
public void testOverloadOutlier() {
    UnloadCounter counter = new UnloadCounter();
    TransferShedder transferShedder = new TransferShedder(counter);
    var ctx = setupContextLoadSkewedOverload(100);
    var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of());
    // Target broker choice may vary between two equally valid candidates.
    Assertions.assertThat(res).isIn(
            Set.of(new UnloadDecision(
                    new Unload("broker99:8080", "my-tenant/my-namespace99/0x00000000_0x0FFFFFFF",
                            Optional.of("broker52:8080")), Success, Underloaded)),
            Set.of(new UnloadDecision(
                    new Unload("broker99:8080", "my-tenant/my-namespace99/0x00000000_0x0FFFFFFF",
                            Optional.of("broker83:8080")), Success, Underloaded))
    );
    assertEquals(counter.getLoadAvg(), 0.019900000000000008, 0.00001);
    assertEquals(counter.getLoadStd(), 0.09850375627355534, 0.00001);
}
|
/**
 * Converts a number to its string form, returning the supplied default when
 * the number is {@code null}.
 *
 * @param number       value to convert, may be {@code null}
 * @param defaultValue value returned when {@code number} is {@code null}
 * @return string form of the number, or {@code defaultValue}
 */
public static String toStr(Number number, String defaultValue) {
    if (number == null) {
        return defaultValue;
    }
    return toStr(number);
}
|
// toStr must strip insignificant trailing zeros from BigDecimal values,
// including zero results produced by subtraction at various scales.
@Test
public void toStrTest(){
    assertEquals("1", NumberUtil.toStr(new BigDecimal("1.0000000000")));
    assertEquals("0", NumberUtil.toStr(NumberUtil.sub(new BigDecimal("9600.00000"), new BigDecimal("9600.00000"))));
    assertEquals("0", NumberUtil.toStr(NumberUtil.sub(new BigDecimal("9600.0000000000"), new BigDecimal("9600.000000"))));
    assertEquals("0", NumberUtil.toStr(new BigDecimal("9600.00000").subtract(new BigDecimal("9600.000000000"))));
}
|
/**
 * Parses a crontab expression into a {@link CronExpression}.
 *
 * <p>Accepts 6 fields (seconds resolution) or 5 fields (minutes resolution):
 * [seconds] minutes hours day-of-month month day-of-week.
 *
 * @param expression the crontab expression, case-insensitive
 * @return the parsed expression
 * @throws InvalidCronExpressionException if the expression is empty, has the
 *         wrong number of fields, combines "L" in both day fields, or can
 *         never fire
 */
public static CronExpression create(String expression) {
    if (expression.isEmpty()) {
        throw new InvalidCronExpressionException("empty expression");
    }
    String[] fields = expression.trim().toLowerCase().split("\\s+");
    int count = fields.length;
    if (count > 6 || count < 5) {
        throw new InvalidCronExpressionException(
            "crontab expression should have 6 fields for (seconds resolution) or 5 fields for (minutes resolution)");
    }
    CronExpression cronExpression = new CronExpression();
    cronExpression.hasSecondsField = count == 6;
    String token;
    int index = 0;
    if (cronExpression.hasSecondsField) {
        token = fields[index++];
        cronExpression.seconds = CronExpression.SECONDS_FIELD_PARSER.parse(token);
    } else {
        // 5-field expressions implicitly fire at second 0.
        cronExpression.seconds = new BitSet(1);
        cronExpression.seconds.set(0);
    }
    token = fields[index++];
    cronExpression.minutes = CronExpression.MINUTES_FIELD_PARSER.parse(token);
    token = fields[index++];
    cronExpression.hours = CronExpression.HOURS_FIELD_PARSER.parse(token);
    token = fields[index++];
    // Remember the raw day-of-month token for the "L"-in-day-of-week validation below.
    String daysToken = token;
    cronExpression.days = CronExpression.DAYS_FIELD_PARSER.parse(token);
    cronExpression.isLastDayOfMonth = token.equals("l");
    boolean daysStartWithAsterisk = token.startsWith("*");
    token = fields[index++];
    cronExpression.months = CronExpression.MONTHS_FIELD_PARSER.parse(token);
    token = fields[index++];
    cronExpression.daysOfWeek = CronExpression.DAY_OF_WEEK_FIELD_PARSER.parse(token);
    boolean daysOfWeekStartAsterisk = token.startsWith("*");
    // "<weekday>l" means the last such weekday of the month, e.g. "5l".
    if (token.length() == 2 && token.endsWith("l")) {
        if (cronExpression.isLastDayOfMonth) {
            throw new InvalidCronExpressionException("You can only specify the last day of month week in either the DAY field or in the DAY_OF_WEEK field, not both.");
        }
        if (!daysToken.equalsIgnoreCase("*")) {
            throw new InvalidCronExpressionException("when last days of month is specified. the day of the month must be \"*\"");
        }
        // this flag will be used later during finding the next schedule as some months have less than 31 days
        cronExpression.isSpecificLastDayOfMonth = true;
    }
    cronExpression.daysOf5Weeks = generateDaysOf5Weeks(cronExpression.daysOfWeek);
    // If either day field is a wildcard, the two constrain each other (intersect);
    // otherwise a date matches when either field matches (union), per cron convention.
    cronExpression.daysAndDaysOfWeekRelation = (daysStartWithAsterisk || daysOfWeekStartAsterisk)
        ? DaysAndDaysOfWeekRelation.INTERSECT
        : DaysAndDaysOfWeekRelation.UNION;
    if (!cronExpression.canScheduleActuallyOccur())
        throw new InvalidCronExpressionException("Cron expression not valid. The specified months do not have the day 30th or the day 31st");
    cronExpression.expression = expression.trim();
    return cronExpression;
}
|
// A single unparsable token has neither 5 nor 6 fields and must be rejected.
@Test
void invalidCronExpressionThrowsException() {
    assertThatThrownBy(() -> CronExpression.create("invalid")).isInstanceOf(InvalidCronExpressionException.class);
}
|
/**
 * Determines whether a file exists, answering from the cached parent
 * directory listing when available and otherwise delegating while caching
 * the listing produced as a side effect.
 *
 * @return {@code true} if the file was found
 */
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        // The root has no parent listing to consult; always delegate.
        return delegate.find(file, listener);
    }
    if(cache.isValid(file.getParent())) {
        // Answer from the cached listing of the parent directory.
        final AttributedList<Path> list = cache.get(file.getParent());
        final Path found = list.find(new ListFilteringFeature.ListFilteringPredicate(sensitivity, file));
        if(found != null) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Found %s in cache", file));
            }
            return true;
        }
        if(log.isDebugEnabled()) {
            log.debug(String.format("Cached directory listing does not contain %s", file));
        }
        return false;
    }
    // Cache miss: delegate, capturing the listing chunks for future lookups.
    final CachingListProgressListener caching = new CachingListProgressListener(cache);
    final boolean found = delegate.find(file, new ProxyListProgressListener(listener, caching));
    caching.cache();
    return found;
}
|
// After one delegated lookup populates the cache, a case-sensitive search for
// "F" must miss even though "f" is cached in the same directory listing.
@Test
public void testFindCaseSensitive() throws Exception {
    final PathCache cache = new PathCache(1);
    final Path directory = new Path("/", EnumSet.of(Path.Type.directory));
    final Path file = new Path(directory, "f", EnumSet.of(Path.Type.file));
    final CachingFindFeature feature = new CachingFindFeature(Protocol.Case.sensitive, cache, new DefaultFindFeature(new NullSession(new Host(new TestProtocol())) {
        @Override
        public AttributedList<Path> list(final Path folder, final ListProgressListener listener) throws ConnectionCanceledException {
            listener.chunk(folder, new AttributedList<>(Collections.singleton(file)));
            return new AttributedList<>(Collections.singleton(file));
        }
        @Override
        public Protocol.Case getCaseSensitivity() {
            return Protocol.Case.sensitive;
        }
    }));
    // First lookup delegates and caches the parent listing.
    assertTrue(feature.find(file, new DisabledListProgressListener()));
    assertEquals(1, cache.size());
    assertTrue(cache.isCached(directory));
    // Second lookup is answered from cache and respects case sensitivity.
    assertFalse(feature.find(new Path(directory, "F", EnumSet.of(Path.Type.file)), new DisabledListProgressListener()));
}
|
/**
 * Returns a view of this double array for assertions using exact bitwise
 * equality rather than a tolerance-based comparison.
 */
public DoubleArrayAsIterable usingExactEquality() {
    return new DoubleArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
}
|
// containsExactly under exact equality is order-independent: the same values
// in a different order must still match.
@Test
public void usingExactEquality_containsExactly_primitiveDoubleArray_success() {
    assertThat(array(1.1, 2.2, 3.3)).usingExactEquality().containsExactly(array(2.2, 1.1, 3.3));
}
|
/**
 * Returns a {@link ListIterator} positioned before {@code initialIndex},
 * built on top of the one-directional {@code iterator(int)} factory.
 *
 * <p>Forward traversal reuses a cached forward iterator; moving backwards
 * invalidates it, forcing a fresh iterator to be created on the next forward
 * step.
 *
 * @throws IndexOutOfBoundsException if {@code initialIndex} is out of range
 */
@Override @Nonnull public ListIterator<T> listIterator(final int initialIndex) {
    final Iterator<T> initialIterator;
    try {
        initialIterator = iterator(initialIndex);
    } catch (NoSuchElementException ex) {
        // Normalize the factory's exception to the ListIterator contract.
        throw new IndexOutOfBoundsException();
    }
    return new AbstractListIterator<T>() {
        // Index of the element that previous() would return; next() returns index+1.
        private int index = initialIndex - 1;
        // Cached iterator positioned at index+1; null after a backwards move.
        @Nullable private Iterator<T> forwardIterator = initialIterator;
        @Nonnull
        private Iterator<T> getForwardIterator() {
            if (forwardIterator == null) {
                try {
                    forwardIterator = iterator(index+1);
                } catch (IndexOutOfBoundsException ex) {
                    throw new NoSuchElementException();
                }
            }
            return forwardIterator;
        }
        @Override public boolean hasNext() {
            return getForwardIterator().hasNext();
        }
        @Override public boolean hasPrevious() {
            return index >= 0;
        }
        @Override public T next() {
            T ret = getForwardIterator().next();
            index++;
            return ret;
        }
        @Override public int nextIndex() {
            return index+1;
        }
        @Override public T previous() {
            // Invalidate the forward iterator; it no longer matches our position.
            forwardIterator = null;
            try {
                return iterator(index--).next();
            } catch (IndexOutOfBoundsException ex) {
                throw new NoSuchElementException();
            }
        }
        @Override public int previousIndex() {
            return index;
        }
    };
}
|
// Walks the list iterator backwards from position 100 to the start.
@Test
public void testReverseIteration() {
    testReverseIterationImpl(list.listIterator(100));
}
|
/**
 * Enables or disables the dialog widgets according to the selected HTTP
 * method: body-related widgets for methods that carry a request body, and
 * parameter-related widgets for methods that use parameters.
 */
protected void setMethod() {
    boolean bodyEnabled = RestMeta.isActiveBody( wMethod.getText() );
    boolean paramsEnabled = RestMeta.isActiveParameters( wMethod.getText() );
    // Body widgets.
    wlBody.setEnabled( bodyEnabled );
    wBody.setEnabled( bodyEnabled );
    wApplicationType.setEnabled( bodyEnabled );
    // Query-parameter widgets.
    wlParameters.setEnabled( paramsEnabled );
    wParameters.setEnabled( paramsEnabled );
    wGet.setEnabled( paramsEnabled );
    // Matrix-parameter widgets.
    wlMatrixParameters.setEnabled( paramsEnabled );
    wMatrixParameters.setEnabled( paramsEnabled );
    wMatrixGet.setEnabled( paramsEnabled );
}
|
// PUT carries a body and uses parameters, so every widget group is enabled.
@Test
public void testSetMethod_PUT() {
    doReturn( RestMeta.HTTP_METHOD_PUT ).when( method ).getText();
    dialog.setMethod();
    verify( bodyl, times( 1 ) ).setEnabled( true );
    verify( body, times( 1 ) ).setEnabled( true );
    verify( type, times( 1 ) ).setEnabled( true );
    verify( paramsl, times( 1 ) ).setEnabled( true );
    verify( params, times( 1 ) ).setEnabled( true );
    verify( paramsb, times( 1 ) ).setEnabled( true );
    verify( matrixl, times( 1 ) ).setEnabled( true );
    verify( matrix, times( 1 ) ).setEnabled( true );
    verify( matrixb, times( 1 ) ).setEnabled( true );
}
|
/**
 * Renders the sign of {@code v} followed by the two lowercase hex digits of
 * its lowest byte, e.g. {@code "+7b"} for 123.
 *
 * <p>Note: for {@code Integer.MIN_VALUE} the negation overflows back to
 * itself, which still yields {@code "-00"} since only the low byte is used.
 */
public static String s1(int v) {
    char sign;
    if (v < 0) {
        sign = '-';
        v = -v;
    } else {
        sign = '+';
    }
    char high = Character.forDigit((v >> 4) & 0x0f, 16);
    char low = Character.forDigit(v & 0x0f, 16);
    return new String(new char[] {sign, high, low});
}
|
// s1 prints a sign character plus the low byte of the value in hex;
// MIN_VALUE survives the overflowing negation and still prints "-00".
@Test
public void testS1() {
    Assert.assertEquals("+00", Hex.s1(0));
    Assert.assertEquals("-00", Hex.s1(-2147483648));
    Assert.assertEquals("+7b", Hex.s1(123));
    Assert.assertEquals("+d2", Hex.s1(1234567890));
}
|
/**
 * Builds the plain-text email for a built-in quality-profile change
 * notification, listing each updated profile with a changelog link and its
 * new/updated/removed rule counts.
 *
 * @return the email message, or {@code null} when the notification is not a
 *         built-in-profile-change notification
 */
@Override
@CheckForNull
public EmailMessage format(Notification notification) {
    if (!BuiltInQPChangeNotification.TYPE.equals(notification.getType())) {
        return null;
    }
    BuiltInQPChangeNotificationBuilder profilesNotification = parse(notification);
    StringBuilder message = new StringBuilder("The following built-in profiles have been updated:\n\n");
    // One section per profile, sorted by language then profile name.
    profilesNotification.getProfiles().stream()
        .sorted(Comparator.comparing(Profile::getLanguageName).thenComparing(Profile::getProfileName))
        .forEach(profile -> {
            // Profile header with a deep link to its changelog for the update window.
            message.append("\"")
                .append(profile.getProfileName())
                .append("\" - ")
                .append(profile.getLanguageName())
                .append(": ")
                .append(server.getPublicRootUrl()).append("/profiles/changelog?language=")
                .append(profile.getLanguageKey())
                .append("&name=")
                .append(encode(profile.getProfileName()))
                .append("&since=")
                .append(formatDate(new Date(profile.getStartDate())))
                .append("&to=")
                .append(formatDate(new Date(profile.getEndDate())))
                .append("\n");
            int newRules = profile.getNewRules();
            if (newRules > 0) {
                message.append(" ").append(newRules).append(" new rule")
                    .append(plural(newRules))
                    .append('\n');
            }
            int updatedRules = profile.getUpdatedRules();
            if (updatedRules > 0) {
                message.append(" ").append(updatedRules).append(" rule")
                    .append(updatedRules > 1 ? "s have been updated" : " has been updated")
                    .append("\n");
            }
            int removedRules = profile.getRemovedRules();
            if (removedRules > 0) {
                message.append(" ").append(removedRules).append(" rule")
                    .append(plural(removedRules))
                    .append(" removed\n");
            }
            message.append("\n");
        });
    message.append("This is a good time to review your quality profiles and update them to benefit from the latest evolutions: ");
    message.append(server.getPublicRootUrl()).append("/profiles");
    // And finally return the email that will be sent
    return new EmailMessage()
        .setMessageId(BuiltInQPChangeNotification.TYPE)
        .setSubject("Built-in quality profiles have been updated")
        .setPlainTextMessage(message.toString());
}
|
// The formatted email always carries the fixed built-in-profile subject line.
@Test
public void notification_contains_a_subject() {
    String profileName = newProfileName();
    String languageKey = newLanguageKey();
    String languageName = newLanguageName();
    BuiltInQPChangeNotificationBuilder notification = new BuiltInQPChangeNotificationBuilder()
        .addProfile(Profile.newBuilder()
            .setProfileName(profileName)
            .setLanguageKey(languageKey)
            .setLanguageName(languageName)
            .setNewRules(2)
            .build());
    EmailMessage emailMessage = underTest.format(notification.build());
    assertThat(emailMessage.getSubject()).isEqualTo("Built-in quality profiles have been updated");
}
|
/**
 * Reads a boolean config value, checking the property's primary key and all
 * alternative keys; falls back to the property's default (or {@code false}
 * when no default is declared).
 */
public static boolean getBooleanWithAltKeys(Configuration conf,
                                            ConfigProperty<?> configProperty) {
  Option<String> rawValue = getRawValueWithAltKeys(conf, configProperty);
  if (rawValue.isPresent()) {
    return Boolean.parseBoolean(rawValue.get());
  }
  // No explicit value anywhere: use the declared default, or false if absent.
  return configProperty.hasDefaultValue()
      && Boolean.parseBoolean(configProperty.defaultValue().toString());
}
|
@Test
public void testGetBooleanWithAltKeysFromHadoopConf() {
    Configuration conf = new Configuration();
    // Unset key: falls back to the property's declared default value.
    assertEquals(Boolean.parseBoolean(TEST_BOOLEAN_CONFIG_PROPERTY.defaultValue()),
        getBooleanWithAltKeys(conf, TEST_BOOLEAN_CONFIG_PROPERTY));
    // Set the opposite of the default under the primary key; it must win.
    boolean setValue = !Boolean.parseBoolean(TEST_BOOLEAN_CONFIG_PROPERTY.defaultValue());
    conf.setBoolean(TEST_BOOLEAN_CONFIG_PROPERTY.key(), setValue);
    assertEquals(setValue,
        getBooleanWithAltKeys(conf, TEST_BOOLEAN_CONFIG_PROPERTY));
    // A fresh conf with only the first alternative key set must also be honored.
    conf = new Configuration();
    conf.setBoolean(TEST_BOOLEAN_CONFIG_PROPERTY.getAlternatives().get(0), setValue);
    assertEquals(setValue,
        getBooleanWithAltKeys(conf, TEST_BOOLEAN_CONFIG_PROPERTY));
}
|
/**
 * Returns the union of the two maps, ignoring null inputs. When both inputs
 * are null, returns null (the UDF's SQL-null result). On duplicate keys the
 * entry from {@code map2} overwrites the one from {@code map1}.
 *
 * @param map1 first map to union, may be null
 * @param map2 second map to union, may be null
 * @return the merged map, or null when both inputs are null
 */
@Udf
public <T> Map<String, T> union(
    @UdfParameter(description = "first map to union") final Map<String, T> map1,
    @UdfParameter(description = "second map to union") final Map<String, T> map2) {
  final List<Map<String, T>> nonNullInputs =
      Stream.of(map1, map2)
          .filter(Objects::nonNull)
          .collect(Collectors.toList());
  // Idiomatic emptiness check instead of size() == 0.
  if (nonNullInputs.isEmpty()) {
    return null;
  }
  final Map<String, T> output = new HashMap<>();
  // putAll in input order so map2's entries win on key collisions.
  nonNullInputs.forEach(output::putAll);
  return output;
}
|
@Test
public void shouldUnionWithNullMap() {
    // Given: one populated map and a null second argument.
    final Map<String, Integer> populated = Maps.newHashMap();
    populated.put("foo", 1);
    populated.put("bar", 2);

    // When:
    final Map<String, Integer> result = udf.union(populated, null);

    // Then: the null input is ignored and every entry survives.
    assertThat(result.size(), is(2));
    assertThat(result.get("foo"), is(1));
    assertThat(result.keySet(), containsInAnyOrder("foo", "bar"));
}
|
/**
 * Sets the in-memory format of this ringbuffer.
 *
 * @param inMemoryFormat the format to use; must not be null and must not be NATIVE
 * @return this config, for chaining
 */
public RingbufferConfig setInMemoryFormat(InMemoryFormat inMemoryFormat) {
    checkNotNull(inMemoryFormat, "inMemoryFormat can't be null");
    // NATIVE storage is explicitly rejected for ringbuffers.
    checkFalse(inMemoryFormat == NATIVE, "InMemoryFormat " + NATIVE + " is not supported");
    this.inMemoryFormat = inMemoryFormat;
    return this;
}
|
// NATIVE format is rejected by setInMemoryFormat with an IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void setInMemoryFormat_whenNative() {
    RingbufferConfig config = new RingbufferConfig(NAME);
    config.setInMemoryFormat(InMemoryFormat.NATIVE);
}
|
/**
 * Entry point of the step builder: returns the first (name) step of the
 * character construction sequence.
 */
public static NameStep newBuilder() {
    return new CharacterSteps();
}
|
// Walks the full step-builder chain with the "no abilities" branch and
// verifies every configured attribute round-trips.
@Test
void testBuildWeakWarrior() {
    final var character = CharacterStepBuilder.newBuilder()
        .name("Weak warrior")
        .fighterClass("none")
        .withWeapon("Slingshot")
        .noAbilities()
        .build();
    assertEquals("Weak warrior", character.getName());
    assertEquals("none", character.getFighterClass());
    assertEquals("Slingshot", character.getWeapon());
    // noAbilities() leaves the abilities list unset.
    assertNull(character.getAbilities());
    assertNotNull(character.toString());
}
|
/**
 * Resolves the search by id for the given user and executes it synchronously.
 *
 * @throws NotFoundException when no search with the given id is visible to the user
 */
public SearchJob executeSync(String searchId, SearchUser searchUser, ExecutionState executionState) {
    return searchDomain.getForUser(searchId, searchUser)
        .map(s -> executeSync(s, searchUser, executionState))
        .orElseThrow(() -> new NotFoundException("No search found with id <" + searchId + ">."));
}
|
// A user without permission on a stream referenced by the search must be
// rejected with MissingStreamPermissionException during execution.
@Test
public void checksUserPermissionsForSearch() {
    // Search whose only query filters on a stream the user cannot read.
    final Search search = Search.builder()
        .queries(ImmutableSet.of(
            Query.builder()
                .filter(StreamFilter.ofId("forbidden_stream"))
                .build()
        ))
        .build();
    final SearchUser searchUser = TestSearchUser.builder()
        .denyStream("forbidden_stream")
        .build();
    when(searchDomain.getForUser(eq("search1"), eq(searchUser))).thenReturn(Optional.of(search));
    assertThatExceptionOfType(MissingStreamPermissionException.class)
        .isThrownBy(() -> this.searchExecutor.executeSync("search1", searchUser, ExecutionState.empty()));
}
|
/**
 * Asynchronously looks up index items for {@code key} in the tiered-storage
 * segment file, filtering by hash code, time window and {@code maxCount}.
 * Returns an already-completed empty list when the segment is missing or has
 * not reached UPLOAD status yet.
 */
protected CompletableFuture<List<IndexItem>> queryAsyncFromSegmentFile(
    String key, int maxCount, long beginTime, long endTime) {
    if (this.fileSegment == null || !UPLOAD.equals(this.fileStatus.get())) {
        return CompletableFuture.completedFuture(Collections.emptyList());
    }
    Stopwatch stopwatch = Stopwatch.createStarted();
    int hashCode = this.hashCode(key);
    int slotPosition = this.getSlotPosition(hashCode % this.hashSlotMaxCount);
    // Stage 1: read the hash slot to locate the block of index items.
    CompletableFuture<List<IndexItem>> future = this.fileSegment.readAsync(slotPosition, HASH_SLOT_SIZE)
        .thenCompose(slotBuffer -> {
            if (slotBuffer.remaining() < HASH_SLOT_SIZE) {
                log.error("IndexStoreFile query from tiered storage return error slot buffer, " +
                    "key: {}, maxCount: {}, timestamp={}-{}", key, maxCount, beginTime, endTime);
                // null propagates to the next stage, which maps it to an empty result.
                return CompletableFuture.completedFuture(null);
            }
            int indexPosition = slotBuffer.getInt();
            // Cap the read size to guard against a corrupted length field.
            int indexTotalSize = Math.min(slotBuffer.getInt(), COMPACT_INDEX_ITEM_SIZE * 1024);
            if (indexPosition <= INVALID_INDEX || indexTotalSize <= 0) {
                return CompletableFuture.completedFuture(null);
            }
            return this.fileSegment.readAsync(indexPosition, indexTotalSize);
        })
        // Stage 2: decode the item block, filter by hash, time range and count.
        .thenApply(itemBuffer -> {
            List<IndexItem> result = new ArrayList<>();
            if (itemBuffer == null) {
                return result;
            }
            // The buffer must hold a whole number of fixed-size items.
            if (itemBuffer.remaining() % COMPACT_INDEX_ITEM_SIZE != 0) {
                log.error("IndexStoreFile query from tiered storage return error item buffer, " +
                    "key: {}, maxCount: {}, timestamp={}-{}", key, maxCount, beginTime, endTime);
                return result;
            }
            int size = itemBuffer.remaining() / COMPACT_INDEX_ITEM_SIZE;
            byte[] bytes = new byte[COMPACT_INDEX_ITEM_SIZE];
            for (int i = 0; i < size; i++) {
                itemBuffer.get(bytes);
                IndexItem indexItem = new IndexItem(bytes);
                // Stored timestamps are deltas relative to the file's base timestamp.
                long storeTimestamp = indexItem.getTimeDiff() + beginTimestamp.get();
                if (hashCode == indexItem.getHashCode() &&
                    beginTime <= storeTimestamp && storeTimestamp <= endTime &&
                    result.size() < maxCount) {
                    result.add(indexItem);
                }
            }
            return result;
        });
    // Stage 3: log outcome (error or debug summary) without altering the result.
    return future.whenComplete((result, throwable) -> {
        long costTime = stopwatch.elapsed(TimeUnit.MILLISECONDS);
        if (throwable != null) {
            log.error("IndexStoreFile query from segment file, cost: {}ms, timestamp: {}, " +
                "key: {}, hashCode: {}, maxCount: {}, timestamp={}-{}",
                costTime, getTimestamp(), key, hashCode, maxCount, beginTime, endTime, throwable);
        } else {
            String details = Optional.ofNullable(result)
                .map(r -> r.stream()
                    .map(item -> String.format("%d-%d", item.getQueueId(), item.getOffset()))
                    .collect(Collectors.joining(", ")))
                .orElse("");
            log.debug("IndexStoreFile query from segment file, cost: {}ms, timestamp: {}, result size: {}, ({}), " +
                "key: {}, hashCode: {}, maxCount: {}, timestamp={}-{}",
                costTime, getTimestamp(), result != null ? result.size() : 0, details, key, hashCode, maxCount, beginTime, endTime);
        }
    });
}
|
// Round-trips an index file through compaction into a segment file, then
// queries it back: wrong topic or key yields no hits, matching topic+key
// yields the three items written per topic.
@Test
public void queryAsyncFromSegmentFileTest() throws ExecutionException, InterruptedException {
    long timestamp = indexStoreFile.getTimestamp();
    // Write 3 entries for each of 5 topics.
    for (int i = 0; i < 5; i++) {
        for (int j = 0; j < 3; j++) {
            Assert.assertEquals(AppendResult.SUCCESS, indexStoreFile.putKey(TOPIC_NAME + i,
                TOPIC_ID, QUEUE_ID, KEY_SET, MESSAGE_OFFSET, MESSAGE_SIZE, System.currentTimeMillis()));
        }
    }
    // Compact and upload to a fresh POSIX-backed segment.
    ByteBuffer byteBuffer = indexStoreFile.doCompaction();
    FileSegment fileSegment = new PosixFileSegment(
        storeConfig, FileSegmentType.INDEX, filePath, 0L);
    fileSegment.append(byteBuffer, timestamp);
    fileSegment.commitAsync().join();
    Assert.assertEquals(byteBuffer.limit(), fileSegment.getSize());
    // Re-open the store on top of the uploaded segment.
    indexStoreFile.destroy();
    indexStoreFile = new IndexStoreFile(storeConfig, fileSegment);
    // change topic
    List<IndexItem> itemList = indexStoreFile.queryAsync(
        TOPIC_NAME, KEY, 64, timestamp, System.currentTimeMillis()).get();
    Assert.assertEquals(0, itemList.size());
    // change key
    itemList = indexStoreFile.queryAsync(
        TOPIC_NAME, KEY + "1", 64, timestamp, System.currentTimeMillis()).get();
    Assert.assertEquals(0, itemList.size());
    // Exact topic+key match returns all three entries written for that topic.
    itemList = indexStoreFile.queryAsync(
        TOPIC_NAME + "1", KEY, 64, timestamp, System.currentTimeMillis()).get();
    Assert.assertEquals(3, itemList.size());
}
|
/**
 * Sanitizes a header value for use in an HTTP response header: everything
 * from the first CR or LF onward is dropped (HTTP response splitting
 * protection) and the remainder is trimmed. Returns null for null input.
 */
static String encodeHeader(final String header) {
    if (header == null) {
        return null;
    }
    // Find the first CR or LF; keep only what precedes it.
    int cut = header.length();
    for (int i = 0; i < header.length(); i++) {
        final char c = header.charAt(i);
        if (c == '\n' || c == '\r') {
            cut = i;
            break;
        }
    }
    return header.substring(0, cut).trim();
}
|
// encodeHeader must pass clean values through unchanged and truncate
// anything after a CR/LF (response-splitting protection).
@Test
public void testEncodeHeaders() {
    String validOrigin = "http://localhost:12345";
    String encodedValidOrigin = CrossOriginFilter.encodeHeader(validOrigin);
    Assert.assertEquals("Valid origin encoding should match exactly",
        validOrigin, encodedValidOrigin);
    // Injected second header line must be stripped.
    String httpResponseSplitOrigin = validOrigin + " \nSecondHeader: value";
    String encodedResponseSplitOrigin =
        CrossOriginFilter.encodeHeader(httpResponseSplitOrigin);
    Assert.assertEquals("Http response split origin should be protected against",
        validOrigin, encodedResponseSplitOrigin);
    // Test Origin List
    String validOriginList = "http://foo.example.com:12345 http://bar.example.com:12345";
    String encodedValidOriginList = CrossOriginFilter
        .encodeHeader(validOriginList);
    Assert.assertEquals("Valid origin list encoding should match exactly",
        validOriginList, encodedValidOriginList);
}
|
/**
 * Advances to the next file to process. Returns true when a file was opened
 * and its content loaded, false when input is exhausted or a fatal error
 * occurred (in which case the step is stopped and an error is counted).
 * Missing or empty files may be skipped recursively depending on meta flags.
 */
boolean openNextFile() {
  try {
    if ( meta.getFileInFields() ) {
      // Filenames come from an incoming field: fetch the next input row.
      data.readrow = getRow(); // Grab another row ...
      if ( data.readrow == null ) { // finished processing!
        if ( isDetailed() ) {
          logDetailed( BaseMessages.getString( PKG, "LoadFileInput.Log.FinishedProcessing" ) );
        }
        return false;
      }
      if ( first ) {
        // One-time initialization on the first row: build output metadata.
        first = false;
        data.inputRowMeta = getInputRowMeta();
        data.outputRowMeta = data.inputRowMeta.clone();
        meta.getFields( data.outputRowMeta, getStepname(), null, null, this, repository, metaStore );
        // Create convert meta-data objects that will contain Date & Number formatters
        // All non binary content is handled as a String. It would be converted to the target type after the processing.
        data.convertRowMeta = data.outputRowMeta.cloneToType( ValueMetaInterface.TYPE_STRING );
        if ( meta.getFileInFields() ) {
          // Check is filename field is provided
          if ( Utils.isEmpty( meta.getDynamicFilenameField() ) ) {
            logError( BaseMessages.getString( PKG, "LoadFileInput.Log.NoField" ) );
            throw new KettleException( BaseMessages.getString( PKG, "LoadFileInput.Log.NoField" ) );
          }
          // cache the position of the field
          if ( data.indexOfFilenameField < 0 ) {
            data.indexOfFilenameField = data.inputRowMeta.indexOfValue( meta.getDynamicFilenameField() );
            if ( data.indexOfFilenameField < 0 ) {
              // The field is unreachable !
              logError( BaseMessages.getString( PKG, "LoadFileInput.Log.ErrorFindingField" )
                + "[" + meta.getDynamicFilenameField() + "]" );
              throw new KettleException( BaseMessages.getString(
                PKG, "LoadFileInput.Exception.CouldnotFindField", meta.getDynamicFilenameField() ) );
            }
          }
          // Get the number of previous fields
          data.totalpreviousfields = data.inputRowMeta.size();
        }
      } // end if first
      // get field value
      String Fieldvalue = data.inputRowMeta.getString( data.readrow, data.indexOfFilenameField );
      if ( isDetailed() ) {
        logDetailed( BaseMessages.getString(
          PKG, "LoadFileInput.Log.Stream", meta.getDynamicFilenameField(), Fieldvalue ) );
      }
      try {
        // Source is a file.
        data.file = KettleVFS.getFileObject( Fieldvalue );
      } catch ( Exception e ) {
        throw new KettleException( e );
      }
    } else {
      // Static file list mode: iterate over the configured files.
      if ( data.filenr >= data.files.nrOfFiles() ) {
        // finished processing!
        if ( isDetailed() ) {
          logDetailed( BaseMessages.getString( PKG, "LoadFileInput.Log.FinishedProcessing" ) );
        }
        return false;
      }
      // Is this the last file?
      data.last_file = ( data.filenr == data.files.nrOfFiles() - 1 );
      data.file = data.files.getFile( data.filenr );
    }
    // Check if file exists
    if ( meta.isIgnoreMissingPath() && !data.file.exists() ) {
      logBasic( BaseMessages.getString( PKG, "LoadFileInput.Error.FileNotExists", "" + data.file.getName() ) );
      // Skip to the next file (recursive call).
      return openNextFile();
    }
    // Check if file is empty
    data.fileSize = data.file.getContent().getSize();
    // Move file pointer ahead!
    data.filenr++;
    if ( meta.isIgnoreEmptyFile() && data.fileSize == 0 ) {
      logError( BaseMessages.getString( PKG, "LoadFileInput.Error.FileSizeZero", "" + data.file.getName() ) );
      // Skip empty file (recursive call; filenr was already advanced).
      return openNextFile();
    } else {
      if ( isDetailed() ) {
        logDetailed( BaseMessages.getString( PKG, "LoadFileInput.Log.OpeningFile", data.file.toString() ) );
      }
      data.filename = KettleVFS.getFilename( data.file );
      // Add additional fields?
      // Each optional output field is only computed when configured.
      if ( meta.getShortFileNameField() != null && meta.getShortFileNameField().length() > 0 ) {
        data.shortFilename = data.file.getName().getBaseName();
      }
      if ( meta.getPathField() != null && meta.getPathField().length() > 0 ) {
        data.path = KettleVFS.getFilename( data.file.getParent() );
      }
      if ( meta.isHiddenField() != null && meta.isHiddenField().length() > 0 ) {
        data.hidden = data.file.isHidden();
      }
      if ( meta.getExtensionField() != null && meta.getExtensionField().length() > 0 ) {
        data.extension = data.file.getName().getExtension();
      }
      if ( meta.getLastModificationDateField() != null && meta.getLastModificationDateField().length() > 0 ) {
        data.lastModificationDateTime = new Date( data.file.getContent().getLastModifiedTime() );
      }
      if ( meta.getUriField() != null && meta.getUriField().length() > 0 ) {
        data.uriName = Const.optionallyDecodeUriString( data.file.getName().getURI() );
      }
      if ( meta.getRootUriField() != null && meta.getRootUriField().length() > 0 ) {
        data.rootUriName = data.file.getName().getRootURI();
      }
      // get File content
      getFileContent();
      addFileToResultFilesName( data.file );
      if ( isDetailed() ) {
        logDetailed( BaseMessages.getString( PKG, "LoadFileInput.Log.FileOpened", data.file.toString() ) );
      }
    }
  } catch ( Exception e ) {
    // Any failure aborts the whole step, not just this file.
    logError( BaseMessages.getString( PKG, "LoadFileInput.Log.UnableToOpenFile", "" + data.filenr, data.file
      .toString(), e.toString() ) );
    stopAll();
    setErrors( 1 );
    return false;
  }
  return true;
}
|
// With "ignore empty file" on, the empty input is skipped: the first call
// opens the non-empty file and the second call reports exhaustion.
@Test
public void testOpenNextFile_01_ignoreEmpty() {
    stepMetaInterface.setIgnoreEmptyFile( true );
    stepInputFiles.addFile( getFile( "input0.txt" ) );
    stepInputFiles.addFile( getFile( "input1.txt" ) );
    assertTrue( stepLoadFileInput.openNextFile() );
    assertFalse( stepLoadFileInput.openNextFile() );
}
|
/**
 * Parses the raw PlantUML lines into a diagram: comments are stripped,
 * components are parsed first, then dependencies between them.
 */
private PlantUmlDiagram createDiagram(List<String> rawDiagramLines) {
    List<String> diagramLines = filterOutComments(rawDiagramLines);
    PlantUmlComponents plantUmlComponents = new PlantUmlComponents(parseComponents(diagramLines));
    List<ParsedDependency> dependencies = parseDependencies(plantUmlComponents, diagramLines);
    PlantUmlDiagram.Builder builder = new PlantUmlDiagram.Builder(plantUmlComponents);
    return builder.withDependencies(dependencies).build();
}
|
// Dependencies pointing at a database component must be dropped from the
// parsed diagram instead of producing phantom components.
@Test
public void ignores_database_components() {
    File file = TestDiagram.in(temporaryFolder)
        .rawLine("database \"DB\"")
        .component("componentA").withAlias("aliasA").withStereoTypes("..packageA..")
        .component("componentB").withAlias("aliasB").withStereoTypes("..packageB..")
        .dependencyFrom("aliasA").to("aliasB")
        .dependencyFrom("aliasB").to("DB")
        .dependencyFrom("componentA").to("DB")
        .write();
    PlantUmlDiagram diagram = createDiagram(file);
    PlantUmlComponent a = getComponentWithAlias(new Alias("aliasA"), diagram);
    PlantUmlComponent b = getComponentWithAlias(new Alias("aliasB"), diagram);
    // Only the component-to-component dependency survives; DB edges vanish.
    assertThat(a.getDependencies()).containsOnly(b);
    assertThat(b.getDependencies()).isEmpty();
}
|
/**
 * Returns true when the input parses as a URI carrying both a scheme and a
 * host (e.g. {@code http://example.com}); blank, null or malformed input
 * yields false.
 */
public static boolean isUri(String potentialUri) {
    if (StringUtils.isBlank(potentialUri)) {
        return false;
    }
    try {
        final URI parsed = new URI(potentialUri);
        final boolean hasScheme = parsed.getScheme() != null;
        final boolean hasHost = parsed.getHost() != null;
        return hasScheme && hasHost;
    } catch (URISyntaxException ignored) {
        // Not parseable at all: by definition not a URI.
        return false;
    }
}
|
// Garbage input that cannot be parsed as a URI must yield false, not throw.
@Test public void
returns_false_when_uri_is_malformed() {
    assertThat(UriValidator.isUri("&%!!"), is(false));
}
|
/**
 * Returns true when any statistic component (row count, rate or window) is
 * infinite, i.e. the node's stats could not be estimated.
 */
public boolean isUnknown() {
    if (Double.isInfinite(getRowCount())) {
        return true;
    }
    if (Double.isInfinite(getRate())) {
        return true;
    }
    return Double.isInfinite(getWindow());
}
|
// A rel node with no metadata handler support must report unknown stats.
@Test
public void testUnknownRel() {
    String sql = " select * from ORDER_DETAILS1 ";
    RelNode root = env.parseQuery(sql);
    // UnknownRel is a synthetic node the metadata provider knows nothing about.
    RelNode unknown = new UnknownRel(root.getCluster(), null, null);
    NodeStats nodeStats =
        unknown
            .metadata(NodeStatsMetadata.class, unknown.getCluster().getMetadataQuery())
            .getNodeStats();
    Assert.assertTrue(nodeStats.isUnknown());
}
|
/**
 * Writing a boolean-array binary protocol value is not supported; always
 * throws {@link UnsupportedSQLOperationException}.
 */
@Override
public void write(final PostgreSQLPacketPayload payload, final Object value) {
    throw new UnsupportedSQLOperationException("PostgreSQLBoolArrayBinaryProtocolValue.write()");
}
|
// write() is an unsupported operation and must always throw.
@Test
void assertWrite() {
    assertThrows(UnsupportedSQLOperationException.class, () -> newInstance().write(new PostgreSQLPacketPayload(null, StandardCharsets.UTF_8), "val"));
}
|
/**
 * Case-insensitive equality over data source, table and (nullable) schema
 * name, implemented by upper-casing both sides.
 */
// NOTE(review): toUpperCase() uses the default locale, which is not
// locale-independent (e.g. Turkish dotted/dotless I). Presumably hashCode()
// elsewhere uses the same scheme; if so, switching to Locale-aware casing
// must be done in both places together — confirm before changing.
@Override
public boolean equals(final Object object) {
    if (this == object) {
        return true;
    }
    if (null == object || getClass() != object.getClass()) {
        return false;
    }
    DataNode dataNode = (DataNode) object;
    // schemaName may be null, so it is upper-cased only when present.
    return Objects.equal(dataSourceName.toUpperCase(), dataNode.dataSourceName.toUpperCase())
        && Objects.equal(tableName.toUpperCase(), dataNode.tableName.toUpperCase())
        && Objects.equal(null == schemaName ? null : schemaName.toUpperCase(), null == dataNode.schemaName ? null : dataNode.schemaName.toUpperCase());
}
|
// Covers reflexivity, equality of identically-parsed nodes, inequality of
// different tables, and the null contract of equals().
@SuppressWarnings({"SimplifiableAssertion", "ConstantValue"})
@Test
void assertEquals() {
    DataNode dataNode = new DataNode("ds_0.tbl_0");
    assertThat(dataNode, is(new DataNode("ds_0.tbl_0")));
    assertThat(dataNode, is(dataNode));
    assertThat(dataNode, not(new DataNode("ds_0.tbl_1")));
    assertFalse(dataNode.equals(null));
}
|
/**
 * Delegates satisfaction to the underlying cross indicator at the given bar
 * index; the trading record is not consulted.
 */
@Override
public boolean isSatisfied(int index, TradingRecord tradingRecord) {
    final boolean crossed = cross.getValue(index);
    traceIsSatisfied(index, crossed);
    return crossed;
}
|
// Only the first downward crossing of the threshold fires; subsequent
// oscillations that touch the threshold again do not re-trigger the rule.
@Test
public void repeatedlyHittingThresholdAfterCrossDown() {
    Indicator<Num> evaluatedIndicator = new FixedDecimalIndicator(series, 11, 10, 9, 10, 9, 10, 9);
    CrossedDownIndicatorRule rule = new CrossedDownIndicatorRule(evaluatedIndicator, 10);
    assertFalse(rule.isSatisfied(0));
    assertFalse(rule.isSatisfied(1));
    assertTrue("first cross down", rule.isSatisfied(2));
    assertFalse(rule.isSatisfied(3));
    assertFalse(rule.isSatisfied(4));
    assertFalse(rule.isSatisfied(5));
    assertFalse(rule.isSatisfied(6));
}
|
/**
 * A user may view the template when they are an explicit view user (directly
 * or via a role), or when the template grants access to group admins and the
 * user is one.
 */
public boolean hasViewAccessToTemplate(PipelineTemplateConfig template, CaseInsensitiveString username, List<Role> roles, boolean isGroupAdministrator) {
    if (template.getAuthorization().isViewUser(username, roles)) {
        return true;
    }
    return template.isAllowGroupAdmins() && isGroupAdministrator;
}
|
// Membership in a role listed in the template's view config is sufficient
// for view access, even without group-admin privileges.
@Test
public void shouldReturnTrueIfUserWithinARoleCanViewTemplate() {
    CaseInsensitiveString templateViewUser = new CaseInsensitiveString("template-admin");
    Role securityConfigRole = getSecurityConfigRole(templateViewUser);
    List<Role> roles = setupRoles(securityConfigRole);
    String templateName = "template1";
    PipelineTemplateConfig template = PipelineTemplateConfigMother.createTemplate(templateName, StageConfigMother.manualStage("stage"));
    // Grant view access to the role, not to the user directly.
    template.setAuthorization(new Authorization(new ViewConfig(new AdminRole(securityConfigRole))));
    TemplatesConfig templates = new TemplatesConfig(template);
    assertThat(templates.hasViewAccessToTemplate(template, templateViewUser, roles, false), is(true));
}
|
/**
 * Adds {@code value} as an application tag on the submission context,
 * working on a defensive copy since the returned tag set may be immutable.
 */
@Override
public void process(String host, String value, ApplicationId applicationId,
    ApplicationSubmissionContext submissionContext) {
  Set<String> currentTags = submissionContext.getApplicationTags();
  Set<String> updatedTags =
      (currentTags == null) ? new HashSet<>() : new HashSet<>(currentTags);
  updatedTags.add(value);
  submissionContext.setApplicationTags(updatedTags);
}
|
// The processor must push a tag set containing exactly the configured tag
// back into the submission context.
@Test
public void testTagAddProcessor() {
    ContextProcessor tagAddProcessor = new TagAddProcessor();
    ApplicationId app = ApplicationId.newInstance(123456, 111);
    ApplicationSubmissionContext applicationSubmissionContext =
        mock(ApplicationSubmissionContext.class);
    when(applicationSubmissionContext.getApplicationId()).thenReturn(app);
    tagAddProcessor.process("host.cluster2.com",
        "cluster:cluster1", app, applicationSubmissionContext);
    // Expected result: a set with only the newly added tag.
    Set<String> applicationTags = new HashSet<String>();
    applicationTags.add("cluster:cluster1");
    verify(applicationSubmissionContext, times(1))
        .setApplicationTags(applicationTags);
}
|
/**
 * Splits an identifier into its lower-cased terms using TERM_SPLITTER.
 * An identifier consisting only of underscores is returned as-is as a
 * single term.
 */
// NOTE(review): String::toLowerCase uses the default locale; for
// locale-independent identifier handling toLowerCase(Locale.ROOT) is the
// usual choice — confirm whether callers rely on default-locale behavior.
public static ImmutableList<String> splitToLowercaseTerms(String identifierName) {
    if (ONLY_UNDERSCORES.matcher(identifierName).matches()) {
        // Degenerate case of names which contain only underscore
        return ImmutableList.of(identifierName);
    }
    return TERM_SPLITTER
        .splitToStream(identifierName)
        .map(String::toLowerCase)
        .collect(toImmutableList());
}
|
// Underscore-only identifiers are treated as a single degenerate term.
@Test
public void splitToLowercaseTerms_findsSingleTerm_withOnlyUnderscore() {
    String identifierName = "_____";
    ImmutableList<String> terms = NamingConventions.splitToLowercaseTerms(identifierName);
    assertThat(terms).containsExactly("_____");
}
|
/**
 * Returns whether the failover switch is currently enabled.
 */
public boolean isFailoverSwitch() {
    return failoverSwitchEnable;
}
|
// Failover is disabled by default on a fresh reactor.
@Test
void testIsFailoverSwitch() throws NacosException {
    assertFalse(failoverReactor.isFailoverSwitch());
}
|
/**
 * Verifies the loaded meta.properties files against the expected cluster and
 * node IDs, plus any extra invariants requested via {@code verificationFlags}.
 * The first file carrying a cluster/node ID pins the expected value for all
 * subsequent files when no expectation was supplied.
 *
 * @param expectedClusterId the cluster ID to enforce, if already known
 * @param expectedNodeId    the node ID to enforce, if already known
 * @param verificationFlags which additional invariants to enforce
 */
public void verify(
    Optional<String> expectedClusterId,
    OptionalInt expectedNodeId,
    EnumSet<VerificationFlag> verificationFlags
) {
    // Tracks directory IDs already seen, to detect duplicates across log dirs.
    Map<Uuid, String> seenUuids = new HashMap<>();
    if (verificationFlags.contains(VerificationFlag.REQUIRE_AT_LEAST_ONE_VALID)) {
        if (logDirProps.isEmpty()) {
            throw new RuntimeException("No readable meta.properties files found.");
        }
    }
    for (Entry<String, MetaProperties> entry : logDirProps.entrySet()) {
        String logDir = entry.getKey();
        String path = new File(logDir, META_PROPERTIES_NAME).toString();
        MetaProperties metaProps = entry.getValue();
        if (verificationFlags.contains(VerificationFlag.REQUIRE_V0)) {
            if (!metaProps.version().equals(MetaPropertiesVersion.V0)) {
                throw new RuntimeException("Found unexpected version in " + path + ". " +
                    "ZK-based brokers that are not migrating only support version 0 " +
                    "(which is implicit when the `version` field is missing).");
            }
        }
        // cluster.id: required in v1, pinned by the first file that has one.
        if (!metaProps.clusterId().isPresent()) {
            if (metaProps.version().alwaysHasClusterId()) {
                throw new RuntimeException("cluster.id was not specified in the v1 file: " +
                    path);
            }
        } else if (!expectedClusterId.isPresent()) {
            expectedClusterId = metaProps.clusterId();
        } else if (!metaProps.clusterId().get().equals(expectedClusterId.get())) {
            throw new RuntimeException("Invalid cluster.id in: " + path + ". Expected " +
                expectedClusterId.get() + ", but read " + metaProps.clusterId().get());
        }
        // node.id: same pinning strategy as cluster.id.
        if (!metaProps.nodeId().isPresent()) {
            if (metaProps.version().alwaysHasNodeId()) {
                throw new RuntimeException("node.id was not specified in " + path);
            }
        } else if (!expectedNodeId.isPresent()) {
            expectedNodeId = metaProps.nodeId();
        } else if (metaProps.nodeId().getAsInt() != expectedNodeId.getAsInt()) {
            throw new RuntimeException("Stored node id " + metaProps.nodeId().getAsInt() +
                " doesn't match previous node id " + expectedNodeId.getAsInt() + " in " + path +
                ". If you moved your data, make sure your configured node id matches. If you " +
                "intend to create a new node, you should remove all data in your data " +
                "directories.");
        }
        if (metaProps.directoryId().isPresent()) {
            if (DirectoryId.reserved(metaProps.directoryId().get())) {
                // Fixed typo in error message: "resrved" -> "reserved".
                throw new RuntimeException("Invalid reserved directory ID " +
                    metaProps.directoryId().get() + " found in " + logDir);
            }
            String prevLogDir = seenUuids.put(metaProps.directoryId().get(), logDir);
            if (prevLogDir != null) {
                throw new RuntimeException("Duplicate directory ID " + metaProps.directoryId() +
                    " found. It was the ID of " + prevLogDir + ", " + "but also of " +
                    logDir);
            }
        }
    }
    if (verificationFlags.contains(VerificationFlag.REQUIRE_METADATA_LOG_DIR)) {
        if (!metadataLogDir.isPresent()) {
            throw new RuntimeException("No metadata log directory was specified.");
        }
    }
    // An I/O error in the metadata log dir is always fatal, regardless of flags.
    if (metadataLogDir.isPresent()) {
        if (errorLogDirs.contains(metadataLogDir.get())) {
            throw new RuntimeException("Encountered I/O error in metadata log directory " +
                metadataLogDir.get() + ". Cannot continue.");
        }
    }
}
|
// A well-formed ensemble passes verification without expected IDs supplied.
@Test
public void testSuccessfulVerification() {
    FOO.verify(Optional.empty(),
        OptionalInt.empty(),
        EnumSet.of(REQUIRE_AT_LEAST_ONE_VALID, REQUIRE_METADATA_LOG_DIR));
}
|
/**
 * Returns the first security mapping matching the user. Lake Formation
 * mappings must carry an IAM role and must not carry basic credentials.
 *
 * @throws AccessDeniedException when no mapping matches the user
 */
public AWSSecurityMapping getAWSLakeFormationSecurityMapping(String user)
{
    AWSSecurityMapping matched = awsSecurityMappings.stream()
            .filter(mapping -> mapping.matches(user))
            .findFirst()
            .orElseThrow(() -> new AccessDeniedException("No matching AWS Lake Formation Security Mapping"));
    verify(!matched.getCredentials().isPresent(),
            "Basic AWS Credentials are not supported for AWS Lake Formation Security Mapping");
    verify(matched.getIamRole().isPresent(),
            "iamRole is mandatory for AWS Lake Formation Security Mapping");
    return matched;
}
|
// Mappings violating either invariant (missing iamRole, or basic
// credentials present) must fail verification; the regex accepts either
// failure message since only the first call is reached.
@Test(
    expectedExceptions = VerifyException.class,
    expectedExceptionsMessageRegExp =
        "(iamRole is mandatory for AWS Lake Formation Security Mapping|Basic AWS Credentials are not supported for AWS Lake Formation Security Mapping)")
public void testInvalidAWSLakeFormationMapping()
{
    String lakeFormationSecurityMappingConfigPath =
        this.getClass().getClassLoader().getResource("com.facebook.presto.hive.aws.security/aws-security-mapping-lakeformation-invalid.json").getPath();
    AWSSecurityMappings mappings = parseJson(new File(lakeFormationSecurityMappingConfigPath).toPath(), AWSSecurityMappings.class);
    // Fails with VerifyException: iamRole is mandatory for AWS Lake Formation Security Mapping
    mappings.getAWSLakeFormationSecurityMapping(MappingSelector.empty().withUser("admin").getUser());
    // Fails with VerifyException: Basic AWS Credentials are not supported for AWS Lake Formation Security Mapping
    mappings.getAWSLakeFormationSecurityMapping(MappingSelector.empty().withUser("analyst").getUser());
}
|
/**
 * Determines the ecosystem for a CVE by scanning its reference URLs and
 * returning the first one that maps to a known ecosystem; null when the CVE
 * has no references or none of them match.
 */
public String getEcosystem(DefCveItem cve) {
    if (cve == null || cve.getCve() == null) {
        return null;
    }
    final List<Reference> references = cve.getCve().getReferences();
    if (references == null) {
        return null;
    }
    for (Reference reference : references) {
        final Hit<String> ecosystem = search.findFirst(reference.getUrl());
        if (ecosystem != null) {
            return ecosystem.value;
        }
    }
    return null;
}
|
// A reference URL hosted on python.org must map to the Python ecosystem.
@Test
public void testUrlHostEcosystemMapper() {
    UrlEcosystemMapper mapper = new UrlEcosystemMapper();
    assertEquals(PythonPackageAnalyzer.DEPENDENCY_ECOSYSTEM, mapper.getEcosystem(asCve("https://python.org/path")));
}
|
/**
 * Ensures no other record in {@code list} already uses the given level
 * value. On create ({@code id == null}) any record with the same level is a
 * conflict; on update, only a record with a different id conflicts.
 */
@VisibleForTesting
void validateLevelUnique(List<MemberLevelDO> list, Long id, Integer level) {
    for (MemberLevelDO existing : list) {
        boolean sameLevel = !ObjUtil.notEqual(existing.getLevel(), level);
        boolean sameRecord = id != null && id.equals(existing.getId());
        if (sameLevel && !sameRecord) {
            throw exception(LEVEL_VALUE_EXISTS, existing.getLevel(), existing.getName());
        }
    }
}
|
// Creating a level with a value that already exists must raise
// LEVEL_VALUE_EXISTS carrying the conflicting level and name.
@Test
public void testCreateLevel_levelUnique() {
    // Prepare parameters
    Integer level = randomInteger();
    String name = randomString();
    // Mock data: persist an existing record with the same level value.
    memberlevelMapper.insert(randomLevelDO(o -> {
        o.setLevel(level);
        o.setName(name);
    }));
    // Invoke and verify the expected service exception.
    List<MemberLevelDO> list = memberlevelMapper.selectList();
    assertServiceException(() -> levelService.validateLevelUnique(list, null, level), LEVEL_VALUE_EXISTS, level, name);
}
|
/**
 * Builds a case-insensitive validator accepting any constant name of the
 * given enum class, plus {@code null}.
 */
public static <T extends Enum<T>> Validator enumValues(final Class<T> enumClass) {
    final EnumSet<T> constants = EnumSet.allOf(enumClass);
    // One slot per constant plus a trailing slot that stays null: null is valid.
    final String[] validValues = new String[constants.size() + 1];
    int i = 0;
    for (final T constant : constants) {
        validValues[i++] = constant.toString();
    }
    return ValidCaseInsensitiveString.in(validValues);
}
|
// Enum-value validation is case-insensitive: a lower-cased constant name
// must be accepted without throwing.
@Test
public void shouldNotThrowIfAValidEnumValueInDifferentCase() {
    // Given:
    final Validator validator = ConfigValidators.enumValues(TestEnum.class);
    // When:
    validator.ensureValid("propName", TestEnum.FOO.toString().toLowerCase());
    // Then: did not throw
}
|
/**
 * Returns the store name, preferring the supplier's name when a store
 * supplier was provided.
 */
public String storeName() {
    return storeSupplier == null ? storeName : storeSupplier.name();
}
|
// When a store supplier is configured, its name wins over any other name.
@Test
public void shouldUseStoreNameOfSupplierWhenProvided() {
    final String storeName = "other-store-name";
    when(supplier.name()).thenReturn(storeName);
    final MaterializedInternal<Object, Object, KeyValueStore<Bytes, byte[]>> materialized =
        new MaterializedInternal<>(Materialized.as(supplier), nameProvider, prefix);
    assertThat(materialized.storeName(), equalTo(storeName));
}
|
/**
 * Creates a new component builder for the given component type.
 */
public static Builder builder(Type type) {
    return new Builder(type);
}
|
// Building a component without setting the status must fail fast with NPE.
@Test
public void builder_throws_NPE_if_status_is_Null() {
    assertThatThrownBy(() -> {
        builder(Component.Type.DIRECTORY)
            .setName("DIR")
            .setKey(KEY)
            .setUuid(UUID)
            .setReportAttributes(ReportAttributes.newBuilder(1).build())
            .build();
    })
        .isInstanceOf(NullPointerException.class);
}
|
/**
 * Resolves the configured value of {@code key} as a class: a stored
 * {@link Class} is returned directly, otherwise the value is treated as a
 * fully-qualified class name and loaded.
 *
 * @throws IllegalStateException when the named class cannot be loaded
 */
@Override
public <T> Class<T> getClass(PropertyKey key) {
    Object value = get(key);
    if (value instanceof Class) {
        return (Class<T>) value;
    }
    try {
        return (Class<T>) Class.forName((String) value);
    } catch (ClassNotFoundException e) {
        // Preserve the original exception as the cause (was previously dropped).
        throw new IllegalStateException(
            format("Requested class %s can not be loaded", value), e);
    }
}
|
// A string-valued property naming a class must resolve to that Class object.
@Test
public void getClassTest() { // The name getClass is already reserved.
    mConfiguration.set(PropertyKey.USER_CLIENT_CACHE_EVICTOR_CLASS, "java.lang.String");
    assertEquals(String.class,
        mConfiguration.getClass(PropertyKey.USER_CLIENT_CACHE_EVICTOR_CLASS));
}
|
/**
 * Registers a post-load hook by delegating to the plugin loader; returns
 * the hook as registered.
 */
@Override
public PluginPostLoadHook addPluginPostLoadHook(PluginPostLoadHook pluginPostLoadHook) {
    return pluginLoader.addPluginPostLoadHook(pluginPostLoadHook);
}
|
// The manager must forward hook registration straight to the plugin loader.
@Test
void shouldAllowRegistrationOfPluginPostLoadHooks() {
    PluginManager pluginManager = new DefaultPluginManager(monitor, registry, goPluginOSGiFramework, jarChangeListener, null, systemEnvironment, pluginLoader);
    final PluginPostLoadHook pluginPostLoadHook = mock(PluginPostLoadHook.class);
    pluginManager.addPluginPostLoadHook(pluginPostLoadHook);
    verify(pluginLoader).addPluginPostLoadHook(pluginPostLoadHook);
}
|
/**
 * Lists the directory using the chunk size configured in the host
 * preferences ("eue.listing.chunksize").
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    return this.list(directory, listener, new HostPreferences(session.getHost()).getInteger("eue.listing.chunksize"));
}
|
// Listing the trash folder ("Gelöschte Dateien") must complete without error.
@Test
public void testListTrash() throws Exception {
    final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
    final Path folder = new Path("Gelöschte Dateien", EnumSet.of(directory));
    final AttributedList<Path> list = new EueListService(session, fileid).list(folder, new DisabledListProgressListener());
}
|
/**
 * A pickle passes when its file has no line filter at all, or when any
 * configured line matches one of the pickle's locations (pickle, scenario,
 * examples row, rule, or feature).
 */
@Override
public boolean test(Pickle pickle) {
    URI picklePath = pickle.getUri();
    if (!lineFilters.containsKey(picklePath)) {
        // No filter registered for this file: everything passes.
        return true;
    }
    return lineFilters.get(picklePath).stream().anyMatch(line ->
        Objects.equals(line, pickle.getLocation().getLine())
            || Objects.equals(line, pickle.getScenarioLocation().getLine())
            || pickle.getExamplesLocation().map(Location::getLine).map(line::equals).orElse(false)
            || pickle.getRuleLocation().map(Location::getLine).map(line::equals).orElse(false)
            || pickle.getFeatureLocation().map(Location::getLine).map(line::equals).orElse(false));
}
|
// A filter on the feature's declaration line (line 1) matches every pickle
// in that feature file.
@Test
void matches_feature() {
    LinePredicate predicate = new LinePredicate(singletonMap(
        featurePath,
        singletonList(1)));
    assertTrue(predicate.test(firstPickle));
    assertTrue(predicate.test(secondPickle));
    assertTrue(predicate.test(thirdPickle));
    assertTrue(predicate.test(fourthPickle));
}
|
/**
 * Applies a config payload to the root builder: pushes the root onto the
 * builder stack, then walks the payload's slime tree. Any failure is
 * rethrown as a RuntimeException including the offending payload.
 */
public void applyPayload(ConfigPayload payload) {
    stack.push(new NamedBuilder(rootBuilder));
    try {
        handleValue(payload.getSlime().get());
    } catch (Exception e) {
        throw new RuntimeException("Not able to create config builder for payload '" + payload.toString() + "'", e);
    }
}
|
// A model reference given only a path (no URL) must resolve through the
// path-resolution machinery.
@Test
public void testModelWithPathOnly() {
    var configBuilder = new ResolvedTypesConfig.Builder();
    var applier = new ConfigPayloadApplier<>(configBuilder, new MockAcquirer(), new MockDownloader());
    var inputConfig = new ResolvedTypesConfig.Builder();
    inputConfig.myPath(new FileReference("myPath.txt"));
    inputConfig.myUrl(new UrlReference("myUrl.txt"));
    // Model reference with id and path but an empty URL component.
    inputConfig.myModel(ModelReference.valueOf("my-id \"\" myPath.txt"));
    applier.applyPayload(ConfigPayload.fromInstance(inputConfig.build()));
    var config = configBuilder.build();
    assertEndsWith("resolvedPath/myPath.txt", config.myModel().toString());
}
|
/**
 * Entry point for a pull-message request: delegates to the full overload
 * with broker-allow and suspend flags both enabled.
 */
@Override
public RemotingCommand processRequest(final ChannelHandlerContext ctx,
    RemotingCommand request) throws RemotingCommandException {
    return this.processRequest(ctx.channel(), request, true, true);
}
|
// Pulling from a topic absent from the broker's config must return
// TOPIC_NOT_EXIST with the topic name in the remark.
@Test
public void testProcessRequest_TopicNotExist() throws RemotingCommandException {
    brokerController.getTopicConfigManager().getTopicConfigTable().remove(topic);
    final RemotingCommand request = createPullMsgCommand(RequestCode.PULL_MESSAGE);
    RemotingCommand response = pullMessageProcessor.processRequest(handlerContext, request);
    assertThat(response).isNotNull();
    assertThat(response.getCode()).isEqualTo(ResponseCode.TOPIC_NOT_EXIST);
    assertThat(response.getRemark()).contains("topic[" + topic + "] not exist");
}
|
/**
 * Returns one schema name from the collected set, or empty when none were
 * collected. When several names are present, which one is returned depends on
 * the set's iteration order.
 */
public Optional<String> getSchemaName() {
    if (schemaNames.isEmpty()) {
        return Optional.empty();
    }
    return Optional.of(schemaNames.iterator().next());
}
|
/** Two tables owned by the same schema should yield that single schema name. */
@Test
void assertGetSchemaName() {
    SimpleTableSegment firstTable = createTableSegment("table_1", "tbl_1");
    firstTable.setOwner(new OwnerSegment(0, 0, new IdentifierValue("sharding_db_1")));
    SimpleTableSegment secondTable = createTableSegment("table_2", "tbl_2");
    secondTable.setOwner(new OwnerSegment(0, 0, new IdentifierValue("sharding_db_1")));
    TablesContext tablesContext = new TablesContext(Arrays.asList(firstTable, secondTable), TypedSPILoader.getService(DatabaseType.class, "FIXTURE"), DefaultDatabase.LOGIC_NAME);
    assertTrue(tablesContext.getSchemaName().isPresent());
    assertThat(tablesContext.getSchemaName().get(), is("sharding_db_1"));
}
|
/** Returns the package definition backing this material config. */
public PackageDefinition getPackageDefinition() {
    return packageDefinition;
}
|
/**
 * When the repository reports that no plugin exists for the configured package
 * id, validateTree must record exactly one error on the PACKAGE_ID field.
 */
@Test
public void shouldAddErrorIfPackagePluginDoesNotExistsForGivenPackageId() throws Exception {
    PipelineConfigSaveValidationContext configSaveValidationContext = mock(PipelineConfigSaveValidationContext.class);
    when(configSaveValidationContext.findPackageById(anyString())).thenReturn(mock(PackageRepository.class));
    PackageRepository packageRepository = mock(PackageRepository.class);
    // Simulate a missing plugin for the package's repository.
    when(packageRepository.doesPluginExist()).thenReturn(false);
    PackageMaterialConfig packageMaterialConfig = new PackageMaterialConfig(new CaseInsensitiveString("package-name"), "package-id", PackageDefinitionMother.create("package-id"));
    packageMaterialConfig.getPackageDefinition().setRepository(packageRepository);
    packageMaterialConfig.validateTree(configSaveValidationContext);
    assertThat(packageMaterialConfig.errors().getAll().size(), is(1));
    assertThat(packageMaterialConfig.errors().on(PackageMaterialConfig.PACKAGE_ID), is("Could not find plugin for given package id:[package-id]."));
}
|
/**
 * Converts {@code cleanValue} into an instance of the class named
 * {@code className}, loaded via {@code classLoader}.
 *
 * <p>The literal string {@code "null"} is treated as {@code null}. Non-String
 * values must already be instances of the target type. String values are
 * parsed for the natively supported types (String, BigDecimal, BigInteger,
 * primitive wrappers/primitives, java.time local date/time types, enums).
 *
 * @param className fully qualified name of the target type
 * @param cleanValue value to convert; a String is parsed, anything else is type-checked
 * @param classLoader loader used to resolve {@code className}
 * @return the converted value, possibly {@code null} for non-primitive targets
 * @throws IllegalArgumentException if the value cannot be converted or the type
 *         is not natively supported
 */
public static Object convertValue(String className, Object cleanValue, ClassLoader classLoader) {
    // "null" string is converted to null
    cleanValue = "null".equals(cleanValue) ? null : cleanValue;
    if (!isPrimitive(className) && cleanValue == null) {
        return null;
    }
    Class<?> clazz = loadClass(className, classLoader);
    // If it is not a String, it has to already be an instance of the desired type.
    if (!(cleanValue instanceof String)) {
        if (clazz.isInstance(cleanValue)) {
            return cleanValue;
        }
        throw new IllegalArgumentException(
                "Object " + cleanValue + " is not a String or an instance of " + className);
    }
    String value = (String) cleanValue;
    try {
        if (clazz.isAssignableFrom(String.class)) {
            return value;
        } else if (clazz.isAssignableFrom(BigDecimal.class)) {
            return parseBigDecimal(value);
        } else if (clazz.isAssignableFrom(BigInteger.class)) {
            return parseBigInteger(value);
        } else if (clazz.isAssignableFrom(Boolean.class) || clazz.isAssignableFrom(boolean.class)) {
            return parseBoolean(value);
        } else if (clazz.isAssignableFrom(Byte.class) || clazz.isAssignableFrom(byte.class)) {
            return Byte.parseByte(value);
        } else if (clazz.isAssignableFrom(Character.class) || clazz.isAssignableFrom(char.class)) {
            return parseChar(value);
        } else if (clazz.isAssignableFrom(Double.class) || clazz.isAssignableFrom(double.class)) {
            return Double.parseDouble(cleanStringForNumberParsing(value));
        } else if (clazz.isAssignableFrom(Float.class) || clazz.isAssignableFrom(float.class)) {
            return Float.parseFloat(cleanStringForNumberParsing(value));
        } else if (clazz.isAssignableFrom(Integer.class) || clazz.isAssignableFrom(int.class)) {
            return Integer.parseInt(cleanStringForNumberParsing(value));
        } else if (clazz.isAssignableFrom(LocalDate.class)) {
            return LocalDate.parse(value, DateTimeFormatter.ISO_LOCAL_DATE);
        } else if (clazz.isAssignableFrom(LocalDateTime.class)) {
            return LocalDateTime.parse(value, DateTimeFormatter.ISO_LOCAL_DATE_TIME);
        } else if (clazz.isAssignableFrom(LocalTime.class)) {
            return LocalTime.parse(value, DateTimeFormatter.ISO_LOCAL_TIME);
        } else if (clazz.isAssignableFrom(Long.class) || clazz.isAssignableFrom(long.class)) {
            return Long.parseLong(cleanStringForNumberParsing(value));
        } else if (clazz.isAssignableFrom(Short.class) || clazz.isAssignableFrom(short.class)) {
            return Short.parseShort(cleanStringForNumberParsing(value));
        } else if (Enum.class.isAssignableFrom(clazz)) {
            // Raw Enum type is unavoidable here: the concrete enum class is only known at runtime.
            @SuppressWarnings({"unchecked", "rawtypes"})
            Object enumValue = Enum.valueOf((Class<? extends Enum>) clazz, value);
            return enumValue;
        }
    } catch (RuntimeException e) {
        // Propagate the original exception as cause so callers keep the full stack trace.
        throw new IllegalArgumentException("Impossible to parse '" + value + "' as " + className
                + " [" + e.getMessage() + "]", e);
    }
    throw new IllegalArgumentException("Class " + className
            + " is not natively supported. Please use an MVEL expression to use it.");
}
|
/** An unknown class name must surface as an IllegalArgumentException from class loading. */
@Test
public void convertValueFailLoadClassTest() {
    assertThatThrownBy(() -> convertValue("my.NotExistingClass", "Test", classLoader))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessageStartingWith("Impossible to load ");
}
|
/** Returns the number of values currently held in this row. */
public int size() {
    return values.size();
}
|
/** A freshly constructed GenericRow contains no columns. */
@Test
public void shouldHandleRowWithNoElements() {
    final GenericRow emptyRow = new GenericRow();
    assertThat(emptyRow.size(), is(0));
}
|
/** Hash is derived solely from the operand list, consistent with equals on operands. */
@Override
public int hashCode() {
    return operands.hashCode();
}
|
/** Two empty Conjunctions must agree on hashCode (equal objects, equal hashes). */
@Test
void requireThatHashCodeIsImplemented() {
    assertEquals(new Conjunction().hashCode(), new Conjunction().hashCode());
}
|
/**
 * Executes one ProcessBundle instruction end to end: obtains (or lazily
 * creates) a BundleProcessor for the request's descriptor, runs start
 * functions, feeds input elements (either embedded in the request or via the
 * data client), runs finish functions, then collects residual roots, metrics
 * and finalization requirements into the response. On success the processor
 * is released back to the cache for reuse; on any exception it is discarded.
 * The statement ordering below is significant — do not reorder.
 */
public BeamFnApi.InstructionResponse.Builder processBundle(BeamFnApi.InstructionRequest request)
    throws Exception {
    BeamFnApi.ProcessBundleResponse.Builder response = BeamFnApi.ProcessBundleResponse.newBuilder();
    BundleProcessor bundleProcessor =
        bundleProcessorCache.get(
            request,
            () -> {
                try {
                    return createBundleProcessor(
                        request.getProcessBundle().getProcessBundleDescriptorId(),
                        request.getProcessBundle());
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            });
    try {
        PTransformFunctionRegistry startFunctionRegistry = bundleProcessor.getStartFunctionRegistry();
        PTransformFunctionRegistry finishFunctionRegistry =
            bundleProcessor.getFinishFunctionRegistry();
        ExecutionStateTracker stateTracker = bundleProcessor.getStateTracker();
        try (HandleStateCallsForBundle beamFnStateClient = bundleProcessor.getBeamFnStateClient()) {
            stateTracker.start(request.getInstructionId());
            try {
                // Already in reverse topological order so we don't need to do anything.
                for (ThrowingRunnable startFunction : startFunctionRegistry.getFunctions()) {
                    LOG.debug("Starting function {}", startFunction);
                    startFunction.run();
                }
                if (request.getProcessBundle().hasElements()) {
                    // Inputs were embedded directly in the request; push them through the observer.
                    boolean inputFinished =
                        bundleProcessor
                            .getInboundObserver()
                            .multiplexElements(request.getProcessBundle().getElements());
                    if (!inputFinished) {
                        throw new RuntimeException(
                            "Elements embedded in ProcessBundleRequest do not contain stream terminators for "
                                + "all data and timer inputs. Unterminated endpoints: "
                                + bundleProcessor.getInboundObserver().getUnfinishedEndpoints());
                    }
                } else if (!bundleProcessor.getInboundEndpointApiServiceDescriptors().isEmpty()) {
                    // Inputs arrive over the data plane; register, block until done, then unregister.
                    BeamFnDataInboundObserver observer = bundleProcessor.getInboundObserver();
                    beamFnDataClient.registerReceiver(
                        request.getInstructionId(),
                        bundleProcessor.getInboundEndpointApiServiceDescriptors(),
                        observer);
                    observer.awaitCompletion();
                    beamFnDataClient.unregisterReceiver(
                        request.getInstructionId(),
                        bundleProcessor.getInboundEndpointApiServiceDescriptors());
                }
                // Need to reverse this since we want to call finish in topological order.
                for (ThrowingRunnable finishFunction :
                    Lists.reverse(finishFunctionRegistry.getFunctions())) {
                    LOG.debug("Finishing function {}", finishFunction);
                    finishFunction.run();
                }
                // If bundleProcessor has not flushed any elements, embed them in response.
                embedOutboundElementsIfApplicable(response, bundleProcessor);
                // Add all checkpointed residuals to the response.
                response.addAllResidualRoots(bundleProcessor.getSplitListener().getResidualRoots());
                // Add all metrics to the response.
                bundleProcessor.getProgressRequestLock().lock();
                Map<String, ByteString> monitoringData = finalMonitoringData(bundleProcessor);
                if (runnerAcceptsShortIds) {
                    response.putAllMonitoringData(monitoringData);
                } else {
                    // Runner cannot handle short ids: expand each entry into a full MonitoringInfo.
                    for (Map.Entry<String, ByteString> metric : monitoringData.entrySet()) {
                        response.addMonitoringInfos(
                            shortIds.get(metric.getKey()).toBuilder().setPayload(metric.getValue()));
                    }
                }
                if (!bundleProcessor.getBundleFinalizationCallbackRegistrations().isEmpty()) {
                    finalizeBundleHandler.registerCallbacks(
                        bundleProcessor.getInstructionId(),
                        ImmutableList.copyOf(bundleProcessor.getBundleFinalizationCallbackRegistrations()));
                    response.setRequiresFinalization(true);
                }
            } finally {
                // We specifically deactivate state tracking while we are holding the progress request and
                // sampling locks.
                stateTracker.reset();
            }
        }
        // Mark the bundle processor as re-usable.
        bundleProcessorCache.release(
            request.getProcessBundle().getProcessBundleDescriptorId(), bundleProcessor);
        return BeamFnApi.InstructionResponse.newBuilder().setProcessBundle(response);
    } catch (Exception e) {
        // Make sure we clean-up from the active set of bundle processors.
        bundleProcessorCache.discard(bundleProcessor);
        throw e;
    }
}
|
/**
 * Verifies the bundle-processor lifecycle around the cache: after a successful
 * bundle the processor is reset and returned to the cache; if reset itself
 * throws, the processor is discarded instead of being cached.
 */
@Test
public void testBundleProcessorIsResetWhenAddedBackToCache() throws Exception {
    BeamFnApi.ProcessBundleDescriptor processBundleDescriptor =
        BeamFnApi.ProcessBundleDescriptor.newBuilder()
            .putTransforms(
                "2L",
                RunnerApi.PTransform.newBuilder()
                    .setSpec(RunnerApi.FunctionSpec.newBuilder().setUrn(DATA_INPUT_URN).build())
                    .build())
            .build();
    Map<String, BeamFnApi.ProcessBundleDescriptor> fnApiRegistry =
        ImmutableMap.of("1L", processBundleDescriptor);
    ProcessBundleHandler handler =
        new ProcessBundleHandler(
            PipelineOptionsFactory.create(),
            Collections.emptySet(),
            fnApiRegistry::get,
            beamFnDataClient,
            null /* beamFnStateGrpcClientCache */,
            null /* finalizeBundleHandler */,
            new ShortIdMap(),
            executionStateSampler,
            ImmutableMap.of(DATA_INPUT_URN, (context) -> null),
            Caches.noop(),
            new TestBundleProcessorCache(),
            null /* dataSampler */);
    assertThat(TestBundleProcessor.resetCnt, equalTo(0));
    handler.processBundle(
        BeamFnApi.InstructionRequest.newBuilder()
            .setInstructionId("998L")
            .setProcessBundle(
                BeamFnApi.ProcessBundleRequest.newBuilder().setProcessBundleDescriptorId("1L"))
            .build());
    // Check that BundleProcessor is reset when added back to the cache
    assertThat(TestBundleProcessor.resetCnt, equalTo(1));
    // BundleProcessor is added back to the BundleProcessorCache
    assertThat(handler.bundleProcessorCache.getCachedBundleProcessors().size(), equalTo(1));
    assertThat(
        handler.bundleProcessorCache.getCachedBundleProcessors().get("1L").size(), equalTo(1));
    // Add a reset handler that throws to test discarding the bundle processor on reset failure.
    Iterables.getOnlyElement(handler.bundleProcessorCache.getCachedBundleProcessors().get("1L"))
        .getResetFunctions()
        .add(
            () -> {
                throw new IllegalStateException("ResetFailed");
            });
    handler.processBundle(
        BeamFnApi.InstructionRequest.newBuilder()
            .setInstructionId("999L")
            .setProcessBundle(
                BeamFnApi.ProcessBundleRequest.newBuilder().setProcessBundleDescriptorId("1L"))
            .build());
    // BundleProcessor is discarded instead of being added back to the BundleProcessorCache
    assertThat(
        handler.bundleProcessorCache.getCachedBundleProcessors().get("1L").size(), equalTo(0));
}
|
/**
 * Builds a provider for the fixed file-modification timestamp configured by
 * {@code modificationTime}: either the special keyword EPOCH_PLUS_SECOND
 * (1970-01-01T00:00:01Z) or an ISO-8601 date-time string.
 *
 * @throws InvalidFilesModificationTimeException if the value is neither the
 *         keyword nor a parseable ISO date-time
 */
@VisibleForTesting
static ModificationTimeProvider createModificationTimeProvider(String modificationTime)
    throws InvalidFilesModificationTimeException {
    if ("EPOCH_PLUS_SECOND".equals(modificationTime)) {
        Instant epochPlusSecond = Instant.ofEpochSecond(1);
        return (ignored1, ignored2) -> epochPlusSecond;
    }
    try {
        Instant timestamp = DateTimeFormatter.ISO_DATE_TIME.parse(modificationTime, Instant::from);
        return (ignored1, ignored2) -> timestamp;
    } catch (DateTimeParseException ex) {
        throw new InvalidFilesModificationTimeException(modificationTime, modificationTime, ex);
    }
}
|
/** A non-ISO, non-keyword value must raise InvalidFilesModificationTimeException carrying the raw input. */
@Test
public void testCreateModificationTimeProvider_invalidValue() {
    InvalidFilesModificationTimeException exception =
        assertThrows(
            InvalidFilesModificationTimeException.class,
            () -> PluginConfigurationProcessor.createModificationTimeProvider("invalid format"));
    assertThat(exception).hasMessageThat().isEqualTo("invalid format");
    assertThat(exception.getInvalidFilesModificationTime()).isEqualTo("invalid format");
}
|
/**
 * Runs the step for the project root, skipping pull requests entirely —
 * there is no real Quality Gate on a pull request, hence no notification.
 */
@Override
public void execute(ComputationStep.Context context) {
    if (!analysisMetadataHolder.isPullRequest()) {
        executeForProject(treeRootHolder.getRoot());
    }
}
|
/** Without a raw ALERT_STATUS measure the step must read the measure once and emit no events. */
@Test
public void no_event_if_no_raw_ALERT_STATUS_measure() {
    when(measureRepository.getRawMeasure(PROJECT_COMPONENT, alertStatusMetric)).thenReturn(Optional.empty());
    underTest.execute(new TestComputationStepContext());
    verify(measureRepository).getRawMeasure(PROJECT_COMPONENT, alertStatusMetric);
    verifyNoMoreInteractions(measureRepository, eventRepository);
}
|
/** Creates a lazily-evaluated issue input for the given component. */
public Input<DefaultIssue> create(Component component) {
    return new RawLazyInput(component);
}
|
/**
 * When neither the external issue nor the rule declares severity/type, both
 * should be derived from the rule's default impact
 * (MAINTAINABILITY/MEDIUM -> CODE_SMELL/MAJOR).
 */
@Test
void create_whenSeverityAndTypeNotProvidedByIssueAndRule_shouldTakeFromTheRuleImpact() {
    registerRule(RuleKey.of("external_eslint", "S001"), "rule",
        r -> r.addDefaultImpact(MAINTAINABILITY, org.sonar.api.issue.impact.Severity.MEDIUM));
    ScannerReport.ExternalIssue reportIssue = createIssue(null, null);
    reportReader.putExternalIssues(FILE.getReportAttributes().getRef(), asList(reportIssue));
    Input<DefaultIssue> input = underTest.create(FILE);
    Collection<DefaultIssue> issues = input.getIssues();
    assertThat(issues).hasSize(1);
    DefaultIssue issue = Iterators.getOnlyElement(issues.iterator());
    assertThat(issue.type()).isEqualTo(RuleType.CODE_SMELL);
    assertThat(issue.severity()).isEqualTo(Severity.MAJOR);
}
|
/** Returns the number of entries tracked by this index. */
int size()
{
    return count;
}
|
/** An empty catalog index reports size zero. */
@Test
void sizeReturnsZeroForAnEmptyIndex()
{
    assertEquals(0, catalogIndex.size());
}
|
/** Formats a quantity as an RS decimal stack, using non-precise mode by default. */
public static String quantityToRSDecimalStack(int quantity)
{
    return quantityToRSDecimalStack(quantity, false);
}
|
/**
 * Exercises precise RS decimal-stack formatting across the K/M/B thresholds,
 * including boundary values and up-to-three-significant-decimal truncation.
 * The first assertion covers the single-argument (non-precise) overload.
 */
@Test
public void quantityToPreciseStackSize()
{
    assertEquals("0", QuantityFormatter.quantityToRSDecimalStack(0));
    assertEquals("8500", QuantityFormatter.quantityToRSDecimalStack(8_500, true));
    assertEquals("10K", QuantityFormatter.quantityToRSDecimalStack(10_000, true));
    assertEquals("21.7K", QuantityFormatter.quantityToRSDecimalStack(21_710, true));
    assertEquals("100K", QuantityFormatter.quantityToRSDecimalStack(100_000, true));
    assertEquals("100.3K", QuantityFormatter.quantityToRSDecimalStack(100_310, true));
    assertEquals("1M", QuantityFormatter.quantityToRSDecimalStack(1_000_000, true));
    assertEquals("8.45M", QuantityFormatter.quantityToRSDecimalStack(8_450_000, true));
    assertEquals("8.451M", QuantityFormatter.quantityToRSDecimalStack(8_451_000, true));
    assertEquals("10M", QuantityFormatter.quantityToRSDecimalStack(10_000_000, true));
    assertEquals("12.8M", QuantityFormatter.quantityToRSDecimalStack(12_800_000, true));
    assertEquals("12.85M", QuantityFormatter.quantityToRSDecimalStack(12_850_000, true));
    assertEquals("12.851M", QuantityFormatter.quantityToRSDecimalStack(12_851_000, true));
    assertEquals("100M", QuantityFormatter.quantityToRSDecimalStack(100_000_000, true));
    assertEquals("250.1M", QuantityFormatter.quantityToRSDecimalStack(250_100_000, true));
    assertEquals("250.151M", QuantityFormatter.quantityToRSDecimalStack(250_151_000, true));
    assertEquals("1B", QuantityFormatter.quantityToRSDecimalStack(1_000_000_000, true));
    // Grouped underscores normalized to thousands separators (same values as before).
    assertEquals("1.5B", QuantityFormatter.quantityToRSDecimalStack(1_500_000_000, true));
    assertEquals("1.55B", QuantityFormatter.quantityToRSDecimalStack(1_550_000_000, true));
    assertEquals("2.147B", QuantityFormatter.quantityToRSDecimalStack(Integer.MAX_VALUE, true));
}
|
/** Resolves a Java reflection Type to its ParamType using the default Java-to-arg-type mapping. */
public static ParamType getSchemaFromType(final Type type) {
    return getSchemaFromType(type, JAVA_TO_ARG_TYPE);
}
|
/** A List&lt;Double&gt; parameter type must map to an ArrayType with DOUBLE elements. */
@Test
public void shouldGetArraySchemaFromListClass() throws NoSuchMethodException {
    // Reflectively grab the generic List<Double> parameter of the local fixture method.
    final Type type = getClass().getDeclaredMethod("listType", List.class)
        .getGenericParameterTypes()[0];
    final ParamType schema = UdfUtil.getSchemaFromType(type);
    assertThat(schema, instanceOf(ArrayType.class));
    assertThat(((ArrayType) schema).element(), equalTo(ParamTypes.DOUBLE));
}
|
/**
 * Processes a raw WebSocket text message for the given session. The payload is
 * decoded into a commands wrapper according to the session type; decode
 * failures either report an error (authenticated session) or close the
 * connection (unauthenticated). For unauthenticated sessions the first message
 * must carry an auth command whose token is verified before the session is
 * promoted out of the pending set and its commands are handled.
 */
void processMsg(SessionMetaData sessionMd, String msg) throws IOException {
    WebSocketSessionRef sessionRef = sessionMd.sessionRef;
    WsCommandsWrapper cmdsWrapper;
    try {
        switch (sessionRef.getSessionType()) {
            case GENERAL:
                cmdsWrapper = JacksonUtil.fromString(msg, WsCommandsWrapper.class);
                break;
            case TELEMETRY:
                cmdsWrapper = JacksonUtil.fromString(msg, TelemetryCmdsWrapper.class).toCommonCmdsWrapper();
                break;
            case NOTIFICATIONS:
                cmdsWrapper = JacksonUtil.fromString(msg, NotificationCmdsWrapper.class).toCommonCmdsWrapper();
                break;
            default:
                // Unknown session types are silently ignored.
                return;
        }
    } catch (Exception e) {
        log.debug("{} Failed to decode subscription cmd: {}", sessionRef, e.getMessage(), e);
        if (sessionRef.getSecurityCtx() != null) {
            // Authenticated session: keep it open, just report the bad payload.
            webSocketService.sendError(sessionRef, 1, SubscriptionErrorCode.BAD_REQUEST, "Failed to parse the payload");
        } else {
            // Unauthenticated session: drop the connection on malformed input.
            close(sessionRef, CloseStatus.BAD_DATA.withReason(e.getMessage()));
        }
        return;
    }
    if (sessionRef.getSecurityCtx() != null) {
        log.trace("{} Processing {}", sessionRef, msg);
        webSocketService.handleCommands(sessionRef, cmdsWrapper);
    } else {
        AuthCmd authCmd = cmdsWrapper.getAuthCmd();
        if (authCmd == null) {
            close(sessionRef, CloseStatus.POLICY_VIOLATION.withReason("Auth cmd is missing"));
            return;
        }
        log.trace("{} Authenticating session", sessionRef);
        SecurityUser securityCtx;
        try {
            securityCtx = authenticationProvider.authenticate(authCmd.getToken());
        } catch (Exception e) {
            close(sessionRef, CloseStatus.BAD_DATA.withReason(e.getMessage()));
            return;
        }
        // Promote the session: attach the security context and move it out of pending.
        sessionRef.setSecurityCtx(securityCtx);
        pendingSessions.invalidate(sessionMd.session.getId());
        establishSession(sessionMd.session, sessionRef, sessionMd);
        webSocketService.handleCommands(sessionRef, cmdsWrapper);
    }
}
|
/**
 * Submits 100 messages concurrently with random jitter and verifies that every
 * message reaches the handler exactly once — no loss, no duplication.
 */
@Test
void sendHandler_onMsg_allProcessed() throws Exception {
    Deque<String> msgs = new ConcurrentLinkedDeque<>();
    doAnswer(inv -> msgs.add(inv.getArgument(1))).when(wsHandler).processMsg(any(), any());
    for (int i = 0; i < 100; i++) {
        String msg = String.valueOf(i);
        executor.submit(() -> {
            try {
                // Random jitter so submissions interleave unpredictably.
                Thread.sleep(new Random().nextInt(50));
                sendHandler.onMsg(msg);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
    }
    executor.shutdown();
    // Fail explicitly on timeout instead of asserting against a partial result.
    assertThat(executor.awaitTermination(5, TimeUnit.SECONDS)).isTrue();
    assertThat(msgs).map(Integer::parseInt).doesNotHaveDuplicates().hasSize(100);
}
|
/**
 * Returns {@code connectionSize} connections for the given data source,
 * starting at {@code connectionOffset}, reusing cached connections and
 * creating only the shortfall. Three cases: enough cached connections (slice
 * the cache), no cached connections (create all, cache them, fire
 * after-create transaction hooks), or a partial cache (top up with new
 * connections and cache only the new ones).
 *
 * <p>NOTE(review): the cached collection is fetched under the lock but sized
 * and copied outside it; this presumably relies on callers serializing access
 * per session — confirm before restructuring.
 */
@Override
public List<Connection> getConnections(final String databaseName, final String dataSourceName, final int connectionOffset, final int connectionSize,
                                       final ConnectionMode connectionMode) throws SQLException {
    Preconditions.checkNotNull(databaseName, "Current database name is null.");
    Collection<Connection> connections;
    String cacheKey = getKey(databaseName, dataSourceName);
    synchronized (cachedConnections) {
        connections = cachedConnections.get(cacheKey);
    }
    List<Connection> result;
    int maxConnectionSize = connectionOffset + connectionSize;
    if (connections.size() >= maxConnectionSize) {
        // Enough cached connections: serve the requested window directly.
        result = new ArrayList<>(connections).subList(connectionOffset, maxConnectionSize);
    } else if (connections.isEmpty()) {
        // Nothing cached: create the full window, cache it, run transaction hooks.
        Collection<Connection> newConnections = createNewConnections(databaseName, dataSourceName, maxConnectionSize, connectionMode);
        result = new ArrayList<>(newConnections).subList(connectionOffset, maxConnectionSize);
        synchronized (cachedConnections) {
            cachedConnections.putAll(cacheKey, newConnections);
        }
        executeTransactionHooksAfterCreateConnections(result);
    } else {
        // Partial cache: top up with just the shortfall and cache only the new ones.
        List<Connection> allConnections = new ArrayList<>(maxConnectionSize);
        allConnections.addAll(connections);
        List<Connection> newConnections = createNewConnections(databaseName, dataSourceName, maxConnectionSize - connections.size(), connectionMode);
        allConnections.addAll(newConnections);
        result = allConnections.subList(connectionOffset, maxConnectionSize);
        synchronized (cachedConnections) {
            cachedConnections.putAll(cacheKey, newConnections);
        }
    }
    return result;
}
|
/** Each newly created connection must be passed through the registered post-processors while the transaction stays active. */
@Test
void assertGetConnectionWithConnectionPostProcessors() throws SQLException {
    connectionSession.getTransactionStatus().setInTransaction(true);
    when(backendDataSource.getConnections(anyString(), anyString(), eq(2), any())).thenReturn(MockConnectionUtils.mockNewConnections(2));
    setConnectionPostProcessors();
    List<Connection> actualConnections = databaseConnectionManager.getConnections(DefaultDatabase.LOGIC_NAME, "ds1", 0, 2, ConnectionMode.MEMORY_STRICTLY);
    // One post-processor invocation per created connection.
    verify(databaseConnectionManager.getConnectionPostProcessors().iterator().next(), times(2)).process(any());
    assertThat(actualConnections.size(), is(2));
    assertTrue(connectionSession.getTransactionStatus().isInTransaction());
}
|
/**
 * Parses a single SQL migration command: validates the statement type,
 * substitutes variables into the parsed statement, then re-parses the
 * substituted text. Commands classified as plain STATEMENTs carry no AST;
 * everything else is built into an AST node attached to the ParsedCommand.
 */
public static ParsedCommand parse(
    // CHECKSTYLE_RULES.ON: CyclomaticComplexity
    final String sql, final Map<String, String> variables) {
    validateSupportedStatementType(sql);
    final String substituted;
    try {
        substituted = VariableSubstitutor.substitute(KSQL_PARSER.parse(sql).get(0), variables);
    } catch (ParseFailedException e) {
        throw new MigrationException(String.format(
            "Failed to parse the statement. Statement: %s. Reason: %s",
            sql, e.getMessage()));
    }
    final SqlBaseParser.SingleStatementContext statementContext = KSQL_PARSER.parse(substituted)
        .get(0).getStatement();
    final boolean isStatement = StatementType.get(statementContext.statement().getClass())
        == StatementType.STATEMENT;
    return new ParsedCommand(substituted,
        isStatement ? Optional.empty() : Optional.of(new AstBuilder(TypeRegistry.EMPTY)
            .buildStatement(statementContext)));
}
|
/** CREATE OR REPLACE is kept as a raw command string and not built into an AST statement. */
@Test
public void shouldParseCreateOrReplaceStatement() {
    // Given:
    final String createOrReplace =
        "create or replace stream FOO (A STRING) WITH (KAFKA_TOPIC='FOO', VALUE_FORMAT='DELIMITED');";
    // When:
    List<CommandParser.ParsedCommand> commands = parse(createOrReplace);
    // Then:
    assertThat(commands.size(), is(1));
    assertThat(commands.get(0).getStatement().isPresent(), is(false));
    assertThat(commands.get(0).getCommand(), is(createOrReplace));
}
|
/** Forwards unbuffer to the wrapped external stream when one is present; otherwise a no-op. */
@Override
public void unbuffer() {
    mExternalFileInStream.ifPresent(externalStream -> externalStream.unbuffer());
}
|
/**
 * Calling unbuffer() before/between/after seeks and reads must not disturb the
 * stream: the first read misses the cache (one page cached, none served), the
 * second read of the same range is served from cache.
 */
@Test
public void testUnbuffer() throws Exception {
    int fileSize = mPageSize;
    byte[] testData = BufferUtils.getIncreasingByteArray(fileSize);
    ByteArrayCacheManager manager = new ByteArrayCacheManager();
    LocalCacheFileInStream stream = setupWithSingleFile(testData, manager);
    int partialReadSize = fileSize / 5;
    int offset = fileSize / 5;
    // First pass: cache miss path.
    byte[] cacheMiss = new byte[partialReadSize];
    stream.unbuffer();
    stream.seek(offset);
    stream.unbuffer();
    Assert.assertEquals(partialReadSize, stream.read(cacheMiss));
    stream.unbuffer();
    Assert.assertArrayEquals(
        Arrays.copyOfRange(testData, offset, offset + partialReadSize), cacheMiss);
    Assert.assertEquals(0, manager.mPagesServed);
    Assert.assertEquals(1, manager.mPagesCached);
    // Second pass: same range should now be a cache hit.
    byte[] cacheHit = new byte[partialReadSize];
    stream.unbuffer();
    stream.seek(offset);
    stream.unbuffer();
    Assert.assertEquals(partialReadSize, stream.read(cacheHit));
    stream.unbuffer();
    Assert.assertArrayEquals(
        Arrays.copyOfRange(testData, offset, offset + partialReadSize), cacheHit);
    Assert.assertEquals(1, manager.mPagesServed);
}
|
/**
 * Capacity-management advice around config publish: when capacity management
 * is enabled and the request targets neither a beta IP set nor a tag, routes
 * the call through the insert or update capacity check depending on whether
 * the config already exists. All other cases proceed untouched.
 */
@Around(SYNC_UPDATE_CONFIG_ALL)
public Object aroundSyncUpdateConfigAll(ProceedingJoinPoint pjp, HttpServletRequest request,
    HttpServletResponse response, String dataId, String group, String content, String appName, String srcUser,
    String tenant, String tag) throws Throwable {
    if (!PropertyUtil.isManageCapacity()) {
        return pjp.proceed();
    }
    LOGGER.info("[capacityManagement] aroundSyncUpdateConfigAll");
    String betaIps = request.getHeader("betaIps");
    if (StringUtils.isBlank(betaIps)) {
        if (StringUtils.isBlank(tag)) {
            // do capacity management limitation check for writing or updating config_info table.
            if (configInfoPersistService.findConfigInfo(dataId, group, tenant) == null) {
                // Write operation.
                return do4Insert(pjp, request, response, group, tenant, content);
            }
            // Update operation.
            return do4Update(pjp, request, response, dataId, group, tenant, content);
        }
    }
    return pjp.proceed();
}
|
/**
 * Insert path with a proceed() failure: after the cluster and tenant usage
 * counters are incremented and the join point throws, the aspect must roll
 * both counters back (one DECREMENT each) and rethrow, leaving the result null.
 */
@Test
void testAroundSyncUpdateConfigAllForInsertRollbackAspect() throws Throwable {
    //test with insert
    //condition:
    // 1. has tenant: true
    // 2. capacity limit check: true
    // 3. over cluster quota: false
    // 4. tenant capacity: not null
    // 5. over tenant max size: true/false (if tenant max size is 0, will use default max size)
    when(PropertyUtil.isManageCapacity()).thenReturn(true);
    when(PropertyUtil.isCapacityLimitCheck()).thenReturn(true);
    when(configInfoPersistService.findConfigInfo(any(), any(), any())).thenReturn(null);
    when(capacityService.insertAndUpdateClusterUsage(any(), anyBoolean())).thenReturn(true);
    when(capacityService.updateClusterUsage(any())).thenReturn(true);
    when(capacityService.updateTenantUsage(any(), eq(mockTenant))).thenReturn(true);
    TenantCapacity localTenantCapacity = new TenantCapacity();
    localTenantCapacity.setTenant(mockTenant);
    localTenantCapacity.setMaxSize(10 * 1024);
    localTenantCapacity.setMaxAggrCount(1024);
    when(capacityService.getTenantCapacity(eq(mockTenant))).thenReturn(localTenantCapacity);
    String localMockResult = null;
    MockHttpServletRequest mockHttpServletRequest = new MockHttpServletRequest();
    MockHttpServletResponse mockHttpServletResponse = new MockHttpServletResponse();
    try {
        localMockResult = (String) capacityManagementAspect.aroundSyncUpdateConfigAll(localMockProceedingJoinPoint,
            mockHttpServletRequest, mockHttpServletResponse, mockDataId, mockGroup, mockContent, null, null, mockTenant, null);
    } catch (Throwable e) {
        assertEquals(e.getMessage(), mockException.getMessage());
    }
    assertNull(localMockResult);
    Mockito.verify(capacityService, Mockito.times(0)).initTenantCapacity(eq(mockTenant));
    Mockito.verify(capacityService, Mockito.times(1)).updateTenantUsage(eq(CounterMode.INCREMENT), eq(mockTenant));
    Mockito.verify(capacityService, Mockito.times(1)).updateTenantUsage(eq(CounterMode.DECREMENT), eq(mockTenant));
    Mockito.verify(capacityService, Mockito.times(1)).insertAndUpdateClusterUsage(eq(CounterMode.INCREMENT), anyBoolean());
    Mockito.verify(capacityService, Mockito.times(1)).updateClusterUsage(eq(CounterMode.DECREMENT));
    Mockito.verify(localMockProceedingJoinPoint, Mockito.times(1)).proceed();
}
|
/**
 * Applies the script's GSUB features to the glyph sequence in a fixed order,
 * after first adjusting reph position and repositioning glyphs. Unsupported
 * features are skipped, except that a missing rkrf feature is synthesized from
 * the vatu feature when available. Returns an unmodifiable list.
 */
@Override
public List<Integer> applyTransforms(List<Integer> originalGlyphIds)
{
    List<Integer> intermediateGlyphsFromGsub = adjustRephPosition(originalGlyphIds);
    intermediateGlyphsFromGsub = repositionGlyphs(intermediateGlyphsFromGsub);
    for (String feature : FEATURES_IN_ORDER)
    {
        if (!gsubData.isFeatureSupported(feature))
        {
            if (feature.equals(RKRF_FEATURE) && gsubData.isFeatureSupported(VATU_FEATURE))
            {
                // Create your own rkrf feature from vatu feature
                intermediateGlyphsFromGsub = applyRKRFFeature(
                    gsubData.getFeature(VATU_FEATURE),
                    intermediateGlyphsFromGsub);
            }
            LOG.debug("the feature {} was not found", feature);
            continue;
        }
        LOG.debug("applying the feature {}", feature);
        ScriptFeature scriptFeature = gsubData.getFeature(feature);
        intermediateGlyphsFromGsub = applyGsubFeature(scriptFeature,
            intermediateGlyphsFromGsub);
    }
    return Collections.unmodifiableList(intermediateGlyphsFromGsub);
}
|
/** Gujarati "ટ્ર" must substitute to the expected glyph pair via the blwf (below-base form) feature. */
@Test
void testApplyTransforms_blwf()
{
    // given
    List<Integer> glyphsAfterGsub = Arrays.asList(76,332);
    // when
    List<Integer> result = gsubWorkerForGujarati.applyTransforms(getGlyphIds("ટ્ર"));
    // then
    assertEquals(glyphsAfterGsub, result);
}
|
/**
 * Acquires a lock from the wrapped provider and decorates it with a
 * KeepAliveLock that periodically extends it. Configurations whose
 * lockAtMostFor is shorter than the required minimum are rejected, since the
 * keep-alive mechanism could not run within them.
 */
@Override
public Optional<SimpleLock> lock(LockConfiguration lockConfiguration) {
    boolean tooShort = lockConfiguration.getLockAtMostFor().compareTo(minimalLockAtMostFor) < 0;
    if (tooShort) {
        throw new IllegalArgumentException(
            "Can not use KeepAliveLockProvider with lockAtMostFor shorter than " + minimalLockAtMostFor);
    }
    return wrapped.lock(lockConfiguration)
        .map(delegate -> new KeepAliveLock(lockConfiguration, delegate, executorService));
}
|
/**
 * The keep-alive loop must extend the original lock, then keep extending the
 * lock returned by the previous extension, and stop entirely once the lock is
 * unlocked (no further interactions after 10s).
 */
@Test
void shouldExtendMultipleTimes() {
    SimpleLock extendedLock = mock(SimpleLock.class);
    mockExtension(originalLock, Optional.of(extendedLock));
    mockExtension(extendedLock, Optional.of(extendedLock));
    Optional<SimpleLock> lock = provider.lock(lockConfiguration);
    assertThat(lock).isNotNull();
    tickMs(1_500);
    verify(originalLock).extend(lockConfiguration.getLockAtMostFor(), ofMillis(500));
    tickMs(1_500);
    verify(extendedLock).extend(lockConfiguration.getLockAtMostFor(), ZERO);
    lock.get().unlock();
    verify(extendedLock).unlock();
    tickMs(10_000);
    verifyNoMoreInteractions(originalLock);
}
|
/** Delegates the open/closed state to the wrapped store. */
@Override
public boolean isOpen() {
    return store.isOpen();
}
|
/** The wrapper must mirror the wrapped timestamped store's isOpen() in both states. */
@Test
public void shouldReturnIsOpenForTimestampedStore() {
    givenWrapperWithTimestampedStore();
    // test "isOpen = true"
    when(timestampedStore.isOpen()).thenReturn(true);
    assertThat(wrapper.isOpen(), equalTo(true));
    // test "isOpen = false"
    when(timestampedStore.isOpen()).thenReturn(false);
    assertThat(wrapper.isOpen(), equalTo(false));
}
|
/** Reads the agent trace id previously stashed on the exchange's attributes. */
private String getTraceId() {
    return (String) exchange.getAttributes().get(GenericLoggingConstant.SHENYU_AGENT_TRACE_ID);
}
|
/**
 * getTraceId() must return the trace id stored on the exchange attributes.
 * Reflection is used because the accessor is private.
 */
@Test
public void testGetTraceId() throws Exception {
    loggingServerHttpResponse.setExchange(exchange);
    exchange.getResponse().getHeaders();
    Method method = loggingServerHttpResponse.getClass().getDeclaredMethod("getTraceId");
    method.setAccessible(true);
    String traceId = (String) method.invoke(loggingServerHttpResponse);
    // assertEquals takes (expected, actual); the original had them swapped,
    // which yields a misleading failure message.
    Assertions.assertEquals("shenyu-agent-trace-id", traceId);
}
|
/** Converts an Avro schema into a BigQuery schema built from its converted fields. */
@VisibleForTesting
Schema convertSchema(org.apache.avro.Schema schema) {
    return Schema.of(getFields(schema));
}
|
/** Doubly-nested Avro records must convert to nested STRUCT fields with matching modes and types. */
@Test
void convertSchema_nestedFields() {
    com.google.cloud.bigquery.Schema expected = com.google.cloud.bigquery.Schema.of(
        Field.newBuilder("nestedOne", StandardSQLTypeName.STRUCT,
            Field.newBuilder("nestedOptionalInt", StandardSQLTypeName.INT64).setMode(Field.Mode.NULLABLE).build(),
            Field.newBuilder("nestedRequiredDouble", StandardSQLTypeName.FLOAT64).setMode(Field.Mode.REQUIRED).build(),
            Field.newBuilder("nestedTwo", StandardSQLTypeName.STRUCT,
                Field.newBuilder("doublyNestedString", StandardSQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()).setMode(Field.Mode.REQUIRED).build())
        .setMode(Field.Mode.NULLABLE).build());
    Assertions.assertEquals(expected, SCHEMA_RESOLVER.convertSchema(NESTED_FIELDS));
}
|
/**
 * Expands a glob pattern into matching paths. A pattern without any glob
 * wildcard is returned as-is in a singleton list.
 */
public static ImmutableList<String> glob(final String glob) {
    final Path path = getGlobPath(glob);
    final int globIndex = getGlobIndex(path);
    return globIndex < 0 ? of(glob) : doGlob(path, searchPath(path, globIndex));
}
|
/** Globbing under a nonexistent root directory must raise a MocoException. */
@Test
public void should_throw_exception_for_unknown_root() {
    assertThrows(MocoException.class,
        () -> Globs.glob("unknown/src/test/resources/details/*.json"));
}
|
/** Sends the given request through the underlying API client and returns its typed response. */
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
    return api.send(request);
}
|
/**
 * Live-API round trip for sticker sets: create a set with one static sticker,
 * read it back and check its properties, mutate title/emoji/keywords, verify
 * that mask coordinates are rejected for non-mask stickers, then delete the set.
 * Step order matters — each call depends on the previous one's effects.
 */
@Test
public void createNewStickerSet() {
    String setName = "testNEW" + System.currentTimeMillis() + "_by_pengrad_test_bot";
    String title = "test112312312";
    BaseResponse response = bot.execute(
        new CreateNewStickerSet(chatId, setName, title, new InputSticker[]{
            new InputSticker(stickerFile, Sticker.Format.Static, new String[]{"\uD83D\uDE00"})
                .keywords(new String[]{"yes", "no"})
                .maskPosition(new MaskPosition(MaskPosition.Point.forehead, 10f, 20f, 1f))
        }, Sticker.Format.Static)
        .needsRepainting(false));
    assertTrue(response.isOk());
    StickerSet set = bot.execute(new GetStickerSet(setName)).stickerSet();
    assertEquals(setName, set.name());
    assertEquals(title, set.title());
    assertFalse(set.isVideo());
    Sticker[] stickers = set.stickers();
    assertEquals(1, stickers.length);
    assertEquals("\uD83D\uDE00", stickers[0].emoji());
    assertFalse(stickers[0].isVideo());
    assertNull(stickers[0].needsRepainting());
    assertNull(stickers[0].premiumAnimation());
    assertNull(stickers[0].customEmojiId());
    response = bot.execute(new SetStickerSetTitle(setName, "new title"));
    assertTrue(response.isOk());
    String stickerId = stickers[0].fileId();
    response = bot.execute(new SetStickerEmojiList(stickerId, new String[]{"\uD83D\uDE00"}));
    assertTrue(response.isOk());
    response = bot.execute(new SetStickerKeywords(stickerId).keywords(new String[]{"ok"}));
    assertTrue(response.isOk());
    // Mask coordinates are not supported for a regular (non-mask) sticker set.
    response = bot.execute(new SetStickerMaskPosition(stickerId)
        .maskPosition(new MaskPosition(MaskPosition.Point.mouth, 0f, 0f, 0f)));
    assertFalse(response.isOk());
    assertEquals("Bad Request: STICKER_MASK_COORDS_NOT_SUPPORTED", response.description());
    response = bot.execute(new DeleteStickerSet(setName));
    assertTrue(response.isOk());
}
|
/**
 * Writes one row by dispatching each column to its positional field writer,
 * then bumps the running row counter.
 */
public void write(Row row) {
    final int fieldCount = fieldWriters.length;
    for (int fieldIndex = 0; fieldIndex < fieldCount; fieldIndex++) {
        fieldWriters[fieldIndex].write(row, fieldIndex);
    }
    rowCount++;
}
|
@Test
public void testWrite() {
    // Smoke test: building and closing the batch must not throw.
    // NOTE(review): previously printed the batch to stdout; removed because
    // tests should not emit console noise and the print asserted nothing.
    ArrowRecordBatch recordBatch = createArrowRecordBatch();
    recordBatch.close();
}
|
/**
 * Reads a PostgreSQL int2 (smallint) array parameter from the packet payload.
 * The decoder is told whether the bytes are in binary form: a text-form array
 * always starts with '{', so any other first byte is treated as binary.
 */
@Override
public Object read(final PostgreSQLPacketPayload payload, final int parameterValueLength) {
    byte[] bytes = new byte[parameterValueLength];
    payload.getByteBuf().readBytes(bytes);
    return ARRAY_PARAMETER_DECODER.decodeInt2Array(bytes, '{' != bytes[0]);
}
|
@Test
void assertRead() {
    // Text-form int2 array as it appears on the wire, preceded by its length.
    String parameterValue = "{\"11\",\"12\"}";
    int expectedLength = 4 + parameterValue.length();
    ByteBuf byteBuf = ByteBufTestUtils.createByteBuf(expectedLength);
    byteBuf.writeInt(parameterValue.length());
    byteBuf.writeCharSequence(parameterValue, StandardCharsets.ISO_8859_1);
    // Consume the length prefix the way the protocol layer would.
    byteBuf.readInt();
    PostgreSQLPacketPayload payload = new PostgreSQLPacketPayload(byteBuf, StandardCharsets.UTF_8);
    Object actual = newInstance().read(payload, parameterValue.length());
    assertThat(actual, is(new short[]{11, 12}));
    // The reader must have consumed exactly the declared parameter bytes.
    assertThat(byteBuf.readerIndex(), is(expectedLength));
}
|
/**
 * Orders strings primarily by hash code (cheap), falling back to natural
 * lexicographic order only when the hashes collide, so the ordering stays a
 * valid total order consistent with equals.
 */
@Override
public int compare(String s1, String s2) {
    if (s1 == s2) { // NOSONAR false-positive: Compare Objects With Equals
        return 0;
    }
    final int byHash = Integer.compare(s1.hashCode(), s2.hashCode());
    // Hash collision: resolve deterministically via natural String order.
    return byHash != 0 ? byHash : s1.compareTo(s2);
}
|
@Test
public void sameHashCode() {
    // Next two Strings have same hash code in Java - see http://www.drmaciver.com/2008/07/javalangstringhashcode/
    // With equal hashes, the comparator must fall back to natural order and
    // still produce a consistent, antisymmetric result.
    String s1 = "Od";
    String s2 = "PE";
    assertThat(s1).hasSameHashCodeAs(s2);
    assertThat(compare(s1, s2)).isNegative();
    assertThat(compare(s2, s1)).isPositive();
}
|
/**
 * Returns the precomputed differential entropy of this distribution.
 */
@Override
public double entropy() {
    return entropy;
}
|
@Test
public void testEntropy() {
    // NOTE(review): removed the System.out.println("entropy") banner; tests
    // should not write to stdout.
    ChiSquareDistribution instance = new ChiSquareDistribution(20);
    // rand() is invoked purely as a smoke check that sampling does not throw;
    // its return value is intentionally discarded.
    instance.rand();
    // Expected entropy of chi-square with 20 degrees of freedom.
    assertEquals(3.229201359, instance.entropy(), 1E-7);
}
|
/**
 * Judges whether the given SQL statement is supported.
 * The supported list wins over the unsupported list when a statement type
 * appears in both; statements matching neither list default to supported.
 */
public boolean isSupported(final SQLStatement sqlStatement) {
    final Class<?> statementClass = sqlStatement.getClass();
    for (final Class<? extends SQLStatement> supported : supportedSQLStatements) {
        if (supported.isAssignableFrom(statementClass)) {
            return true;
        }
    }
    for (final Class<? extends SQLStatement> unsupported : unsupportedSQLStatements) {
        if (unsupported.isAssignableFrom(statementClass)) {
            return false;
        }
    }
    // Unlisted statement types are assumed supported.
    return true;
}
|
@Test
void assertIsSupportedWithoutList() {
    // A statement type appearing in neither the supported nor the unsupported
    // list must default to supported.
    assertTrue(new SQLSupportedJudgeEngine(Collections.singleton(SelectStatement.class), Collections.singleton(UpdateStatement.class)).isSupported(mock(DeleteStatement.class)));
}
|
/**
 * Returns the current read position within this view.
 *
 * @throws IOException if the view has already been closed
 */
@Override
public long getPosition() throws IOException
{
    checkClosed();
    return currentPosition;
}
|
@Test
void testPositionSkip() throws IOException
{
    byte[] values = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
            20 };
    // View starts at offset 10 of the source, so view position 0 maps to byte value 10.
    try (RandomAccessReadBuffer randomAccessSource = new RandomAccessReadBuffer(
            new ByteArrayInputStream(values));
            RandomAccessReadView randomAccessReadView = new RandomAccessReadView(
                    randomAccessSource, 10, 20))
    {
        assertEquals(0, randomAccessReadView.getPosition());
        assertEquals(10, randomAccessReadView.peek());
        // Skipping advances the view-relative position without consuming via read().
        randomAccessReadView.skip(5);
        assertEquals(5, randomAccessReadView.getPosition());
        assertEquals(15, randomAccessReadView.peek());
    }
}
|
/**
 * Returns the streams property overrides with values coerced to their
 * expected types. Serialized under the legacy "streamsProperties" JSON name.
 */
@JsonProperty("streamsProperties")
public Map<String, Object> getConfigOverrides() {
    return PropertiesUtil.coerceTypes(configOverrides, false);
}
|
@Test
public void shouldHandleNullProps() {
    // A null overrides map must surface as an empty map, never as null.
    assertThat(
            new KsqlRequest("sql", null, SOME_REQUEST_PROPS, SOME_COMMAND_NUMBER).getConfigOverrides(),
            is(Collections.emptyMap()));
}
|
/**
 * Creates an audit actor URN for the given Graylog user name.
 *
 * @param username non-null, non-empty user name
 * @throws IllegalArgumentException if the user name is null or empty
 */
public static AuditActor user(@Nonnull String username) {
    if (isNullOrEmpty(username)) {
        throw new IllegalArgumentException("username must not be null or empty");
    }
    return new AutoValue_AuditActor(URN_GRAYLOG_USER + username);
}
|
@Test
public void testUser() {
    // The factory must prefix the user name with the Graylog user URN namespace.
    final AuditActor actor = AuditActor.user("jane");
    assertThat(actor.urn()).isEqualTo("urn:graylog:user:jane");
}
|
/**
 * Returns the cell at the given column/row, or {@code null} when the
 * coordinates fall outside the sheet.
 * Row 0 is served from the separately held header row without parsing.
 */
@Override
public KCell getCell( int colnr, int rownr ) {
  if ( rownr == 0 && colnr < headerRow.size() ) {
    // only possible to return header
    return new StaxPoiCell( headerRow.get( colnr ), rownr );
  }
  // if random access this will be very expensive
  KCell[] row = getRow( rownr );
  // Bug fix: the guard previously tested "rownr < row.length" while indexing
  // row[colnr]; the column index must be checked against the row's width to
  // avoid ArrayIndexOutOfBoundsException on short rows.
  if ( row != null && colnr < row.length ) {
    return row[colnr];
  }
  return null;
}
|
@Test
public void testReadCells() throws Exception {
    KSheet sheet = getSampleSheet();
    // Label cell at column 1, row 2.
    KCell cell = sheet.getCell( 1, 2 );
    assertEquals( "One", cell.getValue() );
    assertEquals( KCellType.LABEL, cell.getType() );
    // Date cell at column 2, row 2.
    cell = sheet.getCell( 2, 2 );
    assertEquals( KCellType.DATE, cell.getType() );
    assertEquals( new Date( 1283817600000L ), cell.getValue() );
    // Label cell on the following row.
    cell = sheet.getCell( 1, 3 );
    assertEquals( "Two", cell.getValue() );
    assertEquals( KCellType.LABEL, cell.getType() );
}
|
/**
 * Post-construction hook: wires the optional custom block handler into the
 * gateway callback manager, then initializes the app type and fallback.
 */
@PostConstruct
public void init() {
    // blockRequestHandlerOptional has low priority
    blockRequestHandlerOptional.ifPresent(GatewayCallbackManager::setBlockHandler);
    initAppType();
    initFallback();
}
|
@Test
public void testInit() {
    config.init();
    verify(gatewayProperties).getFallback(); // Check if fallback properties are fetched
    // The configuration must still be able to produce its gateway beans.
    Assert.assertNotNull(config.sentinelGatewayBlockExceptionHandler());
    Assert.assertNotNull(config.sentinelGatewayFilter());
}
|
/**
 * Creates a fresh EC key pair using a secure random source.
 * Convenience overload delegating to {@code createEcKeyPair(SecureRandom)}.
 */
public static ECKeyPair createEcKeyPair()
        throws InvalidAlgorithmParameterException,
        NoSuchAlgorithmException,
        NoSuchProviderException {
    return createEcKeyPair(secureRandom());
}
|
@Test
public void testCreateEcKeyPair() throws Exception {
    ECKeyPair ecKeyPair = Keys.createEcKeyPair();
    // A positive signum proves both key components are positive, non-zero values.
    // Fix: assertEquals takes (expected, actual); the original had them
    // reversed (and wrapped the literal in redundant parentheses), which
    // would produce a misleading failure message.
    assertEquals(1, ecKeyPair.getPublicKey().signum());
    assertEquals(1, ecKeyPair.getPrivateKey().signum());
}
|
/** Looks up the value address for the composite (key1, key2) key; delegates to the base implementation. */
@Override public long get(long key1, long key2) {
    return super.get0(key1, key2);
}
|
@Test
public void testPutGetMany() {
    final long factor = 123456;
    final int k = 1000;
    // Insert k entries whose second key is derived from the first.
    for (int i = 1; i <= k; i++) {
        long key1 = (long) i;
        long key2 = key1 * factor;
        insert(key1, key2);
    }
    // Every entry must be retrievable, and the value slot is expected to hold
    // the two key longs back-to-back (key2 at an 8-byte offset).
    for (int i = 1; i <= k; i++) {
        long key1 = (long) i;
        long key2 = key1 * factor;
        long valueAddress = hsa.get(key1, key2);
        assertEquals(key1, mem.getLong(valueAddress));
        assertEquals(key2, mem.getLong(valueAddress + 8L));
    }
}
|
/**
 * Reads the next 4-byte float from the buffer and advances the offset.
 *
 * @throws EOFException if fewer than {@link Float#BYTES} bytes remain
 */
@Override
public float readFloat()
    throws EOFException {
  if (availableLong() < Float.BYTES) {
    throw new EOFException();
  }
  final float value = _dataBuffer.getFloat(_currentOffset);
  _currentOffset += Float.BYTES;
  return value;
}
|
@Test
void testReadFloat()
    throws EOFException {
  float read = _dataBufferPinotInputStream.readFloat();
  // Value must match the buffer contents and the cursor must advance by one float.
  assertEquals(read, _byteBuffer.getFloat(0));
  assertEquals(_dataBufferPinotInputStream.getCurrentOffset(), Float.BYTES);
}
|
/**
 * Starts building an Avro fixed type with the given name; complete it by
 * calling {@code size(...)} on the returned builder.
 */
public static FixedBuilder<Schema> fixed(String name) {
    return builder().fixed(name);
}
|
@Test
void doc() {
    // The doc string set on the builder must survive into the built schema.
    Schema s = SchemaBuilder.fixed("myfixed").doc("mydoc").size(1);
    assertEquals("mydoc", s.getDoc());
}
|
/**
 * Computes the token-range splits for a Cassandra table.
 * Wrapped-around ranges are unwrapped first; when a token ring is available
 * for the partitioner, each range is further subdivided so each sub-split
 * covers roughly {@code splitSize} partitions. The result is shuffled to
 * spread load and returned as an unmodifiable list.
 *
 * @throws PrestoException if the cluster metadata exposes no token ranges
 */
public List<TokenSplit> getSplits(String keyspace, String table, Optional<Long> sessionSplitsPerNode)
{
    Set<TokenRange> tokenRanges = session.getTokenRanges();
    if (tokenRanges.isEmpty()) {
        throw new PrestoException(CASSANDRA_METADATA_ERROR, "The cluster metadata is not available. " +
                "Please make sure that the Cassandra cluster is up and running, " +
                "and that the contact points are specified correctly.");
    }
    if (tokenRanges.stream().anyMatch(TokenRange::isWrappedAround)) {
        tokenRanges = unwrap(tokenRanges);
    }
    Optional<TokenRing> tokenRing = createForPartitioner(session.getPartitioner());
    long totalPartitionsCount = getTotalPartitionsCount(keyspace, table, sessionSplitsPerNode);
    List<TokenSplit> splits = new ArrayList<>();
    for (TokenRange tokenRange : tokenRanges) {
        if (tokenRange.isEmpty()) {
            continue;
        }
        checkState(!tokenRange.isWrappedAround(), "all token ranges must be unwrapped at this step");
        List<String> endpoints = getEndpoints(keyspace, tokenRange);
        checkState(!endpoints.isEmpty(), "endpoints is empty for token range: %s", tokenRange);
        if (!tokenRing.isPresent()) {
            // No ring model for this partitioner: one split per token range.
            // (A second, identical wrap-around checkState previously sat here;
            // it duplicated the assertion above and has been removed.)
            splits.add(createSplit(tokenRange, endpoints));
            continue;
        }
        // Estimate how many partitions fall in this range from its ring fraction.
        double tokenRangeRingFraction = tokenRing.get().getRingFraction(tokenRange.getStart().toString(), tokenRange.getEnd().toString());
        long partitionsCountEstimate = round(totalPartitionsCount * tokenRangeRingFraction);
        checkState(partitionsCountEstimate >= 0, "unexpected partitions count estimate: %d", partitionsCountEstimate);
        int subSplitCount = max(toIntExact(partitionsCountEstimate / splitSize), 1);
        List<TokenRange> subRanges = tokenRange.splitEvenly(subSplitCount);
        for (TokenRange subRange : subRanges) {
            if (subRange.isEmpty()) {
                continue;
            }
            checkState(!subRange.isWrappedAround(), "all token ranges must be unwrapped at this step");
            splits.add(createSplit(subRange, endpoints));
        }
    }
    // Shuffle so consecutive splits do not all target the same replicas.
    shuffle(splits, ThreadLocalRandom.current());
    return unmodifiableList(splits);
}
|
@Test
public void testNonEmptyTable()
        throws Exception
{
    String tableName = "non_empty_table";
    session.execute(format("CREATE TABLE %s.%s (key text PRIMARY KEY)", KEYSPACE, tableName));
    for (int i = 0; i < PARTITION_COUNT; i++) {
        session.execute(format("INSERT INTO %s.%s (key) VALUES ('%s')", KEYSPACE, tableName, "value" + i));
    }
    // Size estimates must be refreshed so the split manager sees the inserts.
    EmbeddedCassandra.refreshSizeEstimates(KEYSPACE, tableName);
    List<TokenSplit> splits = splitManager.getSplits(KEYSPACE, tableName, Optional.empty());
    // Expect one split per SPLIT_SIZE partitions.
    assertEquals(splits.size(), PARTITION_COUNT / SPLIT_SIZE);
    session.execute(format("DROP TABLE %s.%s", KEYSPACE, tableName));
}
|
/**
 * Merges another set of discrete resources into this one.
 * For another encodable set, per-class encoded resources are merged map-wise
 * (same class keys combined via {@code EncodedDiscreteResources::add});
 * an empty set is returned unchanged; any other implementation falls back to
 * a plain value-set union.
 */
@Override
public DiscreteResources add(DiscreteResources other) {
    if (other instanceof EncodableDiscreteResources) {
        EncodableDiscreteResources cast = (EncodableDiscreteResources) other;
        // LinkedHashMap preserves the class iteration order of this map first.
        LinkedHashMap<Class<?>, EncodedDiscreteResources> newMap =
                Stream.concat(this.map.entrySet().stream(), cast.map.entrySet().stream())
                        .collect(Collectors.toMap(
                                Map.Entry::getKey,
                                Map.Entry::getValue,
                                EncodedDiscreteResources::add,
                                LinkedHashMap::new
                        ));
        return of(parent, newMap);
    } else if (other instanceof EmptyDiscreteResources) {
        return this;
    }
    // Generic fallback: materialize both sides and take the union.
    return DiscreteResources.of(Sets.union(this.values(), other.values()));
}
|
@Test
public void testAdd() {
    // Adding two single-port resource sets of the same device must yield the
    // set containing both ports.
    DiscreteResource res1 = Resources.discrete(DeviceId.deviceId("a"), PortNumber.portNumber(1)).resource();
    DiscreteResource res2 = Resources.discrete(DeviceId.deviceId("a"), PortNumber.portNumber(2)).resource();
    DiscreteResources sut = EncodableDiscreteResources.of(ImmutableSet.of(res1));
    DiscreteResources other = EncodableDiscreteResources.of(ImmutableSet.of(res2));
    DiscreteResources expected = EncodableDiscreteResources.of(ImmutableSet.of(res1, res2));
    assertThat(sut.add(other), is(expected));
}
|
/**
 * Looks up a rule by UUID from the lazily initialized in-memory index;
 * returns an empty Optional when no rule with that UUID is known.
 */
@Override
public Optional<Rule> findByUuid(String uuid) {
    ensureInitialized();
    return Optional.ofNullable(rulesByUuid.get(uuid));
}
|
@Test
public void findByUuid_returns_absent_if_rule_does_not_exist_in_DB() {
    // Unknown UUID must map to Optional.empty(), not null or an exception.
    Optional<Rule> rule = underTest.findByUuid(AC_RULE_UUID);
    assertThat(rule).isEmpty();
}
|
/**
 * Registers a webhook bot assembled from the given callbacks.
 *
 * @param botPath       path under which webhook updates are received
 * @param updateHandler invoked for each incoming update; may return a reply method
 * @param setWebhook    invoked when the webhook should be registered upstream
 * @param deleteWebhook invoked when the webhook should be removed upstream
 * @throws TelegramApiException if registration of the assembled bot fails
 */
public void registerBot(
        String botPath,
        Function<Update, BotApiMethod<?>> updateHandler,
        Runnable setWebhook,
        Runnable deleteWebhook
) throws TelegramApiException {
    registerBot(DefaultTelegramWebhookBot
            .builder()
            .botPath(botPath)
            .updateHandler(updateHandler)
            .setWebhook(setWebhook)
            .deleteWebhook(deleteWebhook)
            .build());
}
|
@Test
public void testWhenUpdateIsReceivedOnWebhookUpdateReceivedIsCalled() throws IOException, TelegramApiException {
    application.registerBot(telegramWebhookBot);
    // Post a serialized update to the bot's webhook endpoint.
    Request request = new Request.Builder()
            .url("http://127.0.0.1:" + webhookOptions.getPort() + "/test")
            .headers(Headers.of(headers))
            .post(RequestBody.create(objectMapper.writeValueAsString(update), MediaType.parse("application/json")))
            .build();
    // Fix: OkHttp responses hold pooled connections and must be closed;
    // the original leaked the Response returned by execute().
    httpClient.newCall(request).execute().close();
    // The synchronous call has completed, so the bot must have seen the update.
    assertNotNull(telegramWebhookBot.updateReceived);
    assertTrue(telegramWebhookBot.isSetWebhookCalled());
    assertFalse(telegramWebhookBot.isDeleteWebhookCalled());
    assertEquals(update.getUpdateId(), telegramWebhookBot.updateReceived.getUpdateId());
}
|
/**
 * Builds a filesystem-safe, lower-case name from an arbitrary string:
 * Unicode identifier characters are kept, whitespace becomes '_', and every
 * other character is dropped.
 */
public static String createFilename( String name ) {
  StringBuilder filename = new StringBuilder();
  for ( int i = 0; i < name.length(); i++ ) {
    char c = name.charAt( i );
    if ( Character.isUnicodeIdentifierPart( c ) ) {
      filename.append( c );
    } else if ( Character.isWhitespace( c ) ) {
      filename.append( '_' );
    }
  }
  // Fix: use a fixed locale so the result does not vary with the JVM default
  // locale (e.g. the Turkish dotless-i problem with bare toLowerCase()).
  return filename.toString().toLowerCase( java.util.Locale.ROOT );
}
|
@Test
public void testCreateFilename() {
    // Tabs, '~' and spaces are sanitized; both directory-argument forms must
    // produce the same lower-cased file name.
    assertEquals( "dir" + Const.FILE_SEPARATOR + "file__1.ext", Const.createFilename( "dir" + Const.FILE_SEPARATOR,
            "File\t~ 1", ".ext" ) );
    assertEquals( "dir" + Const.FILE_SEPARATOR + "file__1.ext", Const.createFilename( "dir", "File\t~ 1", ".ext" ) );
}
|
/**
 * Replaces this collection's material configs from a UI attribute map.
 * The map's MATERIAL_TYPE entry selects which material kind to build; the
 * nested map under that type key carries its attributes. Unknown or null
 * input leaves the collection empty.
 */
@Override
public void setConfigAttributes(Object attributes) {
    // Rebuild from scratch on every call.
    clear();
    if (attributes == null) {
        return;
    }
    Map attributeMap = (Map) attributes;
    String materialType = (String) attributeMap.get(AbstractMaterialConfig.MATERIAL_TYPE);
    if (SvnMaterialConfig.TYPE.equals(materialType)) {
        addMaterialConfig(getSvnMaterial(), (Map) attributeMap.get(SvnMaterialConfig.TYPE));
    } else if (HgMaterialConfig.TYPE.equals(materialType)) {
        addMaterialConfig(getHgMaterial(), (Map) attributeMap.get(HgMaterialConfig.TYPE));
    } else if (GitMaterialConfig.TYPE.equals(materialType)) {
        addMaterialConfig(getGitMaterial(), (Map) attributeMap.get(GitMaterialConfig.TYPE));
    } else if (P4MaterialConfig.TYPE.equals(materialType)) {
        addMaterialConfig(getP4Material(), (Map) attributeMap.get(P4MaterialConfig.TYPE));
    } else if (DependencyMaterialConfig.TYPE.equals(materialType)) {
        addMaterialConfig(getDependencyMaterial(), (Map) attributeMap.get(DependencyMaterialConfig.TYPE));
    } else if (TfsMaterialConfig.TYPE.equals(materialType)) {
        addMaterialConfig(getTfsMaterial(), (Map) attributeMap.get(TfsMaterialConfig.TYPE));
    } else if (PackageMaterialConfig.TYPE.equals(materialType)) {
        addMaterialConfig(getPackageMaterial(), (Map) attributeMap.get(PackageMaterialConfig.TYPE));
    } else if (PluggableSCMMaterialConfig.TYPE.equals(materialType)) {
        addMaterialConfig(getSCMMaterial(), (Map) attributeMap.get(PluggableSCMMaterialConfig.TYPE));
    }
    // Unrecognized material types are silently ignored, leaving the collection empty.
}
|
@Test
public void shouldSetSvnConfigAttributesForMaterial() {
    MaterialConfigs materialConfigs = new MaterialConfigs();
    // Nested attribute map for the SVN material itself.
    Map<String, Object> svnAttrMap = new HashMap<>();
    svnAttrMap.put(SvnMaterialConfig.URL, "foo");
    svnAttrMap.put(SvnMaterialConfig.USERNAME, "bar");
    svnAttrMap.put(SvnMaterialConfig.PASSWORD, "baz");
    svnAttrMap.put(SvnMaterialConfig.CHECK_EXTERNALS, false);
    // Outer map selects the material type and carries the nested attributes.
    Map<String, Object> attributeMap = new HashMap<>();
    attributeMap.put(AbstractMaterialConfig.MATERIAL_TYPE, SvnMaterialConfig.TYPE);
    attributeMap.put(SvnMaterialConfig.TYPE, svnAttrMap);
    materialConfigs.setConfigAttributes(attributeMap);
    assertThat(materialConfigs.first()).isEqualTo(svn("foo", "bar", "baz", false));
}
|
/**
 * Builds Hive struct fields for an Iceberg struct type, pairing each field
 * with its positional object inspector. Field names are lower-cased to match
 * Hive's case-insensitive column naming.
 */
public IcebergRecordObjectInspector(Types.StructType structType, List<ObjectInspector> objectInspectors) {
    // One inspector per struct field, in order.
    Preconditions.checkArgument(structType.fields().size() == objectInspectors.size());
    this.structFields = Lists.newArrayListWithExpectedSize(structType.fields().size());
    int position = 0;
    for (Types.NestedField field : structType.fields()) {
        ObjectInspector oi = objectInspectors.get(position);
        // NOTE(review): toLowerCase() uses the default locale here — confirm
        // whether Locale.ROOT is intended for locale-independent names.
        Types.NestedField fieldInLowercase = Types.NestedField.of(field.fieldId(), field.isOptional(),
                field.name().toLowerCase(), field.type(), field.doc());
        IcebergRecordStructField structField = new IcebergRecordStructField(fieldInLowercase, oi, position);
        structFields.add(structField);
        position++;
    }
}
|
@Test
public void testIcebergRecordObjectInspector() {
    // Schema with a top-level int and a nested struct holding a string.
    Schema schema = new Schema(
            required(1, "integer_field", Types.IntegerType.get()),
            required(2, "struct_field", Types.StructType.of(
                    Types.NestedField.required(3, "string_field", Types.StringType.get())))
    );
    Record record = RandomGenericData.generate(schema, 1, 0L).get(0);
    Record innerRecord = record.get(1, Record.class);
    StructObjectInspector soi = (StructObjectInspector) IcebergObjectInspector.create(schema);
    // Top-level field access, both as a list and individually by field ref.
    Assert.assertEquals(ImmutableList.of(record.get(0), record.get(1)), soi.getStructFieldsDataAsList(record));
    StructField integerField = soi.getStructFieldRef("integer_field");
    Assert.assertEquals(record.get(0), soi.getStructFieldData(record, integerField));
    StructField structField = soi.getStructFieldRef("struct_field");
    Object innerData = soi.getStructFieldData(record, structField);
    Assert.assertEquals(innerRecord, innerData);
    // The nested struct gets its own inspector; verify nested field access too.
    StructObjectInspector innerSoi = (StructObjectInspector) structField.getFieldObjectInspector();
    StructField stringField = innerSoi.getStructFieldRef("string_field");
    Assert.assertEquals(ImmutableList.of(innerRecord.get(0)), innerSoi.getStructFieldsDataAsList(innerRecord));
    Assert.assertEquals(innerRecord.get(0), innerSoi.getStructFieldData(innerData, stringField));
}
|
/**
 * Routes the statement under the given sharding rule.
 * No logic tables → broadcast routing; multiple tables that are all binding
 * tables → binding-table routing; otherwise each table is routed separately
 * and the results are combined via cartesian routing.
 */
@Override
public RouteContext route(final ShardingRule shardingRule) {
    RouteContext result = new RouteContext();
    Collection<String> logicTableNames = getLogicTableNames();
    if (logicTableNames.isEmpty()) {
        result.getRouteUnits().addAll(getBroadcastTableRouteUnits(shardingRule, ""));
        return result;
    }
    if (logicTableNames.size() > 1 && shardingRule.isAllBindingTables(logicTableNames)) {
        result.getRouteUnits().addAll(getBindingTableRouteUnits(shardingRule, logicTableNames));
    } else {
        // Non-binding multi-table (or single-table) case: combine per-table
        // route contexts with a cartesian product.
        Collection<RouteContext> routeContexts = getRouteContexts(shardingRule, logicTableNames);
        RouteContext routeContext = new ShardingCartesianRoutingEngine(routeContexts).route(shardingRule);
        result.getOriginalDataNodes().addAll(routeContext.getOriginalDataNodes());
        result.getRouteUnits().addAll(routeContext.getRouteUnits());
    }
    return result;
}
|
@Test
void assertRouteForEmptyTable() {
    // With no logic tables, the engine must fall back to broadcast routing.
    Collection<String> tableNames = Collections.emptyList();
    ShardingTableBroadcastRoutingEngine shardingTableBroadcastRoutingEngine =
            new ShardingTableBroadcastRoutingEngine(mock(ShardingSphereDatabase.class), createSQLStatementContext(tableNames), tableNames);
    RouteContext routeContext = shardingTableBroadcastRoutingEngine.route(createShardingRule());
    assertRouteUnitWithoutTables(routeContext);
}
|
/**
 * Builds a KafkaUserModel from a KafkaUser custom resource.
 * Validates the TLS username and desired password, copies authentication,
 * quotas and secret template metadata, and wires simple ACL rules — but only
 * when the Kafka cluster supports the ACL admin API.
 *
 * @throws InvalidResourceException if simple ACLs are configured while the
 *         cluster does not support them
 */
public static KafkaUserModel fromCrd(KafkaUser kafkaUser,
                                     String secretPrefix,
                                     boolean aclsAdminApiSupported) {
    KafkaUserModel result = new KafkaUserModel(kafkaUser.getMetadata().getNamespace(),
            kafkaUser.getMetadata().getName(),
            Labels.fromResource(kafkaUser).withStrimziKind(kafkaUser.getKind()),
            secretPrefix);
    validateTlsUsername(kafkaUser);
    validateDesiredPassword(kafkaUser);
    result.setOwnerReference(kafkaUser);
    result.setAuthentication(kafkaUser.getSpec().getAuthentication());
    if (kafkaUser.getSpec().getAuthorization() != null && kafkaUser.getSpec().getAuthorization().getType().equals(KafkaUserAuthorizationSimple.TYPE_SIMPLE)) {
        if (aclsAdminApiSupported) {
            KafkaUserAuthorizationSimple simple = (KafkaUserAuthorizationSimple) kafkaUser.getSpec().getAuthorization();
            result.setSimpleAclRules(simple.getAcls());
        } else {
            // Fail fast rather than silently dropping the configured ACLs.
            throw new InvalidResourceException("Simple authorization ACL rules are configured but not supported in the Kafka cluster configuration.");
        }
    }
    result.setQuotas(kafkaUser.getSpec().getQuotas());
    // Optional labels/annotations from the secret template, when fully specified.
    if (kafkaUser.getSpec().getTemplate() != null
            && kafkaUser.getSpec().getTemplate().getSecret() != null
            && kafkaUser.getSpec().getTemplate().getSecret().getMetadata() != null) {
        result.templateSecretLabels = kafkaUser.getSpec().getTemplate().getSecret().getMetadata().getLabels();
        result.templateSecretAnnotations = kafkaUser.getSpec().getTemplate().getSecret().getMetadata().getAnnotations();
    }
    return result;
}
|
@Test
public void testFromCrdAclsWithAclsAdminApiSupportMissing() {
    // Configuring simple ACLs without ACL admin API support must be rejected
    // with a descriptive InvalidResourceException.
    InvalidResourceException e = assertThrows(InvalidResourceException.class, () -> KafkaUserModel.fromCrd(scramShaUser, UserOperatorConfig.SECRET_PREFIX.defaultValue(), false));
    assertThat(e.getMessage(), is("Simple authorization ACL rules are configured but not supported in the Kafka cluster configuration."));
}
|
/**
 * Builds a SchemaKStream/SchemaKTable for the given data source, choosing
 * the windowed variant when the source's key format is windowed.
 *
 * @throws UnsupportedOperationException for unknown data source types
 */
public static SchemaKStream<?> buildSource(
    final PlanBuildContext buildContext,
    final DataSource dataSource,
    final QueryContext.Stacker contextStacker
) {
  final boolean windowed = dataSource.getKsqlTopic().getKeyFormat().isWindowed();
  switch (dataSource.getDataSourceType()) {
    case KSTREAM:
      if (windowed) {
        return buildWindowedStream(buildContext, dataSource, contextStacker);
      }
      return buildStream(buildContext, dataSource, contextStacker);
    case KTABLE:
      if (windowed) {
        return buildWindowedTable(buildContext, dataSource, contextStacker);
      }
      return buildTable(buildContext, dataSource, contextStacker);
    default:
      throw new UnsupportedOperationException("Source type:" + dataSource.getDataSourceType());
  }
}
|
@Test
public void shouldBuildNonWindowedStream() {
    // Given:
    givenNonWindowedStream();
    // When:
    final SchemaKStream<?> result = SchemaKSourceFactory.buildSource(
            buildContext,
            dataSource,
            contextStacker
    );
    // Then: a plain stream (not a table), backed by a StreamSource step with no upstream sources.
    assertThat(result, not(instanceOf(SchemaKTable.class)));
    assertThat(result.getSourceStep(), instanceOf(StreamSource.class));
    assertValidSchema(result);
    assertThat(result.getSourceStep().getSources(), is(empty()));
}
|
/** Returns whether the input matches this glob pattern via the shared MATCHER. */
public boolean matches(String input) {
    return MATCHER.matches(input, pattern);
}
|
@Test
public void testMatchesOnPath() throws Exception {
    // Each path segment must match its corresponding glob segment.
    GlobMatcher matcher = new GlobMatcher("A*/F*/P*");
    assertTrue(matcher.matches("A Folder/Folder/Pipeline"));
    assertFalse(matcher.matches("A Folder/Sub/Pipeline"));
}
|
/**
 * Right-pads the input with the padding string until it reaches the target
 * length; inputs longer than the target are truncated. Returns null for a
 * null input, a null/empty padding, or a null/negative target length.
 */
@Udf
public String rpad(
    @UdfParameter(description = "String to be padded") final String input,
    @UdfParameter(description = "Target length") final Integer targetLen,
    @UdfParameter(description = "Padding string") final String padding) {
  if (input == null || padding == null || padding.isEmpty() || targetLen == null
      || targetLen < 0) {
    return null;
  }
  final StringBuilder padded = new StringBuilder(targetLen + padding.length());
  padded.append(input);
  // Append whole copies of the padding until the target is covered, then trim
  // the possible overshoot in one step.
  while (padded.length() < targetLen) {
    padded.append(padding);
  }
  padded.setLength(targetLen);
  return padded.toString();
}
|
@Test
public void shouldPadInputString() {
    // "foo" + "Bar" + "B" — the second padding copy is truncated at length 7.
    final String result = udf.rpad("foo", 7, "Bar");
    assertThat(result, is("fooBarB"));
}
|
/**
 * Returns floor(log2(n)) for a non-negative n, with the convention that
 * log2Floor(0) == -1.
 *
 * @throws IllegalArgumentException if n is negative
 */
int log2Floor(long n) {
    if (n < 0) {
        throw new IllegalArgumentException("must be non-negative");
    }
    return n == 0 ? -1 : LongMath.log2(n, RoundingMode.FLOOR);
}
|
@Test
public void testLog2Floor_zero() {
    // By convention log2Floor(0) is defined as -1 rather than an error.
    OrderedCode orderedCode = new OrderedCode();
    assertEquals(-1, orderedCode.log2Floor(0));
}
|
/**
 * Logs at ERROR level with a lazily computed argument: the supplier is only
 * invoked when ERROR is actually enabled, avoiding needless computation.
 */
public static void error(final Logger logger, final String format, final Supplier<Object> supplier) {
    if (logger.isErrorEnabled()) {
        logger.error(format, supplier.get());
    }
}
|
@Test
public void testNeverErrorFormat() {
    // With ERROR disabled, the supplier must never be evaluated (lazy logging).
    when(logger.isErrorEnabled()).thenReturn(false);
    LogUtils.error(logger, "testError: {}", supplier);
    verify(supplier, never()).get();
}
|
@Udf(description = "Converts a number of milliseconds since 1970-01-01 00:00:00 UTC/GMT into the"
    + " string representation of the timestamp in the given format. Single quotes in the"
    + " timestamp format can be escaped with '', for example: 'yyyy-MM-dd''T''HH:mm:ssX'."
    + " The system default time zone is used when no time zone is explicitly provided."
    + " The format pattern should be in the format expected"
    + " by java.time.format.DateTimeFormatter")
public String timestampToString(
    @UdfParameter(
        description = "Milliseconds since"
            + " January 1, 1970, 00:00:00 UTC/GMT.") final long epochMilli,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.time.format.DateTimeFormatter.") final String formatPattern) {
  // Null pattern yields null rather than an error, matching SQL semantics.
  if (formatPattern == null) {
    return null;
  }
  try {
    final Timestamp timestamp = new Timestamp(epochMilli);
    // Formatters are cached per pattern; lookup may throw ExecutionException.
    final DateTimeFormatter formatter = formatters.get(formatPattern);
    // Rendered in the system default time zone, as documented above.
    return timestamp.toInstant()
        .atZone(ZoneId.systemDefault())
        .format(formatter);
  } catch (final ExecutionException | RuntimeException e) {
    // Preserve the cause and report both the input and the offending pattern.
    throw new KsqlFunctionException("Failed to format timestamp " + epochMilli
        + " with formatter '" + formatPattern
        + "': " + e.getMessage(), e);
  }
}
|
@Test
public void shouldSupportEmbeddedChars() {
    // When: pattern embeds a quoted literal ('Fred') after the milliseconds.
    final Object result = udf.timestampToString(1638360611123L,
            "yyyy-MM-dd'T'HH:mm:ss.SSS'Fred'");
    // Then: output matches what SimpleDateFormat renders for the same pattern.
    final String expectedResult = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Fred'")
            .format(new Date(1638360611123L));
    assertThat(result, is(expectedResult));
}
|
/**
 * Propagates the given slave server's settings to same-named shared slave
 * servers; convenience overload using the server's own name as the key.
 */
public synchronized void synchronizeSlaveServers( SlaveServer slaveServer ) {
    synchronizeSlaveServers( slaveServer, slaveServer.getName() );
}
|
@Test
public void synchronizeSlaveServers_sync_shared_only() throws Exception {
    final String slaveServerName = "SlaveServer";
    // Job 1: shared slave server — should receive the synchronized change.
    JobMeta job1 = createJobMeta();
    SlaveServer slaveServer1 = createSlaveServer( slaveServerName, true );
    job1.setSlaveServers( Collections.singletonList( slaveServer1 ) );
    spoonDelegates.jobs.addJob( job1 );
    // Job 2: same-named but NOT shared — must be left untouched.
    JobMeta job2 = createJobMeta();
    SlaveServer unsharedSlaveServer2 = createSlaveServer( slaveServerName, false );
    job2.setSlaveServers( Collections.singletonList( unsharedSlaveServer2 ) );
    spoonDelegates.jobs.addJob( job2 );
    // Job 3: the shared server whose change is being propagated.
    JobMeta job3 = createJobMeta();
    SlaveServer slaveServer3 = createSlaveServer( slaveServerName, true );
    job3.setSlaveServers( Collections.singletonList( slaveServer3 ) );
    spoonDelegates.jobs.addJob( job3 );
    slaveServer3.setHostname( AFTER_SYNC_VALUE );
    sharedUtil.synchronizeSlaveServers( slaveServer3 );
    // Only the shared server picks up the new hostname.
    assertThat( slaveServer1.getHostname(), equalTo( AFTER_SYNC_VALUE ) );
    assertThat( unsharedSlaveServer2.getHostname(), equalTo( BEFORE_SYNC_VALUE ) );
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.