| focal_method (string, length 13–60.9k) | test_case (string, length 25–109k) |
|---|---|
/**
 * Reports whether this configuration is in an error state.
 *
 * @return {@code true} if at least one contained {@link ConfigurationProperty}
 *         reports errors, {@code false} otherwise
 */
public boolean hasErrors() {
    // Short-circuit on the first property that reports a problem.
    for (ConfigurationProperty candidate : this) {
        if (candidate.hasErrors()) {
            return true;
        }
    }
    return false;
}
|
@Test
void hasErrorsShouldVerifyIfAnyConfigurationPropertyHasErrors() {
    // One healthy property followed by one failing property: the configuration
    // as a whole must report errors and both properties must be consulted.
    ConfigurationProperty cleanProperty = mock(ConfigurationProperty.class);
    when(cleanProperty.hasErrors()).thenReturn(false);
    ConfigurationProperty failingProperty = mock(ConfigurationProperty.class);
    when(failingProperty.hasErrors()).thenReturn(true);
    Configuration configuration = new Configuration(cleanProperty, failingProperty);
    assertThat(configuration.hasErrors()).isTrue();
    verify(cleanProperty).hasErrors();
    verify(failingProperty).hasErrors();
}
|
/**
 * Parses the rule's JSON handle and caches it under the rule's cache key.
 * Rules without a handle are ignored.
 *
 * @param ruleData the rule definition to process
 */
@Override
public void handlerRule(final RuleData ruleData) {
    final String handleJson = ruleData.getHandle();
    if (handleJson != null) {
        final ModifyResponseRuleHandle handle =
                GsonUtils.getInstance().fromJson(handleJson, ModifyResponseRuleHandle.class);
        CACHED_HANDLE.get().cachedHandle(CacheKeyUtils.INST.getKey(ruleData), handle);
    }
}
|
@Test
public void handlerSelectorTest() {
    // Feed the rule through the handler so its handle JSON gets cached.
    modifyResponsePluginDataHandler.handlerRule(ruleData);
    // The cached handle must be retrievable with the same cache key.
    ModifyResponseRuleHandle modifyResponseRuleHandle = ModifyResponsePluginDataHandler.CACHED_HANDLE.get().obtainHandle(CacheKeyUtils.INST.getKey(ruleData));
    // assumes the fixture's rule handle JSON sets statusCode to 400 — TODO confirm setup
    assertEquals(400, modifyResponseRuleHandle.getStatusCode());
}
|
/**
 * Computes the set of unload decisions that would move the cluster load
 * towards the configured target standard deviation.
 *
 * <p>Flow: fetch available brokers, refresh the load stats, check the
 * shedding-condition hit-count threshold, then repeatedly pick the most
 * loaded broker and mark bundles to offload (optionally swapping bundles
 * back from the least loaded broker when transfer mode is enabled).
 *
 * <p>Bug fix: the accumulated swap traffic ({@code trafficMarkedToGain})
 * was previously only updated inside an {@code if (debugMode)} block, so
 * offload accounting silently changed with the debug flag. The update is
 * now unconditional.
 *
 * @param context                 load manager context (configuration, registries, load stores)
 * @param recentlyUnloadedBundles bundles unloaded recently, keyed by bundle name to timestamp
 * @param recentlyUnloadedBrokers brokers unloaded recently, keyed by broker to timestamp
 * @return the (possibly empty) cache of unload decisions for this run
 */
@Override
public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context,
                                                   Map<String, Long> recentlyUnloadedBundles,
                                                   Map<String, Long> recentlyUnloadedBrokers) {
    final var conf = context.brokerConfiguration();
    decisionCache.clear();
    stats.clear();
    Map<String, BrokerLookupData> availableBrokers;
    try {
        availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync()
                .get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS);
    } catch (ExecutionException | InterruptedException | TimeoutException e) {
        // Preserve the interrupt status so callers can observe the interruption.
        if (e instanceof InterruptedException) {
            Thread.currentThread().interrupt();
        }
        counter.update(Failure, Unknown);
        log.warn("Failed to fetch available brokers. Stop unloading.", e);
        return decisionCache;
    }
    try {
        final var loadStore = context.brokerLoadDataStore();
        stats.setLoadDataStore(loadStore);
        boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log);
        var skipReason = stats.update(
                context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf);
        if (skipReason.isPresent()) {
            if (debugMode) {
                log.warn(CANNOT_CONTINUE_UNLOAD_MSG
                                + " Skipped the load stat update. Reason:{}.",
                        skipReason.get());
            }
            counter.update(Skip, skipReason.get());
            return decisionCache;
        }
        counter.updateLoadData(stats.avg, stats.std);
        if (debugMode) {
            log.info("brokers' load stats:{}", stats);
        }
        // skip metrics
        int numOfBrokersWithEmptyLoadData = 0;
        int numOfBrokersWithFewBundles = 0;
        final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd();
        boolean transfer = conf.isLoadBalancerTransferEnabled();
        // Count consecutive runs where the shedding condition holds; only shed
        // once the hit count exceeds the configured threshold.
        if (stats.std() > targetStd
                || isUnderLoaded(context, stats.peekMinBroker(), stats)
                || isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
            unloadConditionHitCount++;
        } else {
            unloadConditionHitCount = 0;
        }
        if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) {
            if (debugMode) {
                log.info(CANNOT_CONTINUE_UNLOAD_MSG
                                + " Shedding condition hit count:{} is less than or equal to the threshold:{}.",
                        unloadConditionHitCount, conf.getLoadBalancerSheddingConditionHitCountThreshold());
            }
            counter.update(Skip, HitCount);
            return decisionCache;
        }
        while (true) {
            if (!stats.hasTransferableBrokers()) {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG
                            + " Exhausted target transfer brokers.");
                }
                break;
            }
            // Decide why we keep unloading this round (overload vs underload).
            UnloadDecision.Reason reason;
            if (stats.std() > targetStd) {
                reason = Overloaded;
            } else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) {
                reason = Underloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is underloaded:%s although "
                                    + "load std:%.2f <= targetStd:%.2f. "
                                    + "Continuing unload for this underloaded broker.",
                            stats.peekMinBroker(),
                            context.brokerLoadDataStore().get(stats.peekMinBroker()).get(),
                            stats.std(), targetStd));
                }
            } else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
                reason = Overloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is overloaded:%s although "
                                    + "load std:%.2f <= targetStd:%.2f. "
                                    + "Continuing unload for this overloaded broker.",
                            stats.peekMaxBroker(),
                            context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(),
                            stats.std(), targetStd));
                }
            } else {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG
                                    + "The overall cluster load meets the target, std:{} <= targetStd:{}."
                                    + "minBroker:{} is not underloaded. maxBroker:{} is not overloaded.",
                            stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker());
                }
                break;
            }
            String maxBroker = stats.pollMaxBroker();
            String minBroker = stats.peekMinBroker();
            Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker);
            Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker);
            if (maxBrokerLoadData.isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " MaxBrokerLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            if (minBrokerLoadData.isEmpty()) {
                log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker);
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            // Target offload: move half of the load gap between max and min broker.
            double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA();
            double minLoad = minBrokerLoadData.get().getWeightedMaxEMA();
            double offload = (maxLoad - minLoad) / 2;
            BrokerLoadData brokerLoadData = maxBrokerLoadData.get();
            double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn()
                    + brokerLoadData.getMsgThroughputOut();
            double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn()
                    + minBrokerLoadData.get().getMsgThroughputOut();
            double offloadThroughput = maxBrokerThroughput * offload / maxLoad;
            if (debugMode) {
                log.info(String.format(
                        "Attempting to shed load from broker:%s%s, which has the max resource "
                                + "usage:%.2f%%, targetStd:%.2f,"
                                + " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.",
                        maxBroker, transfer ? " to broker:" + minBroker : "",
                        maxLoad * 100,
                        targetStd,
                        offload * 100,
                        offloadThroughput / KB
                ));
            }
            double trafficMarkedToOffload = 0;
            double trafficMarkedToGain = 0;
            Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker);
            if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " TopBundlesLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData();
            // A broker with a single bundle cannot be relieved by unloading.
            if (maxBrokerTopBundlesLoadData.size() == 1) {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                                + " Sole namespace bundle:%s is overloading the broker. ",
                        maxBroker, maxBrokerTopBundlesLoadData.iterator().next()));
                continue;
            }
            Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker);
            var minBrokerTopBundlesLoadDataIter =
                    minBundlesLoadData.isPresent() ? minBundlesLoadData.get().getTopBundlesLoadData().iterator() :
                            null;
            if (maxBrokerTopBundlesLoadData.isEmpty()) {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " Broker overloaded despite having no bundles", maxBroker));
                continue;
            }
            int remainingTopBundles = maxBrokerTopBundlesLoadData.size();
            for (var e : maxBrokerTopBundlesLoadData) {
                String bundle = e.bundleName();
                // Skip bundles this broker does not actually own anymore.
                if (channel != null && !channel.isOwner(bundle, maxBroker)) {
                    if (debugMode) {
                        log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " MaxBroker:%s is not the owner.", bundle, maxBroker));
                    }
                    continue;
                }
                if (recentlyUnloadedBundles.containsKey(bundle)) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                        + " Bundle has been recently unloaded at ts:%d.",
                                bundle, recentlyUnloadedBundles.get(bundle)));
                    }
                    continue;
                }
                if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " This unload can't meet "
                                + "affinity(isolation) or anti-affinity group policies.", bundle));
                    }
                    continue;
                }
                if (remainingTopBundles <= 1) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                        + " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is"
                                        + " less than or equal to 1.",
                                bundle, maxBroker));
                    }
                    break;
                }
                var bundleData = e.stats();
                double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut;
                boolean swap = false;
                List<Unload> minToMaxUnloads = new ArrayList<>();
                double minBrokerBundleSwapThroughput = 0.0;
                if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) {
                    // see if we can swap bundles from min to max broker to balance better.
                    if (transfer && minBrokerTopBundlesLoadDataIter != null) {
                        var maxBrokerNewThroughput =
                                maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain
                                        - maxBrokerBundleThroughput;
                        var minBrokerNewThroughput =
                                minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain
                                        + maxBrokerBundleThroughput;
                        while (minBrokerTopBundlesLoadDataIter.hasNext()) {
                            var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next();
                            if (!isTransferable(context, availableBrokers,
                                    minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) {
                                continue;
                            }
                            var minBrokerBundleThroughput =
                                    minBrokerBundleData.stats().msgThroughputIn
                                            + minBrokerBundleData.stats().msgThroughputOut;
                            var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput;
                            var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput;
                            if (maxBrokerNewThroughputTmp < maxBrokerThroughput
                                    && minBrokerNewThroughputTmp < maxBrokerThroughput) {
                                minToMaxUnloads.add(new Unload(minBroker,
                                        minBrokerBundleData.bundleName(), Optional.of(maxBroker)));
                                maxBrokerNewThroughput = maxBrokerNewThroughputTmp;
                                minBrokerNewThroughput = minBrokerNewThroughputTmp;
                                minBrokerBundleSwapThroughput += minBrokerBundleThroughput;
                                if (minBrokerNewThroughput <= maxBrokerNewThroughput
                                        && maxBrokerNewThroughput < maxBrokerThroughput * 0.75) {
                                    swap = true;
                                    break;
                                }
                            }
                        }
                    }
                    if (!swap) {
                        if (debugMode) {
                            log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                            + " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is "
                                            + "greater than the target :%.2f KByte/s.",
                                    bundle,
                                    (trafficMarkedToOffload + maxBrokerBundleThroughput) / KB,
                                    trafficMarkedToGain / KB,
                                    (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB,
                                    offloadThroughput / KB));
                        }
                        break;
                    }
                }
                Unload unload;
                if (transfer) {
                    if (swap) {
                        minToMaxUnloads.forEach(minToMaxUnload -> {
                            if (debugMode) {
                                log.info("Decided to gain bundle:{} from min broker:{}",
                                        minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker());
                            }
                            var decision = new UnloadDecision();
                            decision.setUnload(minToMaxUnload);
                            decision.succeed(reason);
                            decisionCache.add(decision);
                        });
                        if (debugMode) {
                            log.info(String.format(
                                    "Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.",
                                    minBrokerBundleSwapThroughput / KB, minBroker, maxBroker));
                        }
                        // Fix: account the gained traffic unconditionally; it was
                        // previously only accumulated when debugMode was enabled.
                        trafficMarkedToGain += minBrokerBundleSwapThroughput;
                    }
                    unload = new Unload(maxBroker, bundle, Optional.of(minBroker));
                } else {
                    unload = new Unload(maxBroker, bundle);
                }
                var decision = new UnloadDecision();
                decision.setUnload(unload);
                decision.succeed(reason);
                decisionCache.add(decision);
                trafficMarkedToOffload += maxBrokerBundleThroughput;
                remainingTopBundles--;
                if (debugMode) {
                    log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s."
                                    + " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s."
                                    + " Target:%.2f KByte/s.",
                            bundle, maxBrokerBundleThroughput / KB,
                            trafficMarkedToOffload / KB,
                            trafficMarkedToGain / KB,
                            (trafficMarkedToOffload - trafficMarkedToGain) / KB,
                            offloadThroughput / KB));
                }
            }
            if (trafficMarkedToOffload > 0) {
                // Project the decided offload back onto the load stats so the next
                // loop iteration sees the anticipated post-unload distribution.
                var adjustedOffload =
                        (trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput;
                stats.offload(maxLoad, minLoad, adjustedOffload);
                if (debugMode) {
                    log.info(
                            String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}",
                                    stats, maxLoad, minLoad, adjustedOffload));
                }
            } else {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " There is no bundle that can be unloaded in top bundles load data. "
                        + "Consider splitting bundles owned by the broker "
                        + "to make each bundle serve less traffic "
                        + "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport"
                        + " to report more bundles in the top bundles load data.", maxBroker));
            }
        } // while end
        if (debugMode) {
            log.info("decisionCache:{}", decisionCache);
        }
        if (decisionCache.isEmpty()) {
            UnloadDecision.Reason reason;
            if (numOfBrokersWithEmptyLoadData > 0) {
                reason = NoLoadData;
            } else if (numOfBrokersWithFewBundles > 0) {
                reason = NoBundles;
            } else {
                reason = HitCount;
            }
            counter.update(Skip, reason);
        } else {
            unloadConditionHitCount = 0;
        }
    } catch (Throwable e) {
        log.error("Failed to process unloading. ", e);
        this.counter.update(Failure, Unknown);
    }
    return decisionCache;
}
|
@Test
public void testEmptyTopBundlesLoadData() {
    // Brokers have CPU load data but no top-bundles load data, so the shedder
    // must skip with reason NoLoadData and produce no decisions.
    UnloadCounter counter = new UnloadCounter();
    TransferShedder transferShedder = new TransferShedder(counter);
    var ctx = getContext();
    var brokerLoadDataStore = ctx.brokerLoadDataStore();
    // Two overloaded brokers (80%, 90%) against three lightly loaded ones.
    brokerLoadDataStore.pushAsync("broker1:8080", getCpuLoad(ctx, 2, "broker1:8080"));
    brokerLoadDataStore.pushAsync("broker2:8080", getCpuLoad(ctx, 4, "broker2:8080"));
    brokerLoadDataStore.pushAsync("broker3:8080", getCpuLoad(ctx, 6, "broker3:8080"));
    brokerLoadDataStore.pushAsync("broker4:8080", getCpuLoad(ctx, 80, "broker4:8080"));
    brokerLoadDataStore.pushAsync("broker5:8080", getCpuLoad(ctx, 90, "broker5:8080"));
    var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of());
    assertTrue(res.isEmpty());
    assertEquals(counter.getBreakdownCounters().get(Skip).get(NoLoadData).get(), 1);
    // Load averages must still be recorded even when shedding is skipped.
    assertEquals(counter.getLoadAvg(), setupLoadAvg);
    assertEquals(counter.getLoadStd(), setupLoadStd);
}
|
/**
 * Resolves the caller's source ip, preferring the request context over the
 * raw servlet request.
 *
 * @param httpServletRequest fallback source when the context has no ip
 * @return the source ip from the request context, or the servlet remote ip
 *         when the context value is blank
 */
public static String getSourceIpForHttpRequest(HttpServletRequest httpServletRequest) {
    final String contextSourceIp = getSourceIp();
    if (StringUtils.isBlank(contextSourceIp)) {
        // If can't get from request context, get from http request.
        return WebUtils.getRemoteIp(httpServletRequest);
    }
    return contextSourceIp;
}
|
@Test
void getSourceIpForHttpRequest() {
    when(request.getRemoteAddr()).thenReturn("3.3.3.3");
    // assumes the test setup seeded the context with sourceIp=2.2.2.2 and
    // remoteIp=1.1.1.1 — TODO confirm against the fixture's @BeforeEach
    assertEquals("2.2.2.2", NamingRequestUtil.getSourceIpForHttpRequest(request));
    // Clearing the context source ip falls back to the context remote ip.
    RequestContextHolder.getContext().getBasicContext().getAddressContext().setSourceIp(null);
    assertEquals("1.1.1.1", NamingRequestUtil.getSourceIpForHttpRequest(request));
    // With both context values cleared, the servlet request's address wins.
    RequestContextHolder.getContext().getBasicContext().getAddressContext().setRemoteIp(null);
    assertEquals("3.3.3.3", NamingRequestUtil.getSourceIpForHttpRequest(request));
}
|
/**
 * Synchronous wrapper around {@code entryScanIteratorAsync}: performs one
 * scan step and blocks for the result.
 */
private ScanResult<Object> entryScanIterator(RedisClient client, String startPos, String pattern, int count) {
    return get(entryScanIteratorAsync(client, startPos, pattern, count));
}
|
@Test
public void testEntryScanIterator() {
    // Entries must come back in score order with their original scores intact.
    RScoredSortedSet<String> sortedSet = redisson.getScoredSortedSet("test");
    sortedSet.add(1.1, "v1");
    sortedSet.add(1.2, "v2");
    sortedSet.add(1.3, "v3");
    Iterator<ScoredEntry<String>> iterator = sortedSet.entryIterator();
    assertThat(iterator).toIterable().containsExactly(new ScoredEntry<>(1.1, "v1"),
            new ScoredEntry<>(1.2, "v2"), new ScoredEntry<>(1.3, "v3"));
}
|
/**
 * Returns this continuous value viewed as the requested type.
 *
 * @param type target class; only {@code Object}, {@code double} and
 *             {@code Double} are supported
 * @return the value boxed as a {@code Double}, or empty for any other type
 */
@Override
public <T> Optional<T> valueAs(Class<T> type) {
    checkNotNull(type);
    final boolean representable = type == Object.class || type == double.class || type == Double.class;
    if (!representable) {
        return Optional.empty();
    }
    @SuppressWarnings("unchecked")
    T converted = (T) Double.valueOf(this.value);
    return Optional.of(converted);
}
|
@Test
public void testValueAsPrimitiveDouble() {
    // valueAs must accept the primitive double class, not only Double.class.
    ContinuousResource resource = Resources.continuous(D1, P1, Bandwidth.class)
            .resource(BW1.bps());
    Optional<Double> volume = resource.valueAs(double.class);
    assertThat(volume.get(), is(BW1.bps()));
}
|
/**
 * Returns the shared singleton instance of this plugin manager.
 *
 * @return the singleton {@code CustomEnvironmentPluginManager}
 */
public static CustomEnvironmentPluginManager getInstance() {
    return INSTANCE;
}
|
@Test
void testInstance() {
    // The singleton accessor must never return null.
    CustomEnvironmentPluginManager instance = CustomEnvironmentPluginManager.getInstance();
    assertNotNull(instance);
}
|
/**
 * Looks up (or creates) the state cell addressed by the given namespace and
 * tag, using a null state context.
 */
@Override
public <T extends State> T state(StateNamespace namespace, StateTag<T> address) {
    return workItemState.get(namespace, address, StateContexts.nullContext());
}
|
@Test
public void testOrderedListMergePendingAddsAndDeletes() {
    // Id-tracker state (available ids and subrange deletions) resolves to null,
    // i.e. nothing has been persisted yet for this ordered list.
    SettableFuture<Map<Range<Instant>, RangeSet<Long>>> orderedListFuture = SettableFuture.create();
    orderedListFuture.set(null);
    SettableFuture<Map<Range<Instant>, RangeSet<Instant>>> deletionsFuture =
        SettableFuture.create();
    deletionsFuture.set(null);
    when(mockReader.valueFuture(
        systemKey(NAMESPACE, "orderedList" + IdTracker.IDS_AVAILABLE_STR),
        STATE_FAMILY,
        IdTracker.IDS_AVAILABLE_CODER))
        .thenReturn(orderedListFuture);
    when(mockReader.valueFuture(
        systemKey(NAMESPACE, "orderedList" + IdTracker.DELETIONS_STR),
        STATE_FAMILY,
        IdTracker.SUBRANGE_DELETIONS_CODER))
        .thenReturn(deletionsFuture);
    // Backing-store contents are provided later via this future.
    SettableFuture<Iterable<TimestampedValue<String>>> fromStorage = SettableFuture.create();
    when(mockReader.orderedListFuture(
        FULL_ORDERED_LIST_RANGE,
        key(NAMESPACE, "orderedList"),
        STATE_FAMILY,
        StringUtf8Coder.of()))
        .thenReturn(fromStorage);
    StateTag<OrderedListState<String>> addr =
        StateTags.orderedList("orderedList", StringUtf8Coder.of());
    OrderedListState<String> orderedListState = underTest.state(NAMESPACE, addr);
    // Pending local adds, one of which ("fourth" at ts=2) is wiped by the
    // clearRange([2, 5)) below and then re-added at ts=4.
    orderedListState.add(TimestampedValue.of("second", Instant.ofEpochMilli(1)));
    orderedListState.add(TimestampedValue.of("third", Instant.ofEpochMilli(2)));
    orderedListState.add(TimestampedValue.of("fourth", Instant.ofEpochMilli(2)));
    orderedListState.add(TimestampedValue.of("eighth", Instant.ofEpochMilli(10)));
    orderedListState.add(TimestampedValue.of("ninth", Instant.ofEpochMilli(15)));
    orderedListState.clearRange(Instant.ofEpochMilli(2), Instant.ofEpochMilli(5));
    orderedListState.add(TimestampedValue.of("fourth", Instant.ofEpochMilli(4)));
    // Persisted values; those in [2, 5) would be dropped, but none fall there.
    fromStorage.set(
        ImmutableList.of(
            TimestampedValue.of("first", Instant.ofEpochMilli(-1)),
            TimestampedValue.of("fifth", Instant.ofEpochMilli(5)),
            TimestampedValue.of("sixth", Instant.ofEpochMilli(5)),
            TimestampedValue.of("seventh", Instant.ofEpochMilli(5)),
            TimestampedValue.of("tenth", Instant.ofEpochMilli(20))));
    // Expected merge: storage plus surviving pending adds, ordered by timestamp;
    // "third" and the original "fourth" (ts=2) were removed by clearRange.
    TimestampedValue[] expected =
        Iterables.toArray(
            ImmutableList.of(
                TimestampedValue.of("first", Instant.ofEpochMilli(-1)),
                TimestampedValue.of("second", Instant.ofEpochMilli(1)),
                TimestampedValue.of("fourth", Instant.ofEpochMilli(4)),
                TimestampedValue.of("fifth", Instant.ofEpochMilli(5)),
                TimestampedValue.of("sixth", Instant.ofEpochMilli(5)),
                TimestampedValue.of("seventh", Instant.ofEpochMilli(5)),
                TimestampedValue.of("eighth", Instant.ofEpochMilli(10)),
                TimestampedValue.of("ninth", Instant.ofEpochMilli(15)),
                TimestampedValue.of("tenth", Instant.ofEpochMilli(20))),
            TimestampedValue.class);
    TimestampedValue[] read = Iterables.toArray(orderedListState.read(), TimestampedValue.class);
    assertArrayEquals(expected, read);
}
|
/**
 * Checks whether the given path exists on the B2 backend.
 *
 * <p>Buckets are matched by name against the account's bucket listing; any
 * other path is resolved through the attributes lookup, where a
 * {@link NotfoundException} means the file does not exist.
 */
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
    try {
        if(!containerService.isContainer(file)) {
            // Regular file or folder: existence is determined by a successful
            // attributes lookup.
            try {
                attributes.find(file, listener);
                return true;
            }
            catch(NotfoundException e) {
                return false;
            }
        }
        // Bucket: search the account's bucket listing for a name match.
        for(B2BucketResponse bucket : session.getClient().listBuckets()) {
            if(StringUtils.equals(containerService.getContainer(file).getName(), bucket.getBucketName())) {
                return true;
            }
        }
        return false;
    }
    catch(B2ApiException e) {
        throw new B2ExceptionMappingService(fileid).map("Failure to read attributes of {0}", e, file);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Failure to read attributes of {0}", e, file);
    }
}
|
@Test
public void testFindCommonPrefix() throws Exception {
    final B2VersionIdProvider fileid = new B2VersionIdProvider(session);
    final Path container = new Path("test-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    assertTrue(new B2FindFeature(session, fileid).find(container));
    // Create a file below a prefix that has no placeholder object of its own.
    final String prefix = new AlphanumericRandomStringService().random();
    final Path intermediate = new Path(container, prefix, EnumSet.of(Path.Type.directory));
    final Path test = new B2TouchFeature(session, fileid).touch(new Path(intermediate, new AsciiRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    assertTrue(new B2FindFeature(session, fileid).find(test));
    // A file path reinterpreted as a directory must not be found.
    assertFalse(new B2FindFeature(session, fileid).find(new Path(test.getAbsolute(), EnumSet.of(Path.Type.directory))));
    // The common prefix itself is found while it still contains the file.
    assertTrue(new B2FindFeature(session, fileid).find(intermediate));
    // Ignore 404 for placeholder and search for common prefix
    assertTrue(new B2FindFeature(session, fileid).find(new Path(container, prefix, EnumSet.of(Path.Type.directory, Path.Type.placeholder))));
    assertTrue(new B2ObjectListService(session, fileid).list(intermediate,
        new DisabledListProgressListener()).contains(test));
    // After deleting the only file, the prefix must disappear as well.
    new B2DeleteFeature(session, fileid).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse(new B2FindFeature(session, fileid).find(test));
    assertFalse(new B2FindFeature(session, fileid).find(intermediate));
    // A negative result must not be cached for the placeholder path.
    final PathCache cache = new PathCache(1);
    final Path directory = new Path(container, prefix, EnumSet.of(Path.Type.directory, Path.Type.placeholder));
    assertFalse(new CachingFindFeature(session, cache, new B2FindFeature(session, fileid)).find(directory));
    assertFalse(cache.isCached(directory));
    assertFalse(new B2FindFeature(session, fileid).find(new Path(container, prefix, EnumSet.of(Path.Type.directory, Path.Type.placeholder))));
}
|
/**
 * Wraps the given handlers so they are skipped when {@code noop} is set.
 *
 * @param handlers handlers to guard; an empty array yields {@link SpanHandler#NOOP}
 * @param noop     flag consulted before delegating to the handlers
 * @return a noop-aware handler delegating to the single handler or to a
 *         composite over all of them
 */
public static SpanHandler create(SpanHandler[] handlers,
                                 AtomicBoolean noop) {
    switch (handlers.length) {
        case 0:
            return SpanHandler.NOOP;
        case 1:
            return new NoopAwareSpanHandler(handlers[0], noop);
        default:
            return new NoopAwareSpanHandler(new CompositeSpanHandler(handlers), noop);
    }
}
|
@Test void create_emptyIsNoop() {
    // With no handlers there is nothing to guard: the shared NOOP is returned.
    assertThat(NoopAwareSpanHandler.create(new SpanHandler[0], noop))
        .isEqualTo(SpanHandler.NOOP);
}
|
/**
 * Shuts the cluster down: stops persistent queries first so no writer is
 * active, then deletes the matching sink topics, the managed topics, and
 * finally closes the KSQL engine. The order is significant.
 *
 * @param deleteTopicPatterns patterns selecting which sink topics to delete
 */
public void terminateCluster(final List<String> deleteTopicPatterns) {
    terminatePersistentQueries();
    deleteSinkTopics(deleteTopicPatterns);
    deleteTopics(managedTopics);
    ksqlEngine.close();
}
|
@Test
public void shouldClosePersistentQueries() {
    // When: terminating with no topic delete patterns
    clusterTerminator.terminateCluster(Collections.emptyList());
    // Then: every persistent query must still be closed
    verify(persistentQuery0).close();
    verify(persistentQuery1).close();
}
|
/**
 * Parses resolver options from the system's default resolv.conf location.
 *
 * @return the parsed options
 * @throws IOException if the file cannot be read
 */
static UnixResolverOptions parseEtcResolverOptions() throws IOException {
    return parseEtcResolverOptions(new File(ETC_RESOLV_CONF_FILE));
}
|
@Test
public void defaultValueReturnedIfAttemptsOptionsIsNotPresent(@TempDir Path tempDir) throws IOException {
    // resolv.conf without an "options attempts:N" line must yield the default.
    File f = buildFile(tempDir, "search localdomain\n" +
        "nameserver 127.0.0.11\n");
    assertEquals(16, parseEtcResolverOptions(f).attempts());
}
|
/**
 * Reads an XML document from the given file.
 *
 * @param file XML file to read, must exist and be a regular file
 * @return the parsed {@code Document}
 * @throws UtilException if the file is missing or is not a regular file
 */
public static Document readXML(File file) {
    Assert.notNull(file, "Xml file is null !");
    if (!file.exists()) {
        throw new UtilException("File [{}] not a exist!", file.getAbsolutePath());
    }
    if (!file.isFile()) {
        throw new UtilException("[{}] not a file!", file.getAbsolutePath());
    }
    try {
        // Normalize the path where possible; fall back to the path as given.
        file = file.getCanonicalFile();
    } catch (IOException e) {
        // ignore
    }
    BufferedInputStream in = null;
    try {
        in = FileUtil.getInputStream(file);
        return readXML(in);
    } finally {
        // Always release the stream, even when parsing throws.
        IoUtil.close(in);
    }
}
|
@Test
public void readTest() {
    // Reading a valid classpath XML file must produce a non-null document.
    final Document doc = XmlUtil.readXML("test.xml");
    assertNotNull(doc);
}
|
/**
 * Command-line entry point. Dispatches, in order: help, batch mode, async
 * mode; otherwise processes each argument as a file/resource, finally
 * falling back to stdin pipe mode or the GUI when no arguments are given.
 *
 * @param args command-line arguments
 * @throws Exception on any processing failure
 */
public static void main(String[] args) throws Exception {
    TikaCLI cli = new TikaCLI();
    if (cli.testForHelp(args)) {
        cli.usage();
        return;
    } else if (cli.testForBatch(args)) {
        // Batch mode drives a separate child-process pipeline.
        String[] batchArgs = BatchCommandLineBuilder.build(args);
        BatchProcessDriverCLI batchDriver = new BatchProcessDriverCLI(batchArgs);
        batchDriver.execute();
        return;
    } else if (cli.testForAsync(args)) {
        async(args);
        return;
    }
    if (args.length > 0) {
        for (String arg : args) {
            cli.process(arg);
        }
        // "-" reads from stdin after the named inputs when pipe mode is on.
        if (cli.pipeMode) {
            cli.process("-");
        }
    } else {
        // Started with no arguments. Wait for up to 0.1s to see if
        // we have something waiting in standard input and use the
        // pipe mode if we have. If no input is seen, start the GUI.
        if (System.in.available() == 0) {
            Thread.sleep(100);
        }
        if (System.in.available() > 0) {
            cli.process("-");
        } else {
            cli.process("--gui");
        }
    }
}
|
@Test
public void testExtractInlineImages() throws Exception {
    // -z extracts embedded documents/images into --extract-dir.
    String[] params = {"--extract-dir=" + extractDir.toAbsolutePath(), "-z", resourcePrefix + "/testPDF_childAttachments.pdf"};
    TikaCLI.main(params);
    String[] tempFileNames = extractDir
        .toFile()
        .list();
    assertNotNull(tempFileNames);
    String allFiles = String.join(" : ", tempFileNames);
    Path jpeg = extractDir.resolve("image0.jpg");
    //tiff isn't extracted without optional image dependency
    // File tiff = new File(tempFile, "image1.tif");
    Path jobOptions = extractDir.resolve("Press Quality(1).joboptions");
    Path doc = extractDir.resolve("Unit10.doc");
    // The jpeg, joboptions attachment and embedded doc must all be extracted.
    assertExtracted(jpeg, allFiles);
    assertExtracted(jobOptions, allFiles);
    assertExtracted(doc, allFiles);
}
|
/**
 * Clusters the data with G-Means using default settings: at most 100
 * iterations and a convergence tolerance of 1E-4.
 *
 * @param data the data points
 * @param kmax the maximum number of clusters
 * @return the fitted model
 */
public static GMeans fit(double[][] data, int kmax) {
    return fit(data, kmax, 100, 1E-4);
}
|
@Test
public void testUSPS() throws Exception {
    System.out.println("USPS");
    MathEx.setSeed(19650218); // to get repeatable results.
    double[][] x = USPS.x;
    int[] y = USPS.y;
    double[][] testx = USPS.testx;
    int[] testy = USPS.testy;
    // Fit on the training split with at most 10 clusters.
    GMeans model = GMeans.fit(x, 10);
    System.out.println(model);
    // Clustering quality against the true labels on the training data.
    double r = RandIndex.of(y, model.y);
    double r2 = AdjustedRandIndex.of(y, model.y);
    System.out.format("Training rand index = %.2f%%, adjusted rand index = %.2f%%%n", 100.0 * r, 100.0 * r2);
    // Expected values are pinned by the fixed seed above.
    assertEquals(0.9137, r, 1E-4);
    assertEquals(0.5485, r2, 1E-4);
    System.out.format("MI = %.2f%n", MutualInformation.of(y, model.y));
    System.out.format("NMI.joint = %.2f%%%n", 100 * NormalizedMutualInformation.joint(y, model.y));
    System.out.format("NMI.max = %.2f%%%n", 100 * NormalizedMutualInformation.max(y, model.y));
    System.out.format("NMI.min = %.2f%%%n", 100 * NormalizedMutualInformation.min(y, model.y));
    System.out.format("NMI.sum = %.2f%%%n", 100 * NormalizedMutualInformation.sum(y, model.y));
    System.out.format("NMI.sqrt = %.2f%%%n", 100 * NormalizedMutualInformation.sqrt(y, model.y));
    // Assign each test point to its nearest centroid and re-score.
    int[] p = new int[testx.length];
    for (int i = 0; i < testx.length; i++) {
        p[i] = model.predict(testx[i]);
    }
    r = RandIndex.of(testy, p);
    r2 = AdjustedRandIndex.of(testy, p);
    System.out.format("Testing rand index = %.2f%%, adjusted rand index = %.2f%%%n", 100.0 * r, 100.0 * r2);
    assertEquals(0.9012, r, 1E-4);
    assertEquals(0.4822, r2, 1E-4);
    // The fitted model must survive a serialization round trip.
    java.nio.file.Path temp = Write.object(model);
    Read.object(temp);
}
|
/**
 * Constructs a Chinese (lunar) date from the given Gregorian {@link Date},
 * converting through its {@code Instant}.
 *
 * @param date the Gregorian date to convert
 */
public ChineseDate(Date date) {
    this(LocalDateTimeUtil.ofDate(date.toInstant()));
}
|
@Test
public void chineseDateTest() {
    // 2020-01-25 is Chinese New Year's Day of the 庚子 (rat) year.
    ChineseDate date = new ChineseDate(DateUtil.parseDate("2020-01-25"));
    assertEquals("2020-01-25 00:00:00", date.getGregorianDate().toString());
    assertEquals(2020, date.getChineseYear());
    assertEquals(1, date.getMonth());
    assertEquals("一月", date.getChineseMonth());
    assertEquals("正月", date.getChineseMonthName());
    assertEquals(1, date.getDay());
    assertEquals("初一", date.getChineseDay());
    assertEquals("庚子", date.getCyclical());
    assertEquals("鼠", date.getChineseZodiac());
    assertEquals("春节", date.getFestivals());
    assertEquals("庚子鼠年 正月初一", date.toString());
    // Dates before the new year still belong to the previous (己亥/pig) year.
    date = new ChineseDate(DateUtil.parseDate("2020-01-14"));
    assertEquals("己亥猪年 腊月二十", date.toString());
    // Last day of the lunar year.
    date = new ChineseDate(DateUtil.parseDate("2020-01-24"));
    assertEquals("己亥猪年 腊月三十", date.toString());
    assertEquals("2019-12-30", date.toStringNormal());
}
|
/**
 * Authenticates the incoming call: if a device is associated with the call,
 * proceeds with the device bound into the gRPC {@link Context}; otherwise
 * closes the call as unauthenticated.
 */
@Override
public <ReqT, RespT> ServerCall.Listener<ReqT> interceptCall(final ServerCall<ReqT, RespT> call,
                                                             final Metadata headers,
                                                             final ServerCallHandler<ReqT, RespT> next) {
    final var maybeDevice = getAuthenticatedDevice(call);
    if (maybeDevice.isEmpty()) {
        // No authenticated device: reject before reaching the handler.
        return closeAsUnauthenticated(call);
    }
    final var authenticatedContext = Context.current()
            .withValue(AuthenticationUtil.CONTEXT_AUTHENTICATED_DEVICE, maybeDevice.get());
    return Contexts.interceptCall(authenticatedContext, call, headers, next);
}
|
@Test
void interceptCall() {
    final ClientConnectionManager clientConnectionManager = getClientConnectionManager();
    // No authenticated device: the call must be closed as UNAUTHENTICATED.
    when(clientConnectionManager.getAuthenticatedDevice(any())).thenReturn(Optional.empty());
    GrpcTestUtils.assertStatusException(Status.UNAUTHENTICATED, this::getAuthenticatedDevice);
    // With an authenticated device present, the call proceeds and the
    // device identity is propagated through the response.
    final AuthenticatedDevice authenticatedDevice = new AuthenticatedDevice(UUID.randomUUID(), Device.PRIMARY_ID);
    when(clientConnectionManager.getAuthenticatedDevice(any())).thenReturn(Optional.of(authenticatedDevice));
    final GetAuthenticatedDeviceResponse response = getAuthenticatedDevice();
    assertEquals(UUIDUtil.toByteString(authenticatedDevice.accountIdentifier()), response.getAccountIdentifier());
    assertEquals(authenticatedDevice.deviceId(), response.getDeviceId());
}
|
/**
 * Computes the minimum and maximum lengths over the given symbols, skipping
 * empty strings (SENTINEL slots such as month index 12 or weekday index 0).
 *
 * @param symbols the symbol strings to inspect
 * @return a two-element array {min, max}; if every symbol is empty, min is
 *         {@code Integer.MAX_VALUE} and max is 0
 */
static int[] findMinMaxLengthsInSymbols(String[] symbols) {
    int shortest = Integer.MAX_VALUE;
    int longest = 0;
    for (String symbol : symbols) {
        final int length = symbol.length();
        // Skip SENTINEL entries, which can be empty strings.
        if (length == 0) {
            continue;
        }
        if (length < shortest) {
            shortest = length;
        }
        if (length > longest) {
            longest = length;
        }
    }
    return new int[] { shortest, longest };
}
|
@Test
public void findMinMaxLengthsInSymbolsWithTrivialInputs() {
    // "a" and "bb" -> min length 1, max length 2.
    String[] symbols = new String[] { "a", "bb" };
    int[] results = CharSequenceToRegexMapper.findMinMaxLengthsInSymbols(symbols);
    assertEquals(1, results[0]);
    assertEquals(2, results[1]);
}
|
/**
 * Asserts that the given condition holds.
 *
 * @param expression condition expected to be {@code true}
 * @param message    detail message used when the condition fails
 * @throws IllegalArgumentException if {@code expression} is {@code false}
 */
public static void isTrue(boolean expression, String message) {
    if (!expression) {
        throw new IllegalArgumentException(message);
    }
}
|
@Test
public void testIsTrue() {
    // A true expression must complete without throwing.
    Utils.isTrue(true, "foo");
}
|
/**
 * Builds a dispatch criteria string from a single-valued parts map by
 * adapting it to the multimap-based overload.
 *
 * @param partsRule rule describing which parts participate
 * @param partsMap  part name to value; may be null or empty
 * @return the dispatch criteria, or an empty string when no parts are given
 */
public static String buildFromPartsMap(String partsRule, Map<String, String> partsMap) {
    if (partsMap == null || partsMap.isEmpty()) {
        return "";
    }
    final Multimap<String, String> multimap = ArrayListMultimap.create();
    partsMap.forEach(multimap::put);
    return buildFromPartsMap(partsRule, multimap);
}
|
@Test
void testBuildFromPartsMap() {
    Multimap<String, String> partsMap = ArrayListMultimap.create();
    partsMap.put("year", "2018");
    partsMap.put("month", "05");
    partsMap.put("year-summary", "true");
    partsMap.put("half-year", "true");
    // Dispatch string parts are sorted.
    String dispatchCriteria = DispatchCriteriaHelper.buildFromPartsMap("month && year", partsMap);
    assertEquals("/month=05/year=2018", dispatchCriteria);
    // Only 1 parameter should be taken into account according to rules.
    dispatchCriteria = DispatchCriteriaHelper.buildFromPartsMap("year", partsMap);
    assertEquals("/year=2018", dispatchCriteria);
    // 2 parameters should be taken into account according to rules with no inclusion of year.
    dispatchCriteria = DispatchCriteriaHelper.buildFromPartsMap("month && year-summary", partsMap);
    assertEquals("/month=05/year-summary=true", dispatchCriteria);
}
|
/**
 * Reports whether the wrapped process is still alive.
 *
 * @return {@code true} while the process has not yet produced an exit value
 */
public boolean isRunning() {
    try {
        // exitValue() only succeeds once the process has terminated.
        process.exitValue();
        return false;
    } catch (IllegalThreadStateException e) {
        // Still running: no exit value is available yet.
        return true;
    }
}
|
@Test
void shouldReturnFalseWhenAProcessHasExited() {
    Process process = getMockedProcess(mock(OutputStream.class));
    // A non-throwing exitValue() means the process has terminated.
    when(process.exitValue()).thenReturn(1);
    ProcessWrapper processWrapper = new ProcessWrapper(process, null, "", inMemoryConsumer(), UTF_8, null);
    assertThat(processWrapper.isRunning()).isFalse();
}
|
/**
 * Converts a data table to a map, using the first column as keys and the
 * remaining columns as values.
 *
 * <p>A blank first header cell signals that the header row belongs to the
 * value type only; key/value counts must agree after conversion.
 *
 * @param dataTable the table to convert, must not be null
 * @param keyType   target type for the key column, must not be null
 * @param valueType target type for the value columns, must not be null
 * @return the converted map; empty for an empty table
 */
@Override
public <K, V> Map<K, V> toMap(DataTable dataTable, Type keyType, Type valueType) {
    requireNonNull(dataTable, "dataTable may not be null");
    requireNonNull(keyType, "keyType may not be null");
    requireNonNull(valueType, "valueType may not be null");
    if (dataTable.isEmpty()) {
        return emptyMap();
    }
    // Split into the key column and the remaining value columns.
    DataTable keyColumn = dataTable.columns(0, 1);
    DataTable valueColumns = dataTable.columns(1);
    // A blank top-left cell means the header row describes values only.
    String firstHeaderCell = keyColumn.cell(0, 0);
    boolean firstHeaderCellIsBlank = firstHeaderCell == null || firstHeaderCell.isEmpty();
    List<K> keys = convertEntryKeys(keyType, keyColumn, valueType, firstHeaderCellIsBlank);
    // Single-column table: keys map to null values.
    if (valueColumns.isEmpty()) {
        return createMap(keyType, keys, valueType, nCopies(keys.size(), null));
    }
    // One key per non-header row implies a table-row transformer for values.
    boolean keysImplyTableRowTransformer = keys.size() == dataTable.height() - 1;
    List<V> values = convertEntryValues(valueColumns, keyType, valueType, keysImplyTableRowTransformer);
    if (keys.size() != values.size()) {
        throw keyValueMismatchException(firstHeaderCellIsBlank, keys.size(), keyType, values.size(), valueType);
    }
    return createMap(keyType, keys, valueType, values);
}
|
/**
 * With an entry transformer registered for the key type but none for the value type,
 * map conversion must fail with a message explaining the ambiguity.
 */
@Test
void to_map_of_object_to_unknown_type__throws_exception__register_table_entry_transformer() {
    DataTable table = parse("",
        "| code | lat | lon |",
        "| KMSY | 29.993333 | -90.258056 |",
        "| KSFO | 37.618889 | -122.375 |",
        "| KSEA | 47.448889 | -122.309444 |",
        "| KJFK | 40.639722 | -73.778889 |");
    // Only the key type gets a transformer; Coordinate remains unconvertible.
    registry.defineDataTableType(new DataTableType(AirPortCode.class, AIR_PORT_CODE_TABLE_ENTRY_TRANSFORMER));
    CucumberDataTableException exception = assertThrows(
        CucumberDataTableException.class,
        () -> converter.toMap(table, AirPortCode.class, Coordinate.class));
    assertThat(exception.getMessage(), startsWith(format("" +
        "Can't convert DataTable to Map<%s, %s>.\n" +
        "The first cell was either blank or you have registered a TableEntryTransformer for the key type.",
        typeName(AirPortCode.class), typeName(Coordinate.class))));
}
|
/**
 * Streams the outcome of a pop request to the gRPC observer and always completes the stream.
 * For FOUND results the status message is sent first, then one response per message; if
 * writing a message fails, that message and all remaining ones are routed through
 * {@link #processThrowableWhenWriteMessage} so they can be made redeliverable.
 *
 * @param ctx proxy call context
 * @param request the original receive request
 * @param popResult result of the broker pop operation
 */
public void writeAndComplete(ProxyContext ctx, ReceiveMessageRequest request, PopResult popResult) {
    PopStatus status = popResult.getPopStatus();
    List<MessageExt> messageFoundList = popResult.getMsgFoundList();
    try {
        switch (status) {
            case FOUND:
                if (messageFoundList.isEmpty()) {
                    streamObserver.onNext(ReceiveMessageResponse.newBuilder()
                        .setStatus(ResponseBuilder.getInstance().buildStatus(Code.MESSAGE_NOT_FOUND, "no match message"))
                        .build());
                } else {
                    // Status frame precedes the message frames.
                    streamObserver.onNext(ReceiveMessageResponse.newBuilder()
                        .setStatus(ResponseBuilder.getInstance().buildStatus(Code.OK, Code.OK.name()))
                        .build());
                    Iterator<MessageExt> messageIterator = messageFoundList.iterator();
                    while (messageIterator.hasNext()) {
                        MessageExt curMessageExt = messageIterator.next();
                        Message curMessage = convertToMessage(curMessageExt);
                        try {
                            streamObserver.onNext(ReceiveMessageResponse.newBuilder()
                                .setMessage(curMessage)
                                .build());
                        } catch (Throwable t) {
                            // Writing failed: hand the current message and every remaining one
                            // to the failure path, then stop (finally still completes the stream).
                            this.processThrowableWhenWriteMessage(t, ctx, request, curMessageExt);
                            messageIterator.forEachRemaining(messageExt ->
                                this.processThrowableWhenWriteMessage(t, ctx, request, messageExt));
                            return;
                        }
                    }
                }
                break;
            case POLLING_FULL:
                streamObserver.onNext(ReceiveMessageResponse.newBuilder()
                    .setStatus(ResponseBuilder.getInstance().buildStatus(Code.TOO_MANY_REQUESTS, "polling full"))
                    .build());
                break;
            case NO_NEW_MSG:
            case POLLING_NOT_FOUND:
            default:
                streamObserver.onNext(ReceiveMessageResponse.newBuilder()
                    .setStatus(ResponseBuilder.getInstance().buildStatus(Code.MESSAGE_NOT_FOUND, "no new message"))
                    .build());
                break;
        }
    } catch (Throwable t) {
        // Best effort: report the error status without letting a second failure escape.
        writeResponseWithErrorIgnore(
            ReceiveMessageResponse.newBuilder().setStatus(ResponseBuilder.getInstance().buildStatus(t)).build());
    } finally {
        onComplete();
    }
}
|
/**
 * Third onNext call throws, simulating a broken stream mid-write: the second message must be
 * re-made visible via changeInvisibleTime and the stream must still be completed.
 */
@Test
public void testWriteMessage() {
    ArgumentCaptor<String> changeInvisibleTimeMsgIdCaptor = ArgumentCaptor.forClass(String.class);
    doReturn(CompletableFuture.completedFuture(mock(AckResult.class))).when(this.messagingProcessor)
        .changeInvisibleTime(any(), any(), changeInvisibleTimeMsgIdCaptor.capture(), anyString(), anyString(), anyLong());
    ArgumentCaptor<ReceiveMessageResponse> responseArgumentCaptor = ArgumentCaptor.forClass(ReceiveMessageResponse.class);
    // Fail on the third onNext (status + first message succeed, second message write fails).
    AtomicInteger onNextCallNum = new AtomicInteger(0);
    doAnswer(mock -> {
        if (onNextCallNum.incrementAndGet() > 2) {
            throw new RuntimeException();
        }
        return null;
    }).when(streamObserver).onNext(responseArgumentCaptor.capture());
    List<MessageExt> messageExtList = new ArrayList<>();
    messageExtList.add(createMessageExt(TOPIC, "tag"));
    messageExtList.add(createMessageExt(TOPIC, "tag"));
    PopResult popResult = new PopResult(PopStatus.FOUND, messageExtList);
    writer.writeAndComplete(
        ProxyContext.create(),
        ReceiveMessageRequest.newBuilder()
            .setGroup(Resource.newBuilder().setName(CONSUMER_GROUP).build())
            .setMessageQueue(MessageQueue.newBuilder().setTopic(Resource.newBuilder().setName(TOPIC).build()).build())
            .setFilterExpression(FilterExpression.newBuilder()
                .setType(FilterType.TAG)
                .setExpression("*")
                .build())
            .build(),
        popResult
    );
    // Stream completed exactly once; 4 writes attempted (status, msg1, failing msg2, retry path).
    verify(streamObserver, times(1)).onCompleted();
    verify(streamObserver, times(4)).onNext(any());
    verify(this.messagingProcessor, times(1))
        .changeInvisibleTime(any(), any(), anyString(), anyString(), anyString(), anyLong());
    assertTrue(responseArgumentCaptor.getAllValues().get(0).hasStatus());
    assertEquals(Code.OK, responseArgumentCaptor.getAllValues().get(0).getStatus().getCode());
    assertTrue(responseArgumentCaptor.getAllValues().get(1).hasMessage());
    assertEquals(messageExtList.get(0).getMsgId(), responseArgumentCaptor.getAllValues().get(1).getMessage().getSystemProperties().getMessageId());
    // The message that failed to write is the one whose invisibility was changed.
    assertEquals(messageExtList.get(1).getMsgId(), changeInvisibleTimeMsgIdCaptor.getValue());
}
|
/**
 * Builds the row-lock key by joining resource id, table name and primary key
 * with the {@code LOCK_SPLIT} delimiter.
 *
 * @param resourceId resource identifier
 * @param tableName table the lock applies to
 * @param pk primary key of the locked row
 * @return the composed row key
 */
protected String getRowKey(String resourceId, String tableName, String pk) {
    return resourceId + LOCK_SPLIT + tableName + LOCK_SPLIT + pk;
}
|
/** Verifies the row key is "resourceId^^^tableName^^^pk" using a minimal AbstractLocker stub. */
@Test
public void testGetRowKey() {
    // Anonymous subclass: all abstract operations stubbed out, only getRowKey is exercised.
    AbstractLocker locker = new AbstractLocker() {
        @Override
        public boolean acquireLock(List<RowLock> rowLock) {
            return false;
        }
        @Override
        public boolean acquireLock(List<RowLock> rowLock, boolean autoCommit, boolean skipCheckLock) {
            return false;
        }
        @Override
        public boolean releaseLock(List<RowLock> rowLock) {
            return false;
        }
        @Override
        public boolean isLockable(List<RowLock> rowLock) {
            return false;
        }
        @Override
        public void updateLockStatus(String xid, LockStatus lockStatus) {
        }
    };
    // Call the getRowKey method
    String rowKey = locker.getRowKey("resource1", "table1", "123");
    // Assert that the row key is constructed correctly
    Assertions.assertEquals("resource1^^^table1^^^123", rowKey);
}
|
/**
 * Swaps an encrypt table rule configuration into its YAML representation,
 * keying each swapped column configuration by its column name.
 *
 * @param data table rule configuration to swap
 * @return YAML table rule configuration with name and per-column entries populated
 */
@Override
public YamlEncryptTableRuleConfiguration swapToYamlConfiguration(final EncryptTableRuleConfiguration data) {
    YamlEncryptTableRuleConfiguration result = new YamlEncryptTableRuleConfiguration();
    result.setName(data.getName());
    data.getColumns().forEach(column -> result.getColumns().put(column.getName(), columnSwapper.swapToYamlConfiguration(column)));
    return result;
}
|
/** Swapping a table rule with three columns must yield a YAML config keyed by column name. */
@Test
void assertSwapToYamlConfiguration() {
    EncryptColumnRuleConfiguration encryptColumnTwoConfig = new EncryptColumnRuleConfiguration("encrypt_column_2", new EncryptColumnItemRuleConfiguration("encrypt_cipher_2", "test_encryptor_2"));
    EncryptColumnRuleConfiguration encryptColumnThreeConfig =
        new EncryptColumnRuleConfiguration("encrypt_column_3", new EncryptColumnItemRuleConfiguration("encrypt_cipher_3", "test_encryptor_3"));
    Collection<EncryptColumnRuleConfiguration> encryptColumnRuleConfigs = Arrays.asList(
        new EncryptColumnRuleConfiguration("encrypt_column_1", new EncryptColumnItemRuleConfiguration("encrypt_cipher_1", "test_encryptor_1")),
        encryptColumnTwoConfig,
        encryptColumnThreeConfig);
    EncryptTableRuleConfiguration encryptTableRuleConfig = new EncryptTableRuleConfiguration("test_table", encryptColumnRuleConfigs);
    YamlEncryptTableRuleConfiguration actualYamlEncryptTableRuleConfig = swapper.swapToYamlConfiguration(encryptTableRuleConfig);
    assertThat(actualYamlEncryptTableRuleConfig.getName(), is("test_table"));
    // Each input column must appear under its own name with cipher name and encryptor preserved.
    Map<String, YamlEncryptColumnRuleConfiguration> actualColumns = actualYamlEncryptTableRuleConfig.getColumns();
    assertThat(actualColumns.size(), is(3));
    YamlEncryptColumnRuleConfiguration actualYamlEncryptColumnRuleConfigFirst = actualColumns.get("encrypt_column_1");
    assertThat(actualYamlEncryptColumnRuleConfigFirst.getCipher().getName(), is("encrypt_cipher_1"));
    assertThat(actualYamlEncryptColumnRuleConfigFirst.getCipher().getEncryptorName(), is("test_encryptor_1"));
    YamlEncryptColumnRuleConfiguration actualYamlEncryptColumnRuleConfigSecond = actualColumns.get("encrypt_column_2");
    assertThat(actualYamlEncryptColumnRuleConfigSecond.getCipher().getName(), is("encrypt_cipher_2"));
    assertThat(actualYamlEncryptColumnRuleConfigSecond.getCipher().getEncryptorName(), is("test_encryptor_2"));
    YamlEncryptColumnRuleConfiguration actualYamlEncryptColumnRuleConfigThird = actualColumns.get("encrypt_column_3");
    assertThat(actualYamlEncryptColumnRuleConfigThird.getCipher().getName(), is("encrypt_cipher_3"));
    assertThat(actualYamlEncryptColumnRuleConfigThird.getCipher().getEncryptorName(), is("test_encryptor_3"));
}
|
/**
 * Parses a PEM-encoded private key into a {@link PrivateKey}.
 *
 * @param privatePemKey PEM text including the BEGIN/END PRIVATE KEY markers
 * @return the decoded private key
 * @throws RuntimeException wrapping the {@link IOException} if the PEM content cannot be parsed
 */
public PrivateKey convertPrivateKey(final String privatePemKey) {
    // try-with-resources closes the parser (and its underlying reader);
    // the previous implementation leaked the PEMParser.
    try (PEMParser pemParser = new PEMParser(new StringReader(privatePemKey))) {
        PrivateKeyInfo privateKeyInfo = PrivateKeyInfo.getInstance(pemParser.readObject());
        return new JcaPEMKeyConverter().getPrivateKey(privateKeyInfo);
    } catch (IOException exception) {
        throw new RuntimeException(exception);
    }
}
|
/** A syntactically invalid PEM body must surface as a RuntimeException caused by a PEMException. */
@Test
void givenMalformedPrivateKey_whenConvertPrivateKey_thenThrowRuntimeException() {
    // Given
    String malformedPrivatePemKey = "-----BEGIN PRIVATE KEY-----\n" +
        "malformedkey\n" +
        "-----END PRIVATE KEY-----";
    // When & Then
    assertThatThrownBy(() -> KeyConverter.convertPrivateKey(malformedPrivatePemKey))
        .isInstanceOf(RuntimeException.class)
        .hasCauseInstanceOf(PEMException.class)
        .hasMessageContaining("PEMException");
}
|
/**
 * Returns the exception message, optionally appending print-friendly dumps of the
 * HL7 message and acknowledgement payloads.
 * When {@code logPhi} is false the payloads (potential PHI) are withheld and only the
 * parent message is returned.
 *
 * @return the parent message, plus payload dumps when PHI logging is enabled
 */
@Override
public String getMessage() {
    if (!logPhi) {
        // PHI logging disabled: never expose payload bytes.
        return super.getMessage();
    }
    String answer;
    if (hasHl7MessageBytes() || hasHl7AcknowledgementBytes()) {
        String parentMessage = super.getMessage();
        // Pre-size the builder for the parent message plus both payloads to avoid regrowth.
        StringBuilder messageBuilder = new StringBuilder(
            parentMessage.length()
                + (hasHl7MessageBytes() ? hl7MessageBytes.length : 0)
                + (hasHl7AcknowledgementBytes()
                    ? hl7AcknowledgementBytes.length : 0));
        messageBuilder.append(parentMessage);
        if (hasHl7MessageBytes()) {
            messageBuilder.append("\n\t{hl7Message [")
                .append(hl7MessageBytes.length)
                .append("] = ");
            hl7Util.appendBytesAsPrintFriendlyString(messageBuilder, hl7MessageBytes, 0, hl7MessageBytes.length);
            messageBuilder.append('}');
        }
        if (hasHl7AcknowledgementBytes()) {
            messageBuilder.append("\n\t{hl7Acknowledgement [")
                .append(hl7AcknowledgementBytes.length)
                .append("] = ");
            hl7Util.appendBytesAsPrintFriendlyString(messageBuilder, hl7AcknowledgementBytes, 0,
                hl7AcknowledgementBytes.length);
            messageBuilder.append('}');
        }
        answer = messageBuilder.toString();
    } else {
        answer = super.getMessage();
    }
    return answer;
}
|
/** An empty acknowledgement payload must be omitted from the PHI-enabled message dump. */
@Test
public void testEmptyAcknowledgement() {
    instance = new MllpException(EXCEPTION_MESSAGE, HL7_MESSAGE_BYTES, EMPTY_BYTE_ARRAY, LOG_PHI_TRUE);
    assertEquals(expectedMessage(HL7_MESSAGE, null), instance.getMessage());
}
|
/**
 * Converts a JDBC result value to the requested target type.
 * The branch order is significant: exact-type match short-circuits first, then
 * temporal types, URL, numbers, dates, byte arrays, boolean and String, before
 * falling back to a plain cast.
 *
 * @param value value to convert; {@code null} is mapped per target type
 * @param convertType requested target type, must not be null
 * @return the converted value
 * @throws SQLFeatureNotSupportedException if the target type is null or the cast fails
 */
public static Object convertValue(final Object value, final Class<?> convertType) throws SQLFeatureNotSupportedException {
    ShardingSpherePreconditions.checkNotNull(convertType, () -> new SQLFeatureNotSupportedException("Type can not be null"));
    if (null == value) {
        return convertNullValue(convertType);
    }
    if (value.getClass() == convertType) {
        return value;
    }
    if (value instanceof LocalDateTime) {
        return convertLocalDateTimeValue((LocalDateTime) value, convertType);
    }
    if (value instanceof Timestamp) {
        return convertTimestampValue((Timestamp) value, convertType);
    }
    if (URL.class.equals(convertType)) {
        return convertURL(value);
    }
    if (value instanceof Number) {
        return convertNumberValue(value, convertType);
    }
    if (value instanceof Date) {
        return convertDateValue((Date) value, convertType);
    }
    if (value instanceof byte[]) {
        return convertByteArrayValue((byte[]) value, convertType);
    }
    if (boolean.class.equals(convertType)) {
        return convertBooleanValue(value);
    }
    if (String.class.equals(convertType)) {
        return value.toString();
    }
    try {
        return convertType.cast(value);
    } catch (final ClassCastException ignored) {
        // Cast failure is surfaced as an unsupported-feature error, matching JDBC semantics.
        throw new SQLFeatureNotSupportedException("getObject with type");
    }
}
|
/** Converting a LocalDateTime to Timestamp must equal Timestamp.valueOf of the same instant. */
@Test
void assertConvertLocalDateTimeValue() throws SQLException {
    LocalDateTime value = LocalDateTime.of(2021, Month.DECEMBER, 23, 19, 30);
    assertThat(ResultSetUtils.convertValue(value, Timestamp.class), is(Timestamp.valueOf(value)));
}
|
/**
 * Rejects a snapshot update that would delete the table's current snapshot
 * without adding any replacement snapshots.
 *
 * @param metadata current table metadata
 * @param addedSnapshots snapshots being appended by this update
 * @param deletedSnapshots snapshots being removed by this update
 * @throws InvalidIcebergSnapshotException if the latest snapshot is deleted with no additions
 */
void validateSnapshotsUpdate(
    TableMetadata metadata, List<Snapshot> addedSnapshots, List<Snapshot> deletedSnapshots) {
  Snapshot currentSnapshot = metadata.currentSnapshot();
  // Nothing to guard when there is no current snapshot, when new snapshots are
  // being added (the latest may then be deleted), or when nothing is deleted.
  if (currentSnapshot == null || !addedSnapshots.isEmpty() || deletedSnapshots.isEmpty()) {
    return;
  }
  long latestSnapshotId = currentSnapshot.snapshotId();
  Snapshot lastDeleted = deletedSnapshots.get(deletedSnapshots.size() - 1);
  if (lastDeleted.snapshotId() == latestSnapshotId) {
    throw new InvalidIcebergSnapshotException(
        String.format("Cannot delete the latest snapshot %s", latestSnapshotId));
  }
}
|
/**
 * Deleting the latest snapshot is only rejected when no new snapshots are added;
 * every add/delete combination that keeps or replaces the latest snapshot passes.
 */
@Test
void testValidateSnapshotsUpdateWithSnapshotMetadata() throws IOException {
    List<Snapshot> testSnapshots = IcebergTestUtil.getSnapshots();
    List<Snapshot> extraTestSnapshots = IcebergTestUtil.getExtraSnapshots();
    // Metadata whose current snapshot is the last element of testSnapshots.
    TableMetadata metadataWithSnapshots =
        TableMetadata.buildFrom(noSnapshotsMetadata)
            .setBranchSnapshot(testSnapshots.get(testSnapshots.size() - 1), SnapshotRef.MAIN_BRANCH)
            .build();
    Assertions.assertDoesNotThrow(
        () ->
            snapshotInspector.validateSnapshotsUpdate(
                metadataWithSnapshots, testSnapshots, Collections.emptyList()));
    // No validation error if snapshots are added and deleted
    Assertions.assertDoesNotThrow(
        () ->
            snapshotInspector.validateSnapshotsUpdate(
                metadataWithSnapshots, testSnapshots, testSnapshots));
    // No validation error if snapshots are added and deleted
    Assertions.assertDoesNotThrow(
        () ->
            snapshotInspector.validateSnapshotsUpdate(
                metadataWithSnapshots, extraTestSnapshots, testSnapshots));
    // Deleting the latest snapshot with nothing added must be rejected.
    Assertions.assertThrows(
        InvalidIcebergSnapshotException.class,
        () ->
            snapshotInspector.validateSnapshotsUpdate(
                metadataWithSnapshots, Collections.emptyList(), testSnapshots));
    Assertions.assertDoesNotThrow(
        () ->
            snapshotInspector.validateSnapshotsUpdate(
                metadataWithSnapshots,
                Collections.emptyList(),
                testSnapshots.subList(0, testSnapshots.size() - 1)));
}
|
/**
 * Checks whether the given string is empty.
 *
 * @param val the string to check
 * @return the result of {@link Utils#isEmpty(String)}
 * @deprecated use {@code Utils.isEmpty(String)} directly instead.
 */
@Deprecated
public static boolean isEmpty( String val ) {
    return Utils.isEmpty( val );
}
|
/** The String[] overload of isEmpty treats null and zero-length arrays as empty. */
@Test
public void testIsEmptyStringArray() {
    assertTrue( Const.isEmpty( (String[]) null ) );
    assertTrue( Const.isEmpty( new String[] {} ) );
    assertFalse( Const.isEmpty( new String[] { "test" } ) );
}
|
/**
 * Retains in {@code values} only the entries matching the given patterns;
 * varargs convenience overload that delegates to the {@code List}-based variant.
 *
 * @param values collection filtered in place
 * @param patterns patterns to match against (semantics defined by the List overload)
 */
public static void retainMatching(Collection<String> values,
    String... patterns) {
    retainMatching(values, Arrays.asList(patterns));
}
|
/** When no value matches the pattern, retainMatching must empty the collection. */
@Test
public void testRetainMatchingWithNoMatchingPattern() throws Exception {
    // "B" matches nothing, so "A" must be dropped.
    Collection<String> collection = stringToList("A");
    StringCollectionUtil.retainMatching(collection, "B");
    assertTrue(collection.isEmpty());
}
|
/**
 * Recursively merges two maps, with entries from {@code b} taking precedence over {@code a}.
 * Nested maps are merged recursively, collections are concatenated (original first),
 * and a {@code null} value never overwrites a non-null one.
 *
 * @param a base map, may be null
 * @param b overriding map, may be null
 * @return a new merged map, or {@code null} when both inputs are null
 */
public static Map<String, Object> merge(Map<String, Object> a, Map<String, Object> b) {
    if (a == null && b == null) {
        return null;
    }
    if (a == null || a.isEmpty()) {
        return copyMap(b);
    }
    if (b == null || b.isEmpty()) {
        return copyMap(a);
    }
    // Fixed raw-type usage of the previous implementation and replaced the
    // side-effecting three-arg collect with an explicit loop over b's entries.
    Map<String, Object> copy = copyMap(a);
    Map<String, Object> merged = new HashMap<>(copy.size());
    for (Map.Entry<String, Object> entry : b.entrySet()) {
        merged.put(entry.getKey(), mergeValues(copy.get(entry.getKey()), entry.getValue()));
    }
    copy.putAll(merged);
    return copy;
}

/**
 * Merges a single pair of values: non-null wins over null, maps merge recursively,
 * collections concatenate (original elements first), otherwise the new value wins.
 */
@SuppressWarnings("unchecked")
private static Object mergeValues(Object original, Object value) {
    if (value == null) {
        // Covers both-null as well: the (null) original is returned.
        return original;
    }
    if (original == null) {
        return value;
    }
    if (value instanceof Map && original instanceof Map) {
        return merge((Map<String, Object>) original, (Map<String, Object>) value);
    }
    if (value instanceof Collection && original instanceof Collection) {
        try {
            return Lists
                .newArrayList(
                    (Collection<?>) original,
                    (Collection<?>) value
                )
                .stream()
                .flatMap(Collection::stream)
                .toList();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    return value;
}
|
/** Deep merge must preserve a nested null value from "a" while applying overrides from "b". */
@SuppressWarnings("unchecked")
@Test
void mergeWithNull() {
    // HashMap is used because Map.of rejects null values.
    var mapWithNull = new HashMap<String, String>();
    mapWithNull.put("null", null);
    Map<String, Object> a = Map.of(
        "map", Map.of(
            "map_a", Map.of("sub", mapWithNull),
            "map_c", "c"
        )
    );
    Map<String, Object> b = Map.of(
        "map", Map.of(
            "map_c", "e",
            "map_d", "d"
        )
    );
    Map<String, Object> merge = MapUtils.merge(a, b);
    // b wins for map_c, map_d is added, map_a (with its nested null) is preserved.
    assertThat(((Map<String, Object>) merge.get("map")).size(), is(3));
    assertThat(((Map<String, Object>) merge.get("map")).get("map_c"), is("e"));
    assertThat(((Map<String, Object>) ((Map<String, Object>) ((Map<String, Object>) merge.get("map")).get("map_a")).get("sub")).get("null"), nullValue());
}
|
/**
 * Configures the cache to wrap values in soft references.
 * May be set at most once; a second value-strength configuration fails the state check.
 *
 * @return this builder, for chaining
 * @throws IllegalStateException if a value strength was already set
 */
@CanIgnoreReturnValue
public Caffeine<K, V> softValues() {
    requireState(valueStrength == null, "Value strength was already set to %s", valueStrength);
    valueStrength = Strength.SOFT;
    return this;
}
|
/** Smoke test: a builder with softValues() still produces a usable cache. */
@Test
public void softValues() {
    var cache = Caffeine.newBuilder().softValues().build();
    assertThat(cache).isNotNull();
}
|
/**
 * Reads Iceberg table metadata from the given metadata file location.
 *
 * @param metadataLocation URI string pointing at a metadata.json file
 * @return the parsed table metadata
 */
public TableMetadata readTableMetadata(String metadataLocation) {
    URI metadataLocationUri = URI.create(metadataLocation);
    // TODO: cache fileIO
    FileIO fileIO = fileIOFactory.getFileIO(metadataLocationUri);
    // NOTE(review): supplyAsync(...).join() blocks the caller anyway and wraps read
    // failures in CompletionException — confirm whether the async hop is intentional.
    return CompletableFuture.supplyAsync(() -> TableMetadataParser.read(fileIO, metadataLocation))
        .join();
}
|
/** Reads the bundled metadata.json from the classpath and checks the table UUID round-trips. */
@SneakyThrows
@Test
public void testGetTableMetadataFromLocalFS() {
    when(mockFileIOFactory.getFileIO(any())).thenReturn(new SimpleLocalFileIO());
    String metadataLocation =
        Objects.requireNonNull(this.getClass().getResource("/iceberg.metadata.json"))
            .toURI()
            .toString();
    TableMetadata tableMetadata = metadataService.readTableMetadata(metadataLocation);
    assertThat(tableMetadata.uuid()).isEqualTo("55d4dc69-5b14-4483-bfc8-f33b80f99f99");
}
|
/**
 * Enriches the given job stats into extended stats, updating internal state only when
 * the lock is available (a concurrent enrich in flight simply skips the update).
 *
 * @param jobStats newly observed job stats
 * @return the (possibly previously computed) extended job stats
 */
public JobStatsExtended enrich(JobStats jobStats) {
    JobStats latestJobStats = getLatestJobStats(jobStats, previousJobStats);
    if (lock.tryLock()) {
        // try/finally guarantees the lock is released even if a setter throws;
        // the previous implementation could leak the lock permanently.
        try {
            setFirstRelevantJobStats(latestJobStats);
            setJobStatsExtended(latestJobStats);
            setPreviousJobStats(latestJobStats);
        } finally {
            lock.unlock();
        }
    }
    return jobStatsExtended;
}
|
/** With succeeded count still 0 across two samples, no processing estimate can be produced. */
@Test
void estimatedTimeProcessingIsCalculated3() {
    JobStats firstJobStats = getJobStats(now().minusMillis(10), 100L, 0L, 0L, 100L);
    JobStats secondJobStats = getJobStats(now(), 99L, 0L, 0L, 101L);
    JobStatsExtended jobStatsExtended = enrich(firstJobStats, secondJobStats);
    assertThat(jobStatsExtended.getEstimation().isProcessingDone()).isFalse();
    assertThat(jobStatsExtended.getEstimation().isEstimatedProcessingFinishedInstantAvailable()).isFalse();
}
|
/**
 * Globally toggles whether OutsideScopeExceptions fill in stack traces.
 *
 * @param fillInStacktrace {@code true} to capture stack traces on creation
 * @throws IllegalStateException if plugins have been locked down and can no longer change
 */
public static void setFillInOutsideScopeExceptionStacktraces(boolean fillInStacktrace) {
    if (lockdown) {
        throw new IllegalStateException("Plugins can't be changed anymore");
    }
    fillInOutsideScopeExceptionStacktraces = fillInStacktrace;
}
|
/** Disabling stack-trace fill must yield OutsideScopeExceptions with empty stack traces. */
@Test
public void noStacktraceFill_shouldHaveNoStacktrace() {
    AutoDisposePlugins.setFillInOutsideScopeExceptionStacktraces(false);
    OutsideScopeException started = new OutsideScopeException("Lifecycle not started");
    assertThat(started.getStackTrace()).isEmpty();
}
|
/**
 * Resolves schema compatibility against a previous serializer snapshot.
 * Any snapshot that is not an {@code AvroSerializerSnapshot} is incompatible;
 * otherwise compatibility is decided by comparing the Avro schemas.
 *
 * @param oldSerializerSnapshot snapshot taken by the previous serializer
 * @return the resolved compatibility result
 */
@Override
public TypeSerializerSchemaCompatibility<T> resolveSchemaCompatibility(
        TypeSerializerSnapshot<T> oldSerializerSnapshot) {
    if (oldSerializerSnapshot instanceof AvroSerializerSnapshot) {
        AvroSerializerSnapshot<?> previousSnapshot = (AvroSerializerSnapshot<?>) oldSerializerSnapshot;
        return resolveSchemaCompatibility(previousSnapshot.schema, schema);
    }
    return TypeSerializerSchemaCompatibility.incompatible();
}
|
/**
 * Resolving a BOTH_REQUIRED schema against a FIRST_REQUIRED_LAST_OPTIONAL snapshot
 * must be incompatible.
 * NOTE(review): the method name says "Compatible" but the assertion is isIncompatible()
 * — consider renaming for clarity.
 */
@Test
void nonValidSchemaEvaluationShouldResultInCompatibleSerializers() {
    final AvroSerializer<GenericRecord> originalSerializer =
        new AvroSerializer<>(GenericRecord.class, FIRST_REQUIRED_LAST_OPTIONAL);
    final AvroSerializer<GenericRecord> newSerializer =
        new AvroSerializer<>(GenericRecord.class, BOTH_REQUIRED);
    TypeSerializerSnapshot<GenericRecord> originalSnapshot =
        originalSerializer.snapshotConfiguration();
    assertThat(
        newSerializer
            .snapshotConfiguration()
            .resolveSchemaCompatibility(originalSnapshot))
        .is(isIncompatible());
}
|
/**
 * Returns a page of login log records matching the request's filters.
 * Thin delegation to the mapper's paged query.
 *
 * @param pageReqVO paging and filter criteria
 * @return the matching page of login logs
 */
@Override
public PageResult<LoginLogDO> getLoginLogPage(LoginLogPageReqVO pageReqVO) {
    return loginLogMapper.selectPage(pageReqVO);
}
|
/** Inserts one matching record plus four near-misses and checks only the match is paged back. */
@Test
public void testGetLoginLogPage() {
    // mock data
    LoginLogDO loginLogDO = randomPojo(LoginLogDO.class, o -> {
        o.setUserIp("192.168.199.16");
        o.setUsername("wang");
        o.setResult(SUCCESS.getResult());
        o.setCreateTime(buildTime(2021, 3, 6));
    });
    loginLogMapper.insert(loginLogDO);
    // record whose status does not match
    loginLogMapper.insert(cloneIgnoreId(loginLogDO, o -> o.setResult(CAPTCHA_CODE_ERROR.getResult())));
    // record whose ip does not match
    loginLogMapper.insert(cloneIgnoreId(loginLogDO, o -> o.setUserIp("192.168.128.18")));
    // record whose username does not match
    loginLogMapper.insert(cloneIgnoreId(loginLogDO, o -> o.setUsername("yunai")));
    // record whose createTime does not match
    loginLogMapper.insert(cloneIgnoreId(loginLogDO, o -> o.setCreateTime(buildTime(2021, 2, 6))));
    // build the request parameters
    LoginLogPageReqVO reqVO = new LoginLogPageReqVO();
    reqVO.setUsername("wang");
    reqVO.setUserIp("192.168.199");
    reqVO.setStatus(true);
    reqVO.setCreateTime(buildBetweenTime(2021, 3, 5, 2021, 3, 7));
    // invoke
    PageResult<LoginLogDO> pageResult = loginLogService.getLoginLogPage(reqVO);
    // assert: exactly one record satisfies all the criteria
    assertEquals(1, pageResult.getTotal());
    assertEquals(1, pageResult.getList().size());
    assertPojoEquals(loginLogDO, pageResult.getList().get(0));
}
|
/**
 * Commits updated table metadata: writes a new metadata.json at the next version,
 * persists the pointer in the house table (unless this is a stage-create), and maps
 * persistence failures to the appropriate commit outcome (failed vs. unknown).
 * Snapshot additions/deletions serialized into the properties map are validated and
 * applied before the write.
 *
 * @param base previous metadata, or null on first commit
 * @param metadata metadata to commit
 */
@SuppressWarnings("checkstyle:MissingSwitchDefault")
@Override
protected void doCommit(TableMetadata base, TableMetadata metadata) {
    int version = currentVersion() + 1;
    CommitStatus commitStatus = CommitStatus.FAILURE;
    /* This method adds no fs scheme, and it persists in HTS that way. */
    final String newMetadataLocation = rootMetadataFileLocation(metadata, version);
    HouseTable houseTable = HouseTable.builder().build();
    try {
        // Now that we have metadataLocation we stamp it in metadata property.
        Map<String, String> properties = new HashMap<>(metadata.properties());
        failIfRetryUpdate(properties);
        String currentTsString = String.valueOf(Instant.now(Clock.systemUTC()).toEpochMilli());
        properties.put(getCanonicalFieldName("lastModifiedTime"), currentTsString);
        if (base == null) {
            // First commit of the table: creation time equals last-modified time.
            properties.put(getCanonicalFieldName("creationTime"), currentTsString);
        }
        // Previous tableLocation becomes the tableVersion; the new location takes its place.
        properties.put(
            getCanonicalFieldName("tableVersion"),
            properties.getOrDefault(
                getCanonicalFieldName("tableLocation"), CatalogConstants.INITIAL_VERSION));
        properties.put(getCanonicalFieldName("tableLocation"), newMetadataLocation);
        // Snapshot payloads are transported via properties and stripped before persisting.
        String serializedSnapshotsToPut = properties.remove(CatalogConstants.SNAPSHOTS_JSON_KEY);
        String serializedSnapshotRefs = properties.remove(CatalogConstants.SNAPSHOTS_REFS_KEY);
        boolean isStageCreate =
            Boolean.parseBoolean(properties.remove(CatalogConstants.IS_STAGE_CREATE_KEY));
        logPropertiesMap(properties);
        TableMetadata updatedMetadata = metadata.replaceProperties(properties);
        if (serializedSnapshotsToPut != null) {
            List<Snapshot> snapshotsToPut =
                SnapshotsUtil.parseSnapshots(fileIO, serializedSnapshotsToPut);
            Pair<List<Snapshot>, List<Snapshot>> snapshotsDiff =
                SnapshotsUtil.symmetricDifferenceSplit(snapshotsToPut, updatedMetadata.snapshots());
            List<Snapshot> appendedSnapshots = snapshotsDiff.getFirst();
            List<Snapshot> deletedSnapshots = snapshotsDiff.getSecond();
            snapshotInspector.validateSnapshotsUpdate(
                updatedMetadata, appendedSnapshots, deletedSnapshots);
            Map<String, SnapshotRef> snapshotRefs =
                serializedSnapshotRefs == null
                    ? new HashMap<>()
                    : SnapshotsUtil.parseSnapshotRefs(serializedSnapshotRefs);
            updatedMetadata =
                maybeAppendSnapshots(updatedMetadata, appendedSnapshots, snapshotRefs, true);
            updatedMetadata = maybeDeleteSnapshots(updatedMetadata, deletedSnapshots);
        }
        final TableMetadata updatedMtDataRef = updatedMetadata;
        metricsReporter.executeWithStats(
            () ->
                TableMetadataParser.write(updatedMtDataRef, io().newOutputFile(newMetadataLocation)),
            InternalCatalogMetricsConstant.METADATA_UPDATE_LATENCY);
        houseTable = houseTableMapper.toHouseTable(updatedMetadata);
        if (!isStageCreate) {
            houseTableRepository.save(houseTable);
        } else {
            /**
             * Refresh current metadata for staged tables from newly created metadata file and disable
             * "forced refresh" in {@link OpenHouseInternalTableOperations#commit(TableMetadata,
             * TableMetadata)}
             */
            refreshFromMetadataLocation(newMetadataLocation);
        }
        commitStatus = CommitStatus.SUCCESS;
    } catch (InvalidIcebergSnapshotException e) {
        throw new BadRequestException(e, e.getMessage());
    } catch (CommitFailedException e) {
        throw e;
    } catch (HouseTableCallerException
        | HouseTableNotFoundException
        | HouseTableConcurrentUpdateException e) {
        // Known house-table failures are definitively failed commits.
        throw new CommitFailedException(e);
    } catch (Throwable persistFailure) {
        // Try to reconnect and determine the commit status for unknown exception
        log.error(
            "Encounter unexpected error while updating metadata.json for table:" + tableIdentifier,
            persistFailure);
        commitStatus = checkCommitStatus(newMetadataLocation, metadata);
        switch (commitStatus) {
            case SUCCESS:
                log.debug("Calling doCommit succeeded");
                break;
            case FAILURE:
                // logging error and exception-throwing co-existence is needed, given the exception
                // handler in
                // org.apache.iceberg.BaseMetastoreCatalog.BaseMetastoreCatalogTableBuilder.create swallow
                // the
                // nested exception information.
                log.error("Exception details:", persistFailure);
                throw new CommitFailedException(
                    persistFailure,
                    String.format(
                        "Persisting metadata file %s at version %s for table %s failed while persisting to house table",
                        newMetadataLocation, version, GSON.toJson(houseTable)));
            case UNKNOWN:
                throw new CommitStateUnknownException(persistFailure);
        }
    } finally {
        // Metrics reflect the final outcome regardless of which path was taken above.
        switch (commitStatus) {
            case FAILURE:
                metricsReporter.count(InternalCatalogMetricsConstant.COMMIT_FAILED_CTR);
                break;
            case UNKNOWN:
                metricsReporter.count(InternalCatalogMetricsConstant.COMMIT_STATE_UNKNOWN);
                break;
            default:
                break; /*should never happen, kept to silence SpotBugs*/
        }
    }
}
|
/** Appending snapshots to a non-main branch must fail the commit with an unknown state. */
@Test
void testDoCommitAppendSnapshotsToNonMainBranch() throws IOException {
    List<Snapshot> testSnapshots = IcebergTestUtil.getSnapshots();
    Map<String, String> properties = new HashMap<>(BASE_TABLE_METADATA.properties());
    // Static-mock TableMetadataParser so no metadata file is actually written.
    try (MockedStatic<TableMetadataParser> ignoreWriteMock =
        Mockito.mockStatic(TableMetadataParser.class)) {
        properties.put(
            CatalogConstants.SNAPSHOTS_JSON_KEY,
            SnapshotsUtil.serializedSnapshots(testSnapshots.subList(0, 1)));
        // Ref points at "branch", not the main branch.
        properties.put(
            CatalogConstants.SNAPSHOTS_REFS_KEY,
            SnapshotsUtil.serializeMap(
                IcebergTestUtil.obtainSnapshotRefsFromSnapshot(testSnapshots.get(0), "branch")));
        properties.put(getCanonicalFieldName("tableLocation"), TEST_LOCATION);
        TableMetadata metadata = BASE_TABLE_METADATA.replaceProperties(properties);
        // verify throw an error when committing to non-main branch.
        Assertions.assertThrows(
            CommitStateUnknownException.class,
            () -> openHouseInternalTableOperations.doCommit(BASE_TABLE_METADATA, metadata));
    }
}
|
/**
 * Maps the execution result to a Kitchen exit code: any error count, or an
 * unsuccessful result, yields ERRORS_DURING_PROCESSING; otherwise SUCCESS.
 *
 * @return the Kitchen command executor exit code
 */
@VisibleForTesting
int getReturnCode() {
    if ( getResult().getNrErrors() != 0 ) {
        // Errors occurred: log and report a processing failure.
        getLog().logError( BaseMessages.getString( getPkgClazz(), "Kitchen.Error.FinishedWithErrors" ) );
        return CommandExecutorCodes.Kitchen.ERRORS_DURING_PROCESSING.getCode();
    }
    return getResult().getResult()
        ? CommandExecutorCodes.Kitchen.SUCCESS.getCode()
        : CommandExecutorCodes.Kitchen.ERRORS_DURING_PROCESSING.getCode();
}
|
/** A non-zero error count must map to the ERRORS_DURING_PROCESSING exit code. */
@Test
public void testReturnCodeWithErrors() {
    // Static-mock BaseMessages so no message bundles are needed.
    try ( MockedStatic<BaseMessages> baseMessagesMockedStatic = mockStatic( BaseMessages.class ) ) {
        baseMessagesMockedStatic.when( () -> BaseMessages.getString( any(), anyString() ) ).thenReturn( "" );
        when( result.getNrErrors() ).thenReturn( 1L );
        when( mockedKitchenCommandExecutor.getResult() ).thenReturn( result );
        when( mockedKitchenCommandExecutor.getLog() ).thenReturn( logChannelInterface );
        assertEquals( mockedKitchenCommandExecutor.getReturnCode(),
            CommandExecutorCodes.Kitchen.ERRORS_DURING_PROCESSING.getCode() );
    }
}
|
/**
 * Fetches progress for the given subtasks from the worker endpoints.
 * Convenience overload that builds a fresh {@link CompletionServiceHelper} and delegates.
 *
 * @param taskName parent task name
 * @param subtaskNames comma-separated subtask names, or null for all
 * @param executor executor for the parallel HTTP requests
 * @param connMgr HTTP connection manager
 * @param workerEndpoints worker instance -> base URL
 * @param requestHeaders headers added to each request
 * @param timeoutMs per-request timeout in milliseconds
 * @return subtask name -> progress payload
 * @throws Exception if the underlying progress fetch fails
 */
public synchronized Map<String, Object> getSubtaskProgress(String taskName, @Nullable String subtaskNames,
    Executor executor, HttpClientConnectionManager connMgr, Map<String, String> workerEndpoints,
    Map<String, String> requestHeaders, int timeoutMs)
    throws Exception {
    return getSubtaskProgress(taskName, subtaskNames,
        new CompletionServiceHelper(executor, connMgr, HashBiMap.create(0)), workerEndpoints, requestHeaders,
        timeoutMs);
}
|
/** When every worker responds, each subtask's progress string must come from its own worker. */
@Test
public void testGetSubtaskProgressWithResponse()
    throws Exception {
    TaskDriver taskDriver = mock(TaskDriver.class);
    JobConfig jobConfig = mock(JobConfig.class);
    when(taskDriver.getJobConfig(anyString())).thenReturn(jobConfig);
    JobContext jobContext = mock(JobContext.class);
    when(taskDriver.getJobContext(anyString())).thenReturn(jobContext);
    PinotHelixTaskResourceManager mgr =
        new PinotHelixTaskResourceManager(mock(PinotHelixResourceManager.class), taskDriver);
    // Canned HTTP responses: worker i reports on subtask i.
    CompletionServiceHelper httpHelper = mock(CompletionServiceHelper.class);
    CompletionServiceHelper.CompletionServiceResponse httpResp =
        new CompletionServiceHelper.CompletionServiceResponse();
    when(httpHelper.doMultiGetRequest(any(), any(), anyBoolean(), any(), anyInt())).thenReturn(httpResp);
    String[] workers = new String[]{"worker0", "worker1", "worker2"};
    Map<String, String> workerEndpoints = new HashMap<>();
    for (String worker : workers) {
        workerEndpoints.put(worker, "http://" + worker + ":9000");
    }
    String taskName = "Task_SegmentGenerationAndPushTask_someone";
    String[] subtaskNames = new String[3];
    Map<String, Integer> taskIdPartitionMap = new HashMap<>();
    for (int i = 0; i < 3; i++) {
        String subtaskName = taskName + "_" + i;
        subtaskNames[i] = subtaskName;
        taskIdPartitionMap.put(subtaskName, i);
        httpResp._httpResponses.put(workers[i],
            JsonUtils.objectToString(Collections.singletonMap(subtaskNames[i], "running on worker: " + i)));
    }
    Map<String, TaskConfig> taskConfigMap = new HashMap<>();
    for (String subtaskName : subtaskNames) {
        taskConfigMap.put(subtaskName, mock(TaskConfig.class));
    }
    when(jobConfig.getTaskConfigMap()).thenReturn(taskConfigMap);
    // Helix-side state: partition i assigned to worker i with a distinct task state.
    TaskPartitionState[] helixStates =
        new TaskPartitionState[]{TaskPartitionState.INIT, TaskPartitionState.RUNNING, TaskPartitionState.TASK_ERROR};
    when(jobContext.getTaskIdPartitionMap()).thenReturn(taskIdPartitionMap);
    when(jobContext.getAssignedParticipant(anyInt())).thenAnswer(
        invocation -> workers[(int) invocation.getArgument(0)]);
    when(jobContext.getPartitionState(anyInt())).thenAnswer(invocation -> helixStates[(int) invocation.getArgument(0)]);
    Map<String, Object> progress =
        mgr.getSubtaskProgress(taskName, StringUtils.join(subtaskNames, ','), httpHelper, workerEndpoints,
            Collections.emptyMap(), 1000);
    for (int i = 0; i < 3; i++) {
        String taskProgress = (String) progress.get(subtaskNames[i]);
        assertEquals(taskProgress, "running on worker: " + i);
    }
}
|
/**
 * Registers the temporary local file with the file watcher and blocks until
 * registration completes.
 * NOTE(review): the {@code application} and {@code quit} parameters are unused here —
 * presumably handled by the superclass or other overrides; confirm.
 *
 * @param application the editor application (unused in this override)
 * @param temporary local file to watch for edits
 * @param listener callback invoked on file changes
 * @param quit callback for application exit (unused in this override)
 * @throws IOException declared by the contract; not thrown directly here
 */
@Override
protected void watch(final Application application, final Local temporary, final FileWatcherListener listener, final ApplicationQuitCallback quit) throws IOException {
    if(log.isDebugEnabled()) {
        log.debug(String.format("Register %s in file watcher %s", temporary, watcher));
    }
    // Block (uninterruptibly) until the watcher acknowledges the registration.
    Uninterruptibles.awaitUninterruptibly(watcher.register(temporary, listener));
    if(log.isDebugEnabled()) {
        log.debug(String.format("Successfully registered %s in file watcher %s", temporary, watcher));
    }
}
|
/** Watching a path under a non-existent directory must raise NoSuchFileException. */
@Test(expected = NoSuchFileException.class)
public void testNotfound() throws Exception {
    final DefaultWatchEditor editor = new DefaultWatchEditor(new Host(new TestProtocol()), new Path("/remote", EnumSet.of(Path.Type.file)), new DisabledListProgressListener());
    editor.watch(new Application("com.app"), new Local(System.getProperty("java.io.tmpdir") + "/notfound", UUID.randomUUID().toString()), new DisabledFileWatcherListener(),
        new DisabledApplicationQuitCallback());
}
|
/**
 * Maps each deprecated rule key declared on the given rule to a
 * {@code SingleDeprecatedRuleKey} pointing at the rule's current repository and key.
 *
 * @param rule rule definition whose deprecated keys are converted
 * @return one entry per deprecated key; empty when the rule declares none
 */
public static Set<SingleDeprecatedRuleKey> from(RulesDefinition.Rule rule) {
    // Removed a stray rule.deprecatedRuleKeys() call whose result was discarded.
    return rule.deprecatedRuleKeys().stream()
        .map(r -> new SingleDeprecatedRuleKey()
            .setNewRepositoryKey(rule.repository().key())
            .setNewRuleKey(rule.key())
            .setOldRepositoryKey(r.repository())
            .setOldRuleKey(r.rule()))
        .collect(Collectors.toSet());
}
|
/** Each deprecated key yields one SingleDeprecatedRuleKey carrying both old and new coordinates. */
@Test
public void test_creation_from_RulesDefinitionRule() {
    // Creation from RulesDefinition.Rule
    ImmutableSet<RuleKey> deprecatedRuleKeys = ImmutableSet.of(
        RuleKey.of(randomAlphanumeric(50), randomAlphanumeric(50)),
        RuleKey.of(randomAlphanumeric(50), randomAlphanumeric(50)),
        RuleKey.of(randomAlphanumeric(50), randomAlphanumeric(50)));
    RulesDefinition.Repository repository = mock(RulesDefinition.Repository.class);
    when(repository.key()).thenReturn(randomAlphanumeric(50));
    RulesDefinition.Rule rule = mock(RulesDefinition.Rule.class);
    when(rule.key()).thenReturn(randomAlphanumeric(50));
    when(rule.deprecatedRuleKeys()).thenReturn(deprecatedRuleKeys);
    when(rule.repository()).thenReturn(repository);
    Set<SingleDeprecatedRuleKey> singleDeprecatedRuleKeys = SingleDeprecatedRuleKey.from(rule);
    assertThat(singleDeprecatedRuleKeys).hasSize(deprecatedRuleKeys.size());
    // Uuid is null before persistence; old keys come from the deprecated set, new keys from the rule.
    assertThat(singleDeprecatedRuleKeys)
        .extracting(SingleDeprecatedRuleKey::getUuid, SingleDeprecatedRuleKey::getOldRepositoryKey, SingleDeprecatedRuleKey::getOldRuleKey,
            SingleDeprecatedRuleKey::getNewRepositoryKey, SingleDeprecatedRuleKey::getNewRuleKey, SingleDeprecatedRuleKey::getOldRuleKeyAsRuleKey)
        .containsExactlyInAnyOrder(
            deprecatedRuleKeys.stream().map(
                r -> tuple(null, r.repository(), r.rule(), rule.repository().key(), rule.key(), RuleKey.of(r.repository(), r.rule())))
                .toList().toArray(new Tuple[deprecatedRuleKeys.size()]));
}
|
/**
 * Invokes the given callable and returns its result, converting any checked
 * (or unchecked) exception it throws into a {@link RuntimeException} whose
 * cause is the original exception.
 *
 * @param callable the computation to run
 * @param <T>      the result type
 * @return the callable's result
 * @throws RuntimeException wrapping whatever the callable threw
 */
public static <T> T wrap(Callable<T> callable) {
    try {
        return callable.call();
    } catch (Exception cause) {
        // Always wrap, so callers only ever deal with unchecked exceptions.
        throw new RuntimeException(cause);
    }
}
|
// Division by zero throws ArithmeticException inside the callable; wrap is
// expected to rethrow it wrapped in a RuntimeException.
@Test(expected = RuntimeException.class)
public void testWrap() {
    Assert.wrap(() -> {
        int a = 1 / 0;
        return true;
    });
}
|
/**
 * Adds the given value to this feature set.
 *
 * @param value the value to add; must not be null
 * @return this, to allow call chaining
 * @throws NullPointerException if {@code value} is null (message "value")
 */
public FeatureSet addValue(String value) {
    Objects.requireNonNull(value, "value");
    values.add(value);
    return this;
}
|
// addValue(null) must throw NPE with message "value" and must not modify the set.
@Test
void requireThatValueIsMandatoryInSetter() {
    FeatureSet node = new FeatureSet("foo", "bar");
    try {
        node.addValue(null);
        fail();
    } catch (NullPointerException e) {
        assertEquals("value", e.getMessage());
    }
    // The failed add must leave only the constructor-supplied value.
    assertValues(List.of("bar"), node);
}
|
/**
 * Applies the point-deduction discount to the price calculation result.
 * Leaves {@code usePoint} at 0 and returns early when points were not
 * requested, point deduction is disabled in the member config, or the user
 * has no point balance.
 */
@Override
public void calculate(TradePriceCalculateReqBO param, TradePriceCalculateRespBO result) {
    // Default: no points used
    result.setUsePoint(0);
    // 1.1 Check whether the caller asked to use points
    if (!BooleanUtil.isTrue(param.getPointStatus())) {
        result.setUsePoint(0); // redundant with the default above; kept as-is
        return;
    }
    // 1.2 Check whether point deduction is enabled in the member configuration
    MemberConfigRespDTO config = memberConfigApi.getConfig();
    if (!isDeductPointEnable(config)) {
        return;
    }
    // 1.3 Check the user's point balance
    MemberUserRespDTO user = memberUserApi.getUser(param.getUserId());
    if (user.getPoint() == null || user.getPoint() <= 0) {
        return;
    }
    // 2.1 Compute the discount amount granted by the points
    int pointPrice = calculatePointPrice(config, user.getPoint(), result);
    // 2.2 Apportion the used points and the deduction amount across selected items
    List<TradePriceCalculateRespBO.OrderItem> orderItems = filterList(result.getItems(), TradePriceCalculateRespBO.OrderItem::getSelected);
    List<Integer> dividePointPrices = TradePriceCalculatorHelper.dividePrice(orderItems, pointPrice);
    List<Integer> divideUsePoints = TradePriceCalculatorHelper.dividePrice(orderItems, result.getUsePoint());
    // 3.1 Record the promotion detail (user-facing strings intentionally left in Chinese)
    TradePriceCalculatorHelper.addPromotion(result, orderItems,
            param.getUserId(), "积分抵扣", PromotionTypeEnum.POINT.getType(),
            StrUtil.format("积分抵扣:省 {} 元", TradePriceCalculatorHelper.formatPrice(pointPrice)),
            dividePointPrices);
    // 3.2 Update each SKU's share of the discount and recompute its pay price
    for (int i = 0; i < orderItems.size(); i++) {
        TradePriceCalculateRespBO.OrderItem orderItem = orderItems.get(i);
        orderItem.setPointPrice(dividePointPrices.get(i));
        orderItem.setUsePoint(divideUsePoints.get(i));
        TradePriceCalculatorHelper.recountPayPrice(orderItem);
    }
    // Recompute order-level totals from the updated items.
    TradePriceCalculatorHelper.recountAllPrice(result);
}
|
// When pointStatus is false, calculate() must leave the result without any
// point usage or point discount.
@Test
public void testCalculate_PointStatusFalse() {
    // Prepare arguments
    TradePriceCalculateReqBO param = new TradePriceCalculateReqBO()
            .setUserId(233L).setPointStatus(false) // whether to use points
            .setItems(asList(
                    new TradePriceCalculateReqBO.Item().setSkuId(10L).setCount(2).setSelected(true), // would use points
                    new TradePriceCalculateReqBO.Item().setSkuId(20L).setCount(3).setSelected(true), // would use points
                    new TradePriceCalculateReqBO.Item().setSkuId(30L).setCount(5).setSelected(false) // not selected, no points
            ));
    TradePriceCalculateRespBO result = new TradePriceCalculateRespBO()
            .setType(TradeOrderTypeEnum.NORMAL.getType())
            .setPrice(new TradePriceCalculateRespBO.Price())
            .setPromotions(new ArrayList<>())
            .setItems(asList(
                    new TradePriceCalculateRespBO.OrderItem().setSkuId(10L).setCount(2).setSelected(true)
                            .setPrice(100).setSpuId(1L),
                    new TradePriceCalculateRespBO.OrderItem().setSkuId(20L).setCount(3).setSelected(true)
                            .setPrice(50).setSpuId(2L),
                    new TradePriceCalculateRespBO.OrderItem().setSkuId(30L).setCount(5).setSelected(false)
                            .setPrice(30).setSpuId(3L)
            ));
    // Make sure prices are initialized before the calculator runs
    TradePriceCalculatorHelper.recountPayPrice(result.getItems());
    TradePriceCalculatorHelper.recountAllPrice(result);
    // Invoke
    tradePointUsePriceCalculator.calculate(param, result);
    // Assert: no points were used
    assertNotUsePoint(result);
}
|
/**
 * Returns one page of distinct group ids from the config_info table.
 *
 * @param page     1-based page number
 * @param pageSize number of rows per page
 * @return the group ids on the requested page
 */
@Override
public List<String> getGroupIdList(int page, int pageSize) {
    // Resolve the dialect-specific mapper for the config_info table.
    ConfigInfoMapper mapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
            TableConstant.CONFIG_INFO);
    // Translate the 1-based page number into a row offset.
    int offset = (page - 1) * pageSize;
    MapperResult result = mapper.getGroupIdList(new MapperContext(offset, pageSize));
    return jt.queryForList(result.getSql(), result.getParamList().toArray(), String.class);
}
|
// getGroupIdList should return exactly what the underlying JdbcTemplate query yields.
@Test
void testGetGroupIdList() {
    int page = 10;
    int pageSize = 100;
    // Mock the paged group-id query result.
    List<String> groupStrings = Arrays.asList("group1", "group2", "group3");
    when(jdbcTemplate.queryForList(anyString(), eq(new Object[] {}), eq(String.class))).thenReturn(groupStrings);
    // Execute against the mocked template.
    List<String> returnGroups = externalConfigInfoPersistService.getGroupIdList(page, pageSize);
    // Expect the mocked rows to be passed through unchanged.
    assertEquals(groupStrings, returnGroups);
}
|
/**
 * Converts a column value rendered as a String into the Java object matching
 * the declared JDBC {@code sqlType}; {@code mysqlType} refines the handling of
 * BIGINT (unsigned goes to BigInteger) and of empty strings (only text-like
 * types keep an empty string, others become null). Zero dates/timestamps
 * ("0000-00-00...") become null. On any conversion failure the error is logged
 * and the raw String value is returned unchanged.
 *
 * @param tableName  table name, used only for error logging
 * @param columnName column name, used only for error logging
 * @param value      the stringified column value; may be null
 * @param sqlType    a {@link java.sql.Types} constant
 * @param mysqlType  the MySQL column type name
 * @return the converted value, null for SQL NULL/zero dates, or the raw
 *         String if conversion failed
 */
public static Object typeConvert(String tableName ,String columnName, String value, int sqlType, String mysqlType) {
    // Null in, null out; an empty string is only meaningful for text-like types.
    if (value == null
        || (value.equals("") && !(isText(mysqlType) || sqlType == Types.CHAR || sqlType == Types.VARCHAR || sqlType == Types.LONGVARCHAR))) {
        return null;
    }
    try {
        Object res;
        switch (sqlType) {
            case Types.INTEGER:
                res = Integer.parseInt(value);
                break;
            case Types.SMALLINT:
                res = Short.parseShort(value);
                break;
            case Types.BIT:
            case Types.TINYINT:
                res = Byte.parseByte(value);
                break;
            case Types.BIGINT:
                // Unsigned bigint may exceed Long.MAX_VALUE, so use BigInteger.
                if (mysqlType.startsWith("bigint") && mysqlType.endsWith("unsigned")) {
                    res = new BigInteger(value);
                } else {
                    res = Long.parseLong(value);
                }
                break;
            // case Types.BIT:
            case Types.BOOLEAN:
                res = !"0".equals(value);
                break;
            case Types.DOUBLE:
            case Types.FLOAT:
                res = Double.parseDouble(value);
                break;
            case Types.REAL:
                res = Float.parseFloat(value);
                break;
            case Types.DECIMAL:
            case Types.NUMERIC:
                res = new BigDecimal(value);
                break;
            case Types.BINARY:
            case Types.VARBINARY:
            case Types.LONGVARBINARY:
            case Types.BLOB:
                // ISO-8859-1 maps each char 0-255 to one byte, preserving raw binary.
                res = value.getBytes("ISO-8859-1");
                break;
            case Types.DATE:
                // MySQL zero dates cannot be represented; map them to null.
                if (!value.startsWith("0000-00-00")) {
                    java.util.Date date = Util.parseDate(value);
                    if (date != null) {
                        res = new Date(date.getTime());
                    } else {
                        res = null;
                    }
                } else {
                    res = null;
                }
                break;
            case Types.TIME: {
                java.util.Date date = Util.parseDate(value);
                if (date != null) {
                    res = new Time(date.getTime());
                } else {
                    res = null;
                }
                break;
            }
            case Types.TIMESTAMP:
                if (!value.startsWith("0000-00-00")) {
                    java.util.Date date = Util.parseDate(value);
                    if (date != null) {
                        res = new Timestamp(date.getTime());
                    } else {
                        res = null;
                    }
                } else {
                    res = null;
                }
                break;
            case Types.CLOB:
            default:
                // Unrecognized types fall through as plain strings.
                res = value;
                break;
        }
        return res;
    } catch (Exception e) {
        // Best-effort conversion: log and fall back to the raw string.
        logger.error("table: {} column: {}, failed convert type {} to {}", tableName, columnName, value, sqlType);
        return value;
    }
}
|
// sqlType 12 is Types.VARCHAR: a VARCHAR value must be returned as-is.
@Test
public void typeConvertInputNotNullNotNullNotNullPositiveNotNullOutputNotNull() {
    // Arrange
    final String tableName = "1";
    final String columnName = "Bar";
    final String value = "1";
    final int sqlType = 12;
    final String mysqlType = "2";
    // Act
    final Object actual =
        JdbcTypeUtil.typeConvert(tableName, columnName, value, sqlType, mysqlType);
    // Assert result
    Assert.assertEquals("1", actual);
}
|
/**
 * Creates a load balancer whose ready-subchannel picker rotates round-robin.
 *
 * @param helper the gRPC helper passed through to the balancer
 * @return a new round-robin load balancer
 */
@Override
public LoadBalancer newLoadBalancer(final LoadBalancer.Helper helper) {
    return new AbstractLoadBalancer(helper) {
        @Override
        protected AbstractReadyPicker newPicker(final List<Subchannel> list) {
            // Rotate over the currently ready subchannels.
            return new RoundRobinPicker(list);
        }
    };
}
|
// The provider must always return a non-null balancer for any helper.
@Test
public void testNewLoadBalancer() {
    LoadBalancer.Helper helper = mock(LoadBalancer.Helper.class);
    LoadBalancer loadBalancer = roundRobinLoadBalancerProvider.newLoadBalancer(helper);
    assertNotNull(loadBalancer);
}
|
/**
 * Sets the preservePathElements flag.
 * NOTE(review): appears to control whether directory components of the file
 * name are kept in produced archive entries — confirm against the marshaller.
 */
public void setPreservePathElements(boolean preservePathElements) {
    this.preservePathElements = preservePathElements;
}
|
// With preservePathElements enabled, the zip entry keeps the "poems/" folder
// prefix while the FILE_NAME header only gains the ".zip" suffix.
@Test
public void testZipWithPreservedPathElements() throws Exception {
    zip.setPreservePathElements(true);
    getMockEndpoint("mock:zip").expectedBodiesReceived(getZippedTextInFolder("poems/", "poems/poem.txt"));
    getMockEndpoint("mock:zip").expectedHeaderReceived(FILE_NAME, "poem.txt.zip");
    template.sendBodyAndHeader("direct:zip", TEXT, FILE_NAME, "poems/poem.txt");
    MockEndpoint.assertIsSatisfied(context);
}
|
/**
 * Aggregates the given timeline entities into a single result entity.
 * Entities are first grouped into per-group aggregation status tables, then
 * folded into one entity, with or without group ids in the result metrics.
 *
 * @param entities             the entities to aggregate
 * @param resultEntityId       id for the aggregated entity
 * @param resultEntityType     type for the aggregated entity
 * @param needsGroupIdInResult whether result metrics carry their group id
 * @return the aggregated entity
 */
public static TimelineEntity aggregateEntities(
    TimelineEntities entities, String resultEntityId,
    String resultEntityType, boolean needsGroupIdInResult) {
  ConcurrentMap<String, AggregationStatusTable> groups =
      new ConcurrentHashMap<>();
  updateAggregateStatus(entities, groups, null);
  // Fold the grouped status tables into a single entity.
  return needsGroupIdInResult
      ? aggregate(groups, resultEntityId, resultEntityType)
      : aggregateWithoutGroupId(groups, resultEntityId, resultEntityType);
}
|
// Exercises aggregateEntities both with group ids in the result (metric names
// are group-prefixed, hence startsWith) and without (exact metric names).
@Test
void testAggregation() throws Exception {
  // Test aggregation with multiple groups.
  int groups = 3;
  int n = 50;
  TimelineEntities testEntities = generateTestEntities(groups, n);
  TimelineEntity resultEntity = TimelineCollector.aggregateEntities(
      testEntities, "test_result", "TEST_AGGR", true);
  // Three metrics per group are expected.
  assertThat(resultEntity.getMetrics()).hasSize(groups * 3);
  for (int i = 0; i < groups; i++) {
    Set<TimelineMetric> metrics = resultEntity.getMetrics();
    for (TimelineMetric m : metrics) {
      if (m.getId().startsWith("HDFS_BYTES_WRITE")) {
        assertEquals(100 * n, m.getSingleDataValue().intValue());
      } else if (m.getId().startsWith("VCORES_USED")) {
        assertEquals(3 * n, m.getSingleDataValue().intValue());
      } else if (m.getId().startsWith("TXN_FINISH_TIME")) {
        assertEquals(n - 1, m.getSingleDataValue());
      } else {
        fail("Unrecognized metric! " + m.getId());
      }
    }
  }
  // Test aggregation with a single group.
  TimelineEntities testEntities1 = generateTestEntities(1, n);
  TimelineEntity resultEntity1 = TimelineCollector.aggregateEntities(
      testEntities1, "test_result", "TEST_AGGR", false);
  assertThat(resultEntity1.getMetrics()).hasSize(3);
  Set<TimelineMetric> metrics = resultEntity1.getMetrics();
  for (TimelineMetric m : metrics) {
    if (m.getId().equals("HDFS_BYTES_WRITE")) {
      assertEquals(100 * n, m.getSingleDataValue().intValue());
    } else if (m.getId().equals("VCORES_USED")) {
      assertEquals(3 * n, m.getSingleDataValue().intValue());
    } else if (m.getId().equals("TXN_FINISH_TIME")) {
      assertEquals(n - 1, m.getSingleDataValue());
    } else {
      fail("Unrecognized metric! " + m.getId());
    }
  }
}
|
/**
 * Builds an {@link ExecutorConfig} from loose settings. The address is split
 * as "host" or "host:port"; when no port is given it defaults to 8081. When
 * the address is absent, host and port stay null.
 */
public static ExecutorConfig build(
        String type,
        String address,
        Integer checkpoint,
        Integer parallelism,
        boolean useSqlFragment,
        boolean useStatementSet,
        boolean useBatchModel,
        String savePointPath,
        String jobName,
        Map<String, String> config,
        Map<String, String> variables) {
    String host = null;
    Integer port = null;
    if (Asserts.isNotNullString(address)) {
        // Split once; the host is always the first segment.
        String[] parts = address.split(":");
        host = parts[0];
        port = parts.length > 1 ? Integer.parseInt(parts[1]) : 8081;
    }
    return ExecutorConfig.builder()
            .type(type)
            .host(host)
            .port(port)
            .checkpoint(checkpoint)
            .parallelism(parallelism)
            .useSqlFragment(useSqlFragment)
            .useStatementSet(useStatementSet)
            .useBatchModel(useBatchModel)
            .savePointPath(savePointPath)
            .jobName(jobName)
            .config(config)
            .variables(variables)
            .build();
}
|
// TODO(review): empty placeholder — add assertions covering the host:port
// parsing paths of ExecutorConfig.build (address with port, without port,
// and null/blank address) once suitable fixtures exist.
@Test
void build() {}
|
/**
 * @return false — open statements are not kept across a transaction rollback
 */
@Override
public boolean supportsOpenStatementsAcrossRollback() {
    return false;
}
|
// The metadata must report that open statements do not survive rollback.
@Test
void assertSupportsOpenStatementsAcrossRollback() {
    assertFalse(metaData.supportsOpenStatementsAcrossRollback());
}
|
/**
 * Writes metadata for every file via the session's {@link Metadata} feature,
 * checking for cooperative cancellation between files.
 *
 * @return true when all files have been processed
 * @throws ConnectionCanceledException if the worker was canceled mid-run
 */
@Override
public Boolean run(final Session<?> session) throws BackgroundException {
    final Metadata feature = session.getFeature(Metadata.class);
    if(log.isDebugEnabled()) {
        log.debug(String.format("Run with feature %s", feature));
    }
    for(Path file : files) {
        // Abort between files if the user canceled.
        if(this.isCanceled()) {
            throw new ConnectionCanceledException();
        }
        this.write(session, feature, file);
    }
    return true;
}
|
// Updating metadata with a key explicitly set to null must keep the previous
// value ("hash") rather than dropping the key, while a changed key ("key")
// takes the new value ("v2"). cleanup() must not run inside the worker itself.
@Test
public void testRunUpdated() throws Exception {
    final List<Path> files = new ArrayList<>();
    final Path p = new Path("a", EnumSet.of(Path.Type.file));
    files.add(p);
    // Metadata currently attached to the file.
    final Map<String, String> previous = new HashMap<>();
    previous.put("nullified", "hash");
    previous.put("key", "v1");
    p.attributes().setMetadata(previous);
    // Requested update: null means "keep previous", "v2" means "replace".
    final Map<String, String> updated = new HashMap<>();
    updated.put("nullified", null);
    updated.put("key", "v2");
    WriteMetadataWorker worker = new WriteMetadataWorker(files, updated, false, new DisabledProgressListener()) {
        @Override
        public void cleanup(final Boolean map) {
            fail();
        }
    };
    final AtomicBoolean call = new AtomicBoolean();
    worker.run(new NullSession(new Host(new TestProtocol())) {
        @Override
        @SuppressWarnings("unchecked")
        public <T> T _getFeature(final Class<T> type) {
            if(type == Metadata.class) {
                return (T) new Metadata() {
                    @Override
                    public Map<String, String> getDefault(final Local local) {
                        return Collections.emptyMap();
                    }
                    @Override
                    public Map<String, String> getMetadata(final Path file) {
                        throw new UnsupportedOperationException();
                    }
                    @Override
                    public void setMetadata(final Path file, final TransferStatus status) {
                        // Verify merge semantics of the worker.
                        assertTrue(status.getMetadata().containsKey("nullified"));
                        assertTrue(status.getMetadata().containsKey("key"));
                        assertEquals("v2", status.getMetadata().get("key"));
                        assertEquals("hash", status.getMetadata().get("nullified"));
                        call.set(true);
                    }
                };
            }
            return super._getFeature(type);
        }
    });
    // setMetadata must actually have been invoked.
    assertTrue(call.get());
}
|
/**
 * Creates or updates a user (super-admin only). Validates that username and
 * password are present and that the password passes the weak-password check
 * before delegating to the Spring Security user service.
 *
 * @param isCreate true to create a new user, false to update an existing one
 * @param user     the user payload; username and password are required
 * @throws BadRequestException if a field is empty or the password is weak
 * @throws UnsupportedOperationException if the configured user service is not
 *         a SpringSecurityUserService
 */
@PreAuthorize(value = "@permissionValidator.isSuperAdmin()")
@PostMapping("/users")
public void createOrUpdateUser(
    @RequestParam(value = "isCreate", defaultValue = "false") boolean isCreate,
    @RequestBody UserPO user) {
  if (StringUtils.isContainEmpty(user.getUsername(), user.getPassword())) {
    throw new BadRequestException("Username and password can not be empty.");
  }
  CheckResult pwdCheckRes = passwordChecker.checkWeakPassword(user.getPassword());
  if (!pwdCheckRes.isSuccess()) {
    throw new BadRequestException(pwdCheckRes.getMessage());
  }
  if (userService instanceof SpringSecurityUserService) {
    if (isCreate) {
      ((SpringSecurityUserService) userService).create(user);
    } else {
      ((SpringSecurityUserService) userService).update(user);
    }
  } else {
    throw new UnsupportedOperationException("Create or update user operation is unsupported");
  }
}
|
// A user with non-empty credentials and a passing password check must be
// accepted without throwing.
@Test
public void testCreateOrUpdateUser() {
    UserPO user = new UserPO();
    user.setUsername("username");
    user.setPassword("password");
    // Stub the password checker to accept any password.
    Mockito.when(userPasswordChecker.checkWeakPassword(Mockito.anyString()))
        .thenReturn(new CheckResult(Boolean.TRUE, ""));
    userInfoController.createOrUpdateUser(true, user);
}
|
/**
 * Configures SASL callback handlers, login managers and (for SASL_SSL) the
 * SSL factory for every enabled mechanism. On any failure, partially created
 * resources are closed and the cause is rethrown wrapped in a KafkaException.
 */
@SuppressWarnings("unchecked")
@Override
public void configure(Map<String, ?> configs) throws KafkaException {
    try {
        this.configs = configs;
        // Servers get per-mechanism callback handlers plus re-authentication
        // timeouts; clients get a single client callback handler.
        if (connectionMode == ConnectionMode.SERVER) {
            createServerCallbackHandlers(configs);
            createConnectionsMaxReauthMsMap(configs);
        } else
            createClientCallbackHandler(configs);
        for (Map.Entry<String, AuthenticateCallbackHandler> entry : saslCallbackHandlers.entrySet()) {
            String mechanism = entry.getKey();
            entry.getValue().configure(configs, mechanism, jaasContexts.get(mechanism).configurationEntries());
        }
        Class<? extends Login> defaultLoginClass = defaultLoginClass();
        // Server-side GSSAPI: build the Kerberos principal-to-local mapper.
        if (connectionMode == ConnectionMode.SERVER && jaasContexts.containsKey(SaslConfigs.GSSAPI_MECHANISM)) {
            String defaultRealm;
            try {
                defaultRealm = defaultKerberosRealm();
            } catch (Exception ke) {
                // Realm lookup is best-effort; fall back to an empty realm.
                defaultRealm = "";
            }
            List<String> principalToLocalRules = (List<String>) configs.get(BrokerSecurityConfigs.SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_CONFIG);
            if (principalToLocalRules != null)
                kerberosShortNamer = KerberosShortNamer.fromUnparsedRules(defaultRealm, principalToLocalRules);
        }
        for (Map.Entry<String, JaasContext> entry : jaasContexts.entrySet()) {
            String mechanism = entry.getKey();
            // With static JAAS configuration, use KerberosLogin if Kerberos is enabled. With dynamic JAAS configuration,
            // use KerberosLogin only for the LoginContext corresponding to GSSAPI
            LoginManager loginManager = LoginManager.acquireLoginManager(entry.getValue(), mechanism, defaultLoginClass, configs);
            loginManagers.put(mechanism, loginManager);
            Subject subject = loginManager.subject();
            subjects.put(mechanism, subject);
            if (connectionMode == ConnectionMode.SERVER && mechanism.equals(SaslConfigs.GSSAPI_MECHANISM))
                maybeAddNativeGssapiCredentials(subject);
        }
        if (this.securityProtocol == SecurityProtocol.SASL_SSL) {
            // Disable SSL client authentication as we are using SASL authentication
            this.sslFactory = new SslFactory(connectionMode, sslClientAuthOverride, isInterBrokerListener);
            this.sslFactory.configure(configs);
        }
    } catch (Throwable e) {
        // Release anything partially initialized before propagating.
        close();
        throw new KafkaException(e);
    }
}
|
// Channel builders for every SASL mechanism must tolerate a config map that
// also contains all broker-side security config keys (set to dummy values).
@Test
public void testClientChannelBuilderWithBrokerConfigs() throws Exception {
    Map<String, Object> configs = new HashMap<>();
    CertStores certStores = new CertStores(false, "client", "localhost");
    configs.putAll(certStores.getTrustingConfig(certStores));
    configs.put(SaslConfigs.SASL_KERBEROS_SERVICE_NAME, "kafka");
    configs.putAll(new ConfigDef().withClientSaslSupport().parse(configs));
    // Inject every BrokerSecurityConfigs *_CONFIG key with a placeholder value.
    for (Field field : BrokerSecurityConfigs.class.getFields()) {
        if (field.getName().endsWith("_CONFIG"))
            configs.put(field.get(BrokerSecurityConfigs.class).toString(), "somevalue");
    }
    SaslChannelBuilder plainBuilder = createChannelBuilder(SecurityProtocol.SASL_PLAINTEXT, "PLAIN");
    plainBuilder.configure(configs);
    SaslChannelBuilder gssapiBuilder = createChannelBuilder(SecurityProtocol.SASL_PLAINTEXT, "GSSAPI");
    gssapiBuilder.configure(configs);
    SaslChannelBuilder oauthBearerBuilder = createChannelBuilder(SecurityProtocol.SASL_PLAINTEXT, "OAUTHBEARER");
    oauthBearerBuilder.configure(configs);
    SaslChannelBuilder scramBuilder = createChannelBuilder(SecurityProtocol.SASL_PLAINTEXT, "SCRAM-SHA-256");
    scramBuilder.configure(configs);
    SaslChannelBuilder saslSslBuilder = createChannelBuilder(SecurityProtocol.SASL_SSL, "PLAIN");
    saslSslBuilder.configure(configs);
}
|
/**
 * Builds a SmileResponse from the raw HTTP response. SMILE decoding is only
 * attempted when the Content-Type header matches the SMILE media type;
 * otherwise the undecoded bytes are carried through.
 */
@Override
public SmileResponse<T> handle(Request request, Response response)
{
    byte[] bytes = readResponseBytes(response);
    String contentType = response.getHeader(CONTENT_TYPE);
    if ((contentType == null) || !MediaType.parse(contentType).is(MEDIA_TYPE_SMILE)) {
        // Not SMILE: return the raw bytes without attempting decoding.
        return new SmileResponse<>(response.getStatusCode(), response.getHeaders(), bytes);
    }
    return new SmileResponse<>(response.getStatusCode(), response.getHeaders(), smileCodec, bytes);
}
|
// Bytes that are not valid SMILE must yield a value-less response carrying
// the decoding exception and the original raw bytes.
@Test
public void testInvalidSmile()
{
    byte[] invalidSmileBytes = "test".getBytes(UTF_8);
    SmileResponse<User> response = handler.handle(null, mockResponse(OK, MEDIA_TYPE_SMILE, invalidSmileBytes));
    assertFalse(response.hasValue());
    assertEquals(response.getException().getMessage(), format("Unable to create %s from SMILE response", User.class));
    assertTrue(response.getException().getCause() instanceof IllegalArgumentException);
    assertEquals(response.getException().getCause().getMessage(), "Invalid SMILE bytes for [simple type, class com.facebook.presto.server.smile.TestFullSmileResponseHandler$User]");
    // The raw bytes must still be accessible despite the failure.
    assertEquals(response.getSmileBytes(), invalidSmileBytes);
    assertEquals(response.getResponseBytes(), response.getSmileBytes());
}
|
/**
 * Starts building an HTTP POST request.
 *
 * @return a request sender preconfigured with {@code HttpMethod.POST}
 */
public final RequestSender post() {
    return request(HttpMethod.POST);
}
|
// Regression test for issue 777: several receiver styles must observe the
// 400 response body (or reason phrase when the body is empty) and propagate
// it as the error message, both for an empty and a non-empty error body.
@Test
void testIssue777() {
    disposableServer = createServer()
            .route(r ->
                r.post("/empty", (req, res) -> {
                    // Just consume the incoming body
                    req.receive().subscribe();
                    return res.status(400)
                              .header(HttpHeaderNames.CONNECTION, "close")
                              .send(Mono.empty());
                })
                 .post("/test", (req, res) -> {
                    // Just consume the incoming body
                    req.receive().subscribe();
                    return res.status(400)
                              .header(HttpHeaderNames.CONNECTION, "close")
                              .sendString(Mono.just("Test"));
                }))
            .bindNow();
    HttpClient client = createHttpClientForContextWithAddress();
    // Variant 1: map non-OK responses to an error carrying the body text,
    // falling back to the reason phrase when the body is empty.
    BiFunction<HttpClientResponse, ByteBufMono, Mono<String>> receiver =
            (resp, bytes) -> {
                if (!Objects.equals(HttpResponseStatus.OK, resp.status())) {
                    return bytes.asString()
                                .switchIfEmpty(Mono.just(resp.status().reasonPhrase()))
                                .flatMap(text -> Mono.error(new RuntimeException(text)));
                }
                return bytes.asString();
            };
    doTestIssue777_1(client, "/empty", "Bad Request", receiver);
    doTestIssue777_1(client, "/test", "Test", receiver);
    // Variant 2: fixed error message regardless of the body.
    receiver = (resp, bytes) -> {
        if (Objects.equals(HttpResponseStatus.OK, resp.status())) {
            return bytes.asString();
        }
        return Mono.error(new RuntimeException("error"));
    };
    doTestIssue777_1(client, "/empty", "error", receiver);
    doTestIssue777_1(client, "/test", "error", receiver);
    // Variant 3: zip body (or reason phrase) together with the response.
    BiFunction<HttpClientResponse, ByteBufMono, Mono<Tuple2<String, HttpClientResponse>>> receiver1 =
            (resp, byteBuf) ->
                    Mono.zip(byteBuf.asString(StandardCharsets.UTF_8)
                                    .switchIfEmpty(Mono.just(resp.status().reasonPhrase())),
                             Mono.just(resp));
    doTestIssue777_2(client, "/empty", "Bad Request", receiver1);
    doTestIssue777_2(client, "/test", "Test", receiver1);
    // Variant 4: throw from map() after reading the body.
    receiver =
            (resp, bytes) -> bytes.asString(StandardCharsets.UTF_8)
                                  .switchIfEmpty(Mono.just(resp.status().reasonPhrase()))
                                  .map(respBody -> {
                                      if (!Objects.equals(HttpResponseStatus.OK, resp.status())) {
                                          throw new RuntimeException(respBody);
                                      }
                                      return respBody;
                                  });
    doTestIssue777_1(client, "/empty", "Bad Request", receiver);
    doTestIssue777_1(client, "/test", "Test", receiver);
}
|
/**
 * Parses an S3 URI of the form "scheme://bucket/key" into an S3ResourceId.
 * The key is normalized to always start with "/" (an absent key becomes "/").
 *
 * @param uri the URI to parse
 * @return the parsed resource id
 * @throws IllegalArgumentException if the URI does not match the S3 pattern
 */
static S3ResourceId fromUri(String uri) {
  Matcher matcher = S3_URI.matcher(uri);
  checkArgument(matcher.matches(), "Invalid S3 URI: [%s]", uri);
  String rawKey = Strings.nullToEmpty(matcher.group("KEY"));
  String key = rawKey.startsWith("/") ? rawKey : "/" + rawKey;
  return fromComponents(matcher.group("SCHEME"), matcher.group("BUCKET"), key);
}
|
// A URI with scheme only and no bucket must be rejected with a message that
// echoes the offending URI.
@Test
public void testInvalidPathNoBucket() {
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("Invalid S3 URI: [s3://]");
    S3ResourceId.fromUri("s3://");
}
|
/**
 * Initializes and registers a channel for the given transport config, then
 * binds it to {@code bindAddress} on the channel's event loop. The returned
 * Mono completes with the bound channel or errors with the bind failure.
 */
@SuppressWarnings("FutureReturnValueIgnored")
public static Mono<Channel> bind(TransportConfig config, ChannelInitializer<Channel> channelInitializer,
        SocketAddress bindAddress, boolean isDomainSocket) {
    Objects.requireNonNull(config, "config");
    Objects.requireNonNull(bindAddress, "bindAddress");
    Objects.requireNonNull(channelInitializer, "channelInitializer");
    return doInitAndRegister(config, channelInitializer, isDomainSocket, config.eventLoopGroup().next())
            .flatMap(channel -> {
                // Bind on the channel's own event loop; the promise adapts the
                // Netty future into the returned Mono.
                MonoChannelPromise promise = new MonoChannelPromise(channel);
                // "FutureReturnValueIgnored" this is deliberate
                channel.eventLoop().execute(() -> channel.bind(bindAddress, promise.unvoid()));
                return promise;
            });
}
|
// Binding to an address that is already in use must fail AND leave the second
// (failed) channel unregistered, while the first channel stays registered.
@Test
@SuppressWarnings("FutureReturnValueIgnored")
void bind_whenBindException_thenChannelIsUnregistered() {
    final TcpClientConfig transportConfig = TcpClient.newConnection().configuration();
    // First bind succeeds on an ephemeral port.
    final Channel channel1 = TransportConnector.bind(
            transportConfig,
            new RecordingChannelInitializer(),
            new InetSocketAddress("localhost", 0),
            false).block(Duration.ofSeconds(5));
    final RecordingChannelInitializer channelInitializer = new RecordingChannelInitializer();
    // Second bind to the same port must fail.
    assertThatThrownBy(() -> TransportConnector.bind(
            transportConfig,
            channelInitializer,
            new InetSocketAddress("localhost", ((InetSocketAddress) channel1.localAddress()).getPort()),
            false).block(Duration.ofSeconds(5)));
    final Channel channel2 = channelInitializer.channel;
    assertThat(channel1.isRegistered()).isTrue();
    assertThat(channel2.isRegistered()).isFalse();
    channel1.close();
}
|
/**
 * Opens an output stream that uploads to OpenStack Swift. The actual store
 * call runs on a background thread via a delayed HTTP entity; the returned
 * stream's status yields the stored object with the ETag reported by Swift.
 */
@Override
public HttpResponseOutputStream<StorageObject> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    // Submit store run to background thread
    final DelayedHttpEntityCallable<StorageObject> command = new DelayedHttpEntityCallable<StorageObject>(file) {
        /**
         * @return The ETag returned by the server for the uploaded object
         */
        @Override
        public StorageObject call(final HttpEntity entity) throws BackgroundException {
            try {
                // Previous
                final HashMap<String, String> headers = new HashMap<>(status.getMetadata());
                if(status.isExists()) {
                    // Remove any large object header read from metadata of existing file
                    headers.remove(Constants.X_STATIC_LARGE_OBJECT);
                }
                final Checksum checksum = status.getChecksum();
                // Only pass an MD5 checksum through; Swift validates it server-side.
                final String etag = session.getClient().storeObject(
                    regionService.lookup(file),
                    containerService.getContainer(file).getName(), containerService.getKey(file),
                    entity, headers, checksum.algorithm == HashAlgorithm.md5 ? checksum.hash : null);
                if(log.isDebugEnabled()) {
                    log.debug(String.format("Saved object %s with checksum %s", file, etag));
                }
                final StorageObject stored = new StorageObject(containerService.getKey(file));
                stored.setMd5sum(etag);
                stored.setSize(status.getLength());
                return stored;
            }
            catch(GenericException e) {
                throw new SwiftExceptionMappingService().map("Upload {0} failed", e, file);
            }
            catch(IOException e) {
                throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
            }
        }
        @Override
        public long getContentLength() {
            return status.getLength();
        }
    };
    return this.write(file, status, command);
}
|
// End-to-end write: upload content, verify checksum/size/metadata round-trip,
// read the bytes back, overwrite with zero length, then delete.
@Test
public void testWrite() throws Exception {
    final TransferStatus status = new TransferStatus();
    status.setMime("text/plain");
    final byte[] content = "test".getBytes(StandardCharsets.UTF_8);
    status.setLength(content.length);
    final Path container = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume));
    container.attributes().setRegion("IAD");
    final Path test = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final SwiftRegionService regionService = new SwiftRegionService(session);
    status.setMetadata(Collections.singletonMap("C", "duck"));
    final StatusOutputStream<StorageObject> out = new SwiftWriteFeature(session, regionService).write(test, status, new DisabledConnectionCallback());
    assertNotNull(out);
    new StreamCopier(new TransferStatus(), new TransferStatus()).transfer(new ByteArrayInputStream(content), out);
    out.close();
    // Checksum reported by the write must match the server-side attributes.
    assertEquals(new SwiftAttributesFinderFeature(session).toAttributes(out.getStatus()).getChecksum(), new SwiftAttributesFinderFeature(session).find(test).getChecksum());
    assertTrue(new SwiftFindFeature(session).find(test));
    final PathAttributes attributes = new SwiftListService(session, regionService).list(test.getParent(), new DisabledListProgressListener()).get(test).attributes();
    assertEquals(content.length, attributes.getSize());
    // Read the uploaded bytes back and compare.
    final byte[] buffer = new byte[content.length];
    final InputStream in = new SwiftReadFeature(session, regionService).read(test, new TransferStatus(), new DisabledConnectionCallback());
    IOUtils.readFully(in, buffer);
    in.close();
    assertArrayEquals(content, buffer);
    // Custom metadata key "C" is stored under the X-Object-Meta- prefix.
    final Map<String, String> metadata = new SwiftMetadataFeature(session).getMetadata(test);
    assertFalse(metadata.isEmpty());
    assertEquals("text/plain", metadata.get("Content-Type"));
    assertEquals("duck", metadata.get("X-Object-Meta-C"));
    final OutputStream overwrite = new SwiftWriteFeature(session, regionService).write(test, new TransferStatus().withLength(0L), new DisabledConnectionCallback());
    overwrite.close();
    new SwiftDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Completes the adapted {@code CompletableFuture} with the callback result.
 */
@Override
public void onSuccess(T result)
{
    _future.complete(result);
}
|
// onSuccess must complete the future normally (not exceptionally, not
// cancelled) with the supplied value.
@Test
public void testSuccess()
{
    CompletableFuture<String> future = new CompletableFuture<>();
    CompletableFutureCallbackAdapter<String> adapter = new CompletableFutureCallbackAdapter<>(future);
    adapter.onSuccess("haha");
    assertTrue(future.isDone());
    assertFalse(future.isCompletedExceptionally());
    assertFalse(future.isCancelled());
    assertEquals(future.join(), "haha");
}
|
/**
 * Static factory for a {@code MonitoringInfoMetricName} with the given urn
 * and labels. Argument validation (e.g. rejecting an empty urn, per the
 * accompanying tests) is delegated to the constructor.
 */
public static MonitoringInfoMetricName named(String urn, Map<String, String> labels) {
  return new MonitoringInfoMetricName(urn, labels);
}
|
// An empty urn must be rejected with IllegalArgumentException.
@Test
public void testEmptyUrnThrows() {
    HashMap<String, String> labels = new HashMap<String, String>();
    thrown.expect(IllegalArgumentException.class);
    MonitoringInfoMetricName.named("", labels);
}
|
/**
 * Loads a ConfigInfo row by its primary key.
 *
 * @param id the row id
 * @return the config info, or null when no row with that id exists
 * @throws CannotGetJdbcConnectionException if no database connection could be
 *         obtained (logged as fatal before rethrowing)
 */
@Override
public ConfigInfo findConfigInfo(long id) {
    try {
        ConfigInfoMapper configInfoMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
                TableConstant.CONFIG_INFO);
        return this.jt.queryForObject(configInfoMapper.select(
                Arrays.asList("id", "data_id", "group_id", "tenant_id", "app_name", "content"),
                Collections.singletonList("id")), new Object[] {id}, CONFIG_INFO_ROW_MAPPER);
    } catch (EmptyResultDataAccessException e) { // Indicates that the data does not exist, returns null.
        return null;
    } catch (CannotGetJdbcConnectionException e) {
        LogUtil.FATAL_LOG.error("[db-error] " + e, e);
        throw e;
    }
}
|
// findConfigInfo must return the row produced by the row mapper for the id.
@Test
void testFindConfigInfoById() {
    long id = 1234567890876L;
    ConfigInfo configInfo = new ConfigInfo();
    configInfo.setId(id);
    Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {id}), eq(CONFIG_INFO_ROW_MAPPER))).thenReturn(configInfo);
    ConfigInfo configReturn = externalConfigInfoPersistService.findConfigInfo(id);
    assertEquals(id, configReturn.getId());
}
|
/**
 * Verifies that the given topic's cleanup policy is exactly "compact".
 *
 * @param topic             the topic to check
 * @param workerTopicConfig the worker config property naming this topic,
 *                          used in the error message
 * @param topicPurpose      human-readable description of what the topic
 *                          stores, used in the error message
 * @return true if the policy is exactly "compact"; false when the policy
 *         could not be read (older broker or missing describe permission)
 * @throws ConfigException if the topic has any policy other than "compact"
 */
public boolean verifyTopicCleanupPolicyOnlyCompact(String topic, String workerTopicConfig,
        String topicPurpose) {
    Set<String> cleanupPolicies = topicCleanupPolicy(topic);
    if (cleanupPolicies.isEmpty()) {
        // Cannot verify: treat as unknown rather than failing hard.
        log.info("Unable to use admin client to verify the cleanup policy of '{}' "
                + "topic is '{}', either because the broker is an older "
                + "version or because the Kafka principal used for Connect "
                + "internal topics does not have the required permission to "
                + "describe topic configurations.", topic, TopicConfig.CLEANUP_POLICY_COMPACT);
        return false;
    }
    Set<String> expectedPolicies = Collections.singleton(TopicConfig.CLEANUP_POLICY_COMPACT);
    if (!cleanupPolicies.equals(expectedPolicies)) {
        String expectedPolicyStr = String.join(",", expectedPolicies);
        String cleanupPolicyStr = String.join(",", cleanupPolicies);
        String msg = String.format("Topic '%s' supplied via the '%s' property is required "
                + "to have '%s=%s' to guarantee consistency and durability of "
                + "%s, but found the topic currently has '%s=%s'. Continuing would likely "
                + "result in eventually losing %s and problems restarting this Connect "
                + "cluster in the future. Change the '%s' property in the "
                + "Connect worker configurations to use a topic with '%s=%s'.",
                topic, workerTopicConfig, TopicConfig.CLEANUP_POLICY_CONFIG, expectedPolicyStr,
                topicPurpose, TopicConfig.CLEANUP_POLICY_CONFIG, cleanupPolicyStr, topicPurpose,
                workerTopicConfig, TopicConfig.CLEANUP_POLICY_CONFIG, expectedPolicyStr);
        throw new ConfigException(msg);
    }
    return true;
}
|
// A topic with cleanup.policy=delete must be rejected with a ConfigException
// explaining the consistency/durability requirement.
@Test
public void verifyingTopicCleanupPolicyShouldFailWhenTopicHasDeletePolicy() {
    String topicName = "myTopic";
    Map<String, String> topicConfigs = Collections.singletonMap("cleanup.policy", "delete");
    Cluster cluster = createCluster(1);
    try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) {
        TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList());
        mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), topicConfigs);
        TopicAdmin admin = new TopicAdmin(mockAdminClient);
        ConfigException e = assertThrows(ConfigException.class, () -> admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose"));
        assertTrue(e.getMessage().contains("to guarantee consistency and durability"));
    }
}
|
/**
 * Pages through rule data permissions for a user under a given selector.
 *
 * @param currentPage current page number
 * @param pageSize    rows per page
 * @param userId      the user whose permissions are listed
 * @param selectorId  the selector whose rules are inspected
 * @param name        optional rule name filter
 * @return a successful result wrapping the page of rule permissions
 */
@GetMapping("/rules")
public ShenyuAdminResult listPageRuleDataPermissions(@RequestParam("currentPage") final Integer currentPage,
                                                     @RequestParam("pageSize") final Integer pageSize,
                                                     @RequestParam("userId") final String userId,
                                                     @RequestParam("selectorId") final String selectorId,
                                                     @RequestParam(value = "name", required = false) final String name) {
    // Local renamed from "selectorList": the pager holds rule permissions, not selectors.
    CommonPager<DataPermissionPageVO> rulePager = dataPermissionService.listRulesByPage(
            new RuleQuery(selectorId, name, new PageParameter(currentPage, pageSize)), userId);
    return ShenyuAdminResult.success(ShenyuResultMessage.QUERY_SUCCESS, rulePager);
}
|
// Exercises GET /data-permission/rules: stubs the service to return a one-element page
// and asserts the HTTP 200 response, the success message, and that the stubbed VO's
// dataId round-trips into the JSON payload.
@Test
public void listPageRuleDataPermissions() throws Exception {
    Integer currentPage = 1;
    Integer pageSize = 10;
    String userId = "testUserId";
    String selectorId = "testSelectorId";
    String name = "testName";
    final PageParameter pageParameter = new PageParameter(currentPage, pageSize);
    final CommonPager<DataPermissionPageVO> commonPager = new CommonPager<>(pageParameter, Collections.singletonList(dataPermissionPageVO));
    given(this.dataPermissionService.listRulesByPage(
            new RuleQuery(selectorId, name, pageParameter), userId)).willReturn(commonPager);
    this.mockMvc.perform(MockMvcRequestBuilders.get("/data-permission/rules")
            .param("currentPage", String.valueOf(currentPage))
            .param("pageSize", String.valueOf(pageSize))
            .param("userId", userId)
            .param("selectorId", selectorId)
            .param("name", name))
            .andExpect(status().isOk())
            .andExpect(jsonPath("$.message", is(ShenyuResultMessage.QUERY_SUCCESS)))
            .andExpect(jsonPath("$.data.dataList[0].dataId", is(dataPermissionPageVO.getDataId())))
            .andReturn();
}
|
/**
 * Parses the raw volume strings from the plugin configuration into absolute unix paths.
 *
 * @param rawConfiguration plugin configuration supplying the volume entries
 * @return the set of parsed volume paths
 * @throws InvalidContainerVolumeException if an entry is not a valid absolute unix path
 */
@VisibleForTesting
static Set<AbsoluteUnixPath> getVolumesSet(RawConfiguration rawConfiguration)
    throws InvalidContainerVolumeException {
  Set<AbsoluteUnixPath> parsedVolumes = new HashSet<>();
  for (String volume : rawConfiguration.getVolumes()) {
    try {
      parsedVolumes.add(AbsoluteUnixPath.get(volume));
    } catch (IllegalArgumentException exception) {
      // Surface the offending raw string both as message and as the invalid volume.
      throw new InvalidContainerVolumeException(volume, volume, exception);
    }
  }
  return parsedVolumes;
}
|
// A non-absolute volume string ("`some/root") must be rejected; the exception exposes
// the offending raw string both as its message and via getInvalidVolume().
@Test
public void testGetInvalidVolumesList() {
    when(rawConfiguration.getVolumes()).thenReturn(Collections.singletonList("`some/root"));
    InvalidContainerVolumeException exception =
        assertThrows(
            InvalidContainerVolumeException.class,
            () -> PluginConfigurationProcessor.getVolumesSet(rawConfiguration));
    assertThat(exception).hasMessageThat().isEqualTo("`some/root");
    assertThat(exception.getInvalidVolume()).isEqualTo("`some/root");
}
|
/**
 * Computes per-partition read encryption information for a DWRF-encrypted table.
 *
 * @return empty when the table carries no encryption properties or when no requested
 *         column maps to a key reference; otherwise the computed encryption information
 */
@Override
public Optional<Map<String, EncryptionInformation>> getReadEncryptionInformation(
        ConnectorSession session,
        Table table,
        Optional<Set<HiveColumnHandle>> requestedColumns,
        Map<String, Partition> partitions)
{
    // Chain the two optional lookups instead of unwrapping each by hand: absence at
    // either step short-circuits to Optional.empty(), exactly as before.
    return getTableEncryptionProperties(table).flatMap(properties ->
            getFieldToKeyReference(properties, requestedColumns).map(fieldToKeyReference ->
                    getReadEncryptionInformationInternal(session, table, requestedColumns, partitions, fieldToKeyReference, properties)));
}
|
// Table-level encryption with an empty requested-column set: encryption information is
// still produced for every partition, keyed by partition name, with per-partition extra
// metadata and no per-field key references.
@Test
public void testGetReadEncryptionInformationForPartitionedTableWithTableLevelEncryptionAndNoRequestedColumns()
{
    Table table = createTable(DWRF, Optional.of(forTable("key1", "algo", "provider")), true);
    Optional<Map<String, EncryptionInformation>> encryptionInformation = encryptionInformationSource.getReadEncryptionInformation(
            SESSION,
            table,
            Optional.of(ImmutableSet.of()),
            ImmutableMap.of(
                    "ds=2020-01-01", new Partition("dbName", "tableName", ImmutableList.of("2020-01-01"), table.getStorage(), table.getDataColumns(), ImmutableMap.of(), Optional.empty(), false, true, 0, 0, Optional.empty()),
                    "ds=2020-01-02", new Partition("dbName", "tableName", ImmutableList.of("2020-01-02"), table.getStorage(), table.getDataColumns(), ImmutableMap.of(), Optional.empty(), false, true, 0, 0, Optional.empty())));
    assertTrue(encryptionInformation.isPresent());
    assertEquals(
            encryptionInformation.get(),
            ImmutableMap.of(
                    "ds=2020-01-01", EncryptionInformation.fromEncryptionMetadata(DwrfEncryptionMetadata.forPerField(ImmutableMap.of(), ImmutableMap.of(TEST_EXTRA_METADATA, "ds=2020-01-01"), "algo", "provider")),
                    "ds=2020-01-02", EncryptionInformation.fromEncryptionMetadata(DwrfEncryptionMetadata.forPerField(ImmutableMap.of(), ImmutableMap.of(TEST_EXTRA_METADATA, "ds=2020-01-02"), "algo", "provider"))));
}
|
/**
 * Validates that a move/rename from {@code source} to {@code target} is permitted before
 * attempting it: neither endpoint may be a root, a container, or in the trash, and the
 * source's ACL must grant the role matching the kind of operation (rename within the same
 * name, move within the same box, or move out of the box).
 *
 * @throws AccessDeniedException when a structural constraint or a required ACL role is missing
 */
@Override
public void preflight(final Path source, final Path target) throws BackgroundException {
    // Hoisted: previously a new DeepboxPathContainerService was constructed per check.
    final DeepboxPathContainerService containerService = new DeepboxPathContainerService(session);
    if (source.isRoot() || containerService.isContainer(source) || containerService.isInTrash(source)) {
        throw new AccessDeniedException(MessageFormat.format(LocaleFactory.localizedString("Cannot rename {0}", "Error"), source.getName())).withFile(source);
    }
    if (target.isRoot() || containerService.isContainer(target) || containerService.isInTrash(target)) {
        throw new AccessDeniedException(MessageFormat.format(LocaleFactory.localizedString("Cannot create {0}", "Error"), target.getName()));
    }
    final Acl acl = source.attributes().getAcl();
    if (Acl.EMPTY == acl) {
        // Missing initialization: be permissive rather than blocking the operation.
        log.warn(String.format("Unknown ACLs on %s", source));
        return;
    }
    if (!source.getName().equals(target.getName())) {
        // Name changes require the rename role.
        this.assertAclContains(source, acl, CANRENAME);
    }
    if (!fileid.getFileId(source.getParent()).equals(fileid.getFileId(target.getParent()))) {
        // Parent changes require a move role; which one depends on whether the
        // destination stays inside the same box.
        if (fileid.getBoxNodeId(source.getParent()).equals(fileid.getBoxNodeId(target.getParent()))) {
            this.assertAclContains(source, acl, CANMOVEWITHINBOX);
        }
        else {
            this.assertAclContains(source, acl, CANMOVEOUTOFBOX);
        }
    }
}

/**
 * Throws when {@code acl} does not grant {@code role} to the canonical user; previously
 * this warn-and-throw sequence was duplicated three times in {@link #preflight}.
 */
private void assertAclContains(final Path source, final Acl acl, final Acl.Role role) throws AccessDeniedException {
    if (!acl.get(new Acl.CanonicalUser()).contains(role)) {
        if (log.isWarnEnabled()) {
            log.warn(String.format("ACL %s for %s does not include %s", acl, source, role));
        }
        throw new AccessDeniedException(MessageFormat.format(LocaleFactory.localizedString("Cannot rename {0}", "Error"), source.getName())).withFile(source);
    }
}
|
// Live-session test: the fixed "Documents" folder of a box grants none of the move/rename
// roles, so preflight must deny both a move within the organization and a move to another
// organization.
@Test
public void testNoMoveRenameDocuments() throws Exception {
    final DeepboxIdProvider nodeid = new DeepboxIdProvider(session);
    final Path documents = new Path("/ORG 1 - DeepBox Desktop App/ORG1:Box1/Documents", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final PathAttributes attributes = new DeepboxAttributesFinderFeature(session, nodeid).find(documents);
    assertFalse(attributes.getAcl().get(new Acl.CanonicalUser()).contains(CANMOVEWITHINBOX));
    assertFalse(attributes.getAcl().get(new Acl.CanonicalUser()).contains(CANMOVEOUTOFBOX));
    assertFalse(attributes.getAcl().get(new Acl.CanonicalUser()).contains(CANRENAME));
    assertThrows(AccessDeniedException.class, () -> new DeepboxMoveFeature(session, nodeid).preflight(documents, new Path(String.format("/ORG 1 - DeepBox Desktop App/%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.directory, Path.Type.volume))));
    assertThrows(AccessDeniedException.class, () -> new DeepboxMoveFeature(session, nodeid).preflight(documents, new Path(String.format("/ORG 4 - DeepBox Desktop App/%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.directory, Path.Type.volume))));
}
|
/**
 * Compares two BSON values, delegating to the two-argument overload with the third
 * argument fixed to {@code true}.
 * NOTE(review): the meaning of the boolean flag is defined by the overload, which is not
 * visible here — confirm before relying on it.
 */
public static int compareBsonValue(BsonValue o1, BsonValue o2) {
    return compareBsonValue(o1, o2, true);
}
|
/**
 * Specification of BsonUtils.compareBsonValue: for each BSON type (Decimal128, String,
 * Array, Binary, Boolean, DateTime, Document, Timestamp, RegEx, JavaScript-with-scope)
 * the less-than, equal, and greater-than cases are asserted, followed by inter-type
 * ordering per the MongoDB BSON comparison order and null/undefined handling.
 */
@Test
public void testCompareBsonValue() {
    // test compare Decimal128
    assertTrue(BsonUtils.compareBsonValue(new BsonDecimal128(Decimal128.parse("18")), new BsonDecimal128(Decimal128.parse("17"))) > 0);
    assertEquals(0, BsonUtils.compareBsonValue(new BsonDecimal128(Decimal128.parse("17")), new BsonDecimal128(Decimal128.parse("17"))));
    assertTrue(BsonUtils.compareBsonValue(new BsonDecimal128(Decimal128.parse("16")), new BsonDecimal128(Decimal128.parse("17"))) < 0);
    // test compare String
    assertTrue(BsonUtils.compareBsonValue(new BsonString("apple"), new BsonString("banana")) < 0);
    assertEquals(0, BsonUtils.compareBsonValue(new BsonString("banana"), new BsonString("banana")));
    assertTrue(BsonUtils.compareBsonValue(new BsonString("cherry"), new BsonString("banana")) > 0);
    // test compare Array
    assertTrue(BsonUtils.compareBsonValue(new BsonArray(Arrays.asList(new BsonString("fruit"), new BsonString("apple"))), new BsonArray(Arrays.asList(new BsonString("fruit"), new BsonString("banana")))) < 0);
    assertEquals(0, BsonUtils.compareBsonValue(new BsonArray(Arrays.asList(new BsonString("fruit"), new BsonString("banana"))), new BsonArray(Arrays.asList(new BsonString("fruit"), new BsonString("banana")))));
    assertTrue(BsonUtils.compareBsonValue(new BsonArray(Arrays.asList(new BsonString("fruit"), new BsonString("cherry"))), new BsonArray(Arrays.asList(new BsonString("fruit"), new BsonString("banana")))) > 0);
    // According to https://www.mongodb.com/docs/manual/reference/bson-type-comparison-order/
    // only smallest value will be compared
    assertEquals(0, BsonUtils.compareBsonValue(new BsonArray(Arrays.asList(new BsonString("apple"), new BsonString("cherry"))), new BsonArray(Arrays.asList(new BsonString("apple"), new BsonString("banana")))));
    // all arrays will be sorted before comparison
    assertEquals(0, BsonUtils.compareBsonValue(new BsonArray(Arrays.asList(new BsonString("apple"), new BsonString("banana"))), new BsonArray(Arrays.asList(new BsonString("banana"), new BsonString("apple")))));
    // only smallest value in each array will be compared
    // in this case, apple < banana
    assertTrue(BsonUtils.compareBsonValue(new BsonArray(Arrays.asList(new BsonString("cherry"), new BsonString("apple"))), new BsonArray(Arrays.asList(new BsonString("cherry"), new BsonString("banana")))) < 0);
    // test compare Binary
    assertTrue(BsonUtils.compareBsonValue(new BsonBinary("apple".getBytes()), new BsonBinary("banana".getBytes())) < 0);
    assertEquals(0, BsonUtils.compareBsonValue(new BsonBinary("banana".getBytes()), new BsonBinary("banana".getBytes())));
    assertTrue(BsonUtils.compareBsonValue(new BsonBinary("cherry".getBytes()), new BsonBinary("banana".getBytes())) > 0);
    // test compare Boolean
    assertTrue(BsonUtils.compareBsonValue(new BsonBoolean(false), new BsonBoolean(true)) < 0);
    assertEquals(0, BsonUtils.compareBsonValue(new BsonBoolean(true), new BsonBoolean(true)));
    assertTrue(BsonUtils.compareBsonValue(new BsonBoolean(true), new BsonBoolean(false)) > 0);
    // test compare DateTime
    assertTrue(BsonUtils.compareBsonValue(new BsonDateTime(1600000000), new BsonDateTime(1700000000)) < 0);
    assertEquals(0, BsonUtils.compareBsonValue(new BsonDateTime(1700000000), new BsonDateTime(1700000000)));
    assertTrue(BsonUtils.compareBsonValue(new BsonDateTime(1800000000), new BsonDateTime(1700000000)) > 0);
    // test compare document
    assertTrue(BsonUtils.compareBsonValue(new BsonDocument("fruit", new BsonString("apple")), new BsonDocument("fruit", new BsonString("banana"))) < 0);
    assertEquals(0, BsonUtils.compareBsonValue(new BsonDocument("fruit", new BsonString("banana")), new BsonDocument("fruit", new BsonString("banana"))));
    assertTrue(BsonUtils.compareBsonValue(new BsonDocument("fruit", new BsonString("cherry")), new BsonDocument("fruit", new BsonString("banana"))) > 0);
    // test compare Timestamp
    assertTrue(BsonUtils.compareBsonValue(new BsonTimestamp(1600000000), new BsonTimestamp(1700000000)) < 0);
    assertEquals(0, BsonUtils.compareBsonValue(new BsonTimestamp(1700000000), new BsonTimestamp(1700000000)));
    assertTrue(BsonUtils.compareBsonValue(new BsonTimestamp(1800000000), new BsonTimestamp(1700000000)) > 0);
    // test compare RegEx
    assertTrue(BsonUtils.compareBsonValue(new BsonRegularExpression("[a-xA-X]"), new BsonRegularExpression("[b-yB-Y]")) < 0);
    assertEquals(0, BsonUtils.compareBsonValue(new BsonRegularExpression("[b-yB-Y]"), new BsonRegularExpression("[b-yB-Y]")));
    assertTrue(BsonUtils.compareBsonValue(new BsonRegularExpression("[c-zC-Z]"), new BsonRegularExpression("[b-yB-Y]")) > 0);
    // test compare JavaScript
    assertTrue(BsonUtils.compareBsonValue(new BsonJavaScriptWithScope("console.log('apple');", new BsonDocument()), new BsonJavaScriptWithScope("console.log('banana');", new BsonDocument())) < 0);
    assertEquals(0, BsonUtils.compareBsonValue(new BsonJavaScriptWithScope("console.log('banana');", new BsonDocument()), new BsonJavaScriptWithScope("console.log('banana');", new BsonDocument())));
    assertTrue(BsonUtils.compareBsonValue(new BsonJavaScriptWithScope("console.log('cherry');", new BsonDocument()), new BsonJavaScriptWithScope("console.log('banana');", new BsonDocument())) > 0);
    // test compare JavaScript with different scope
    assertTrue(BsonUtils.compareBsonValue(new BsonJavaScriptWithScope("console.log('apple');", new BsonDocument("_id", new BsonString("apple"))), new BsonJavaScriptWithScope("console.log('apple');", new BsonDocument("_id", new BsonString("banana")))) < 0);
    assertEquals(0, BsonUtils.compareBsonValue(new BsonJavaScriptWithScope("console.log('apple');", new BsonDocument("_id", new BsonString("banana"))), new BsonJavaScriptWithScope("console.log('apple');", new BsonDocument("_id", new BsonString("banana")))));
    assertTrue(BsonUtils.compareBsonValue(new BsonJavaScriptWithScope("console.log('apple');", new BsonDocument("_id", new BsonString("cherry"))), new BsonJavaScriptWithScope("console.log('apple');", new BsonDocument("_id", new BsonString("banana")))) > 0);
    // test inter-type comparison
    assertTrue(BsonUtils.compareBsonValue(new BsonNull(), new BsonString("")) < 0);
    assertTrue(BsonUtils.compareBsonValue(new BsonBoolean(true), new BsonString("")) > 0);
    // test null comparison
    assertEquals(0, BsonUtils.compareBsonValue(new BsonNull(), new BsonNull()));
    assertEquals(0, BsonUtils.compareBsonValue(new BsonUndefined(), new BsonUndefined()));
    assertTrue(BsonUtils.compareBsonValue(new BsonUndefined(), new BsonNull()) < 0);
}
|
/**
 * Renders this assignment as
 * {@code Assignment(topicIdPartition=..., directoryId=..., submissionTimeNs=..., successCallback=...)}.
 */
@Override
public String toString() {
    return "Assignment(topicIdPartition=" + topicIdPartition +
        ", directoryId=" + directoryId +
        ", submissionTimeNs=" + submissionTimeNs +
        ", successCallback=" + successCallback +
        ")";
}
|
// Pins the exact toString() format of Assignment, including field order and the
// rendering of each component (topic id:partition, directory Uuid, time, callback).
@Test
public void testAssignmentToString() {
    assertEquals("Assignment(topicIdPartition=rTudty6ITOCcO_ldVyzZYg:1, " +
        "directoryId=rzRT8XZaSbKsP6j238zogg, " +
        "submissionTimeNs=123, " +
        "successCallback=NoOpRunnable)",
        new Assignment(new TopicIdPartition(Uuid.fromString("rTudty6ITOCcO_ldVyzZYg"), 1),
            Uuid.fromString("rzRT8XZaSbKsP6j238zogg"),
            123,
            NoOpRunnable.INSTANCE).toString());
}
|
/**
 * Reports a snapshot of all metrics to InfluxDB under a single timestamp.
 * Flushes any buffered series first, appends every gauge/counter/histogram/meter/timer,
 * writes the batch if any series data accumulated, and finally resets the counters so
 * each report carries deltas rather than running totals. Failures are logged and the
 * batch is discarded — the reporter keeps running.
 */
@Override
public void report(final SortedMap<MetricName, Gauge> gauges, final SortedMap<MetricName, Counter> counters,
    final SortedMap<MetricName, Histogram> histograms, final SortedMap<MetricName, Meter> meters, final SortedMap<MetricName, Timer> timers) {
    final long now = System.currentTimeMillis();
    // Parameterized logging replaces the manual isDebugEnabled() guard plus string
    // concatenation: the message is only assembled when DEBUG is enabled.
    logger.debug("InfluxDbReporter report is called with counter size {}", counters.size());
    try {
        influxDb.flush();
        for (Map.Entry<MetricName, Gauge> entry : gauges.entrySet()) {
            reportGauge(entry.getKey(), entry.getValue(), now);
        }
        for (Map.Entry<MetricName, Counter> entry : counters.entrySet()) {
            reportCounter(entry.getKey(), entry.getValue(), now);
        }
        for (Map.Entry<MetricName, Histogram> entry : histograms.entrySet()) {
            reportHistogram(entry.getKey(), entry.getValue(), now);
        }
        for (Map.Entry<MetricName, Meter> entry : meters.entrySet()) {
            reportMeter(entry.getKey(), entry.getValue(), now);
        }
        for (Map.Entry<MetricName, Timer> entry : timers.entrySet()) {
            reportTimer(entry.getKey(), entry.getValue(), now);
        }
        if (influxDb.hasSeriesData()) {
            influxDb.writeData();
        }
        // Reset counters only after a successful write so a failed report does not
        // lose the counts.
        for (Map.Entry<MetricName, Counter> entry : counters.entrySet()) {
            Counter counter = entry.getValue();
            long count = counter.getCount();
            counter.dec(count);
        }
    } catch (Exception e) {
        logger.error("Unable to report to InfluxDB. Discarding data.", e);
    }
}
|
// Verifies a histogram snapshot is forwarded to InfluxDB as at least one appended point.
// NOTE(review): the field-level assertions below were commented out at some point —
// either restore them or delete the dead block; as-is the test only checks that
// appendPoints was called.
@Test
public void reportsHistograms() throws Exception {
    final Histogram histogram = mock(Histogram.class);
    when(histogram.getCount()).thenReturn(1L);
    final Snapshot snapshot = mock(Snapshot.class);
    when(snapshot.getMax()).thenReturn(2L);
    when(snapshot.getMean()).thenReturn(3.0);
    when(snapshot.getMin()).thenReturn(4L);
    when(snapshot.getStdDev()).thenReturn(5.0);
    when(snapshot.getMedian()).thenReturn(6.0);
    when(snapshot.get75thPercentile()).thenReturn(7.0);
    when(snapshot.get95thPercentile()).thenReturn(8.0);
    when(snapshot.get98thPercentile()).thenReturn(9.0);
    when(snapshot.get99thPercentile()).thenReturn(10.0);
    when(snapshot.get999thPercentile()).thenReturn(11.0);
    when(histogram.getSnapshot()).thenReturn(snapshot);
    reporter.report(this.map(), this.map(), this.map("histogram", histogram), this.map(), this.map());
    final ArgumentCaptor<InfluxDbPoint> influxDbPointCaptor = ArgumentCaptor.forClass(InfluxDbPoint.class);
    Mockito.verify(influxDb, atLeastOnce()).appendPoints(influxDbPointCaptor.capture());
    InfluxDbPoint point = influxDbPointCaptor.getValue();
    /*
    assertThat(point.getMeasurement()).isEqualTo("histogram");
    assertThat(point.getFields()).isNotEmpty();
    assertThat(point.getFields()).hasSize(13);
    assertThat(point.getFields()).contains(entry("max", 2L));
    assertThat(point.getFields()).contains(entry("mean", 3.0));
    assertThat(point.getFields()).contains(entry("min", 4L));
    assertThat(point.getFields()).contains(entry("std-dev", 5.0));
    assertThat(point.getFields()).contains(entry("median", 6.0));
    assertThat(point.getFields()).contains(entry("75-percentile", 7.0));
    assertThat(point.getFields()).contains(entry("95-percentile", 8.0));
    assertThat(point.getFields()).contains(entry("98-percentile", 9.0));
    assertThat(point.getFields()).contains(entry("99-percentile", 10.0));
    assertThat(point.getFields()).contains(entry("999-percentile", 11.0));
    */
}
|
/** Exposes the Kudu connector factory as this plugin's single connector factory. */
@Override
public Iterable<ConnectorFactory> getConnectorFactories()
{
    return ImmutableList.of(new KuduConnectorFactory());
}
|
// Smoke test: the plugin exposes exactly one factory and that factory can create a
// connector from a minimal configuration (just the Kudu master address).
@Test
public void testCreateConnector()
        throws Exception
{
    Plugin plugin = new KuduPlugin();
    ConnectorFactory factory = getOnlyElement(plugin.getConnectorFactories());
    factory.create("test", ImmutableMap.of("kudu.client.master-addresses", "localhost:7051"), new TestingConnectorContext());
}
|
/**
 * Returns the {@link DoFnSignature} for the given {@link DoFn} class, parsing it on
 * first request and serving subsequent requests from the cache.
 */
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
    return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
}
|
// Signature parsing must reject a @Timestamp parameter whose type is not
// org.joda.time.Instant, with a message naming the required type.
@Test
public void testWrongTimestampType() throws Exception {
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("@Timestamp argument must have type org.joda.time.Instant");
    DoFnSignatures.getSignature(
        new DoFn<String, String>() {
            @ProcessElement
            public void process(@Timestamp String timestamp) {}
        }.getClass());
}
|
/** Delegates to the wrapped log channel's id. */
@Override
public String getLogChannelId() {
    return log.getLogChannelId();
}
|
// NOTE(review): currently @Ignore'd — intent is to verify that a repository-loaded
// Trans keeps the GENERAL log channel rather than leaking a per-instance channel;
// confirm why it was disabled before re-enabling.
@Ignore
@Test
public void testLoggingObjectIsNotLeakInTrans() throws Exception {
    Repository rep = mock( Repository.class );
    RepositoryDirectoryInterface repInt = Mockito.mock( RepositoryDirectoryInterface.class );
    when(
        rep.loadTransformation( anyString(), any( RepositoryDirectoryInterface.class ),
            nullable( ProgressMonitorListener.class ), anyBoolean(), nullable( String.class ) ) ).thenReturn( meta );
    when( rep.findDirectory( anyString() ) ).thenReturn( repInt );
    Trans trans = new Trans( meta, rep, "junit", "junitDir", "fileName" );
    assertEquals( "Log channel General assigned", LogChannel.GENERAL.getLogChannelId(), trans.log
        .getLogChannelId() );
}
|
/**
 * Registers a new ingress in the store after validating it is non-null and carries a UID,
 * then logs the creation.
 *
 * @param ingress kubernetes ingress to create; must not be null and must have a UID
 */
@Override
public void createIngress(Ingress ingress) {
    checkNotNull(ingress, ERR_NULL_INGRESS);
    final String uid = ingress.getMetadata().getUid();
    checkArgument(!Strings.isNullOrEmpty(uid), ERR_NULL_INGRESS_UID);
    k8sIngressStore.createIngress(ingress);
    final String name = ingress.getMetadata().getName();
    log.info(String.format(MSG_INGRESS, name, MSG_CREATED));
}
|
// Creating the same ingress twice must fail: the second create is expected to raise
// IllegalArgumentException (duplicate detected by the store).
@Test(expected = IllegalArgumentException.class)
public void testCreateDuplicateIngress() {
    target.createIngress(INGRESS);
    target.createIngress(INGRESS);
}
|
/**
 * Renders a varargs boolean array as a compact string of 'T'/'F' characters,
 * one per element in order (e.g. {@code {true, false} -> "TF"}).
 *
 * @param blnA booleans to render; may be null or empty
 * @return the 'T'/'F' string, or "?" when the input is null or empty
 */
public static String getSBln( boolean... blnA ) {
    if (blnA == null || blnA.length == 0) {
        return "?";
    }
    // StringBuilder avoids the O(n^2) cost of += concatenation in a loop.
    StringBuilder sb = new StringBuilder(blnA.length);
    for (boolean b : blnA) {
        sb.append(b ? 'T' : 'F');
    }
    return sb.toString();
}
|
// Covers getSBln: empty and null inputs yield "?", single values map to "T"/"F", and
// multi-element arrays are rendered element-by-element in order.
@Test
public void testgetSBln() throws Exception {
    //
    assertEquals( "?", BTools.getSBln() );
    assertEquals( "?", BTools.getSBln( null ) );
    assertEquals( "T", BTools.getSBln( true ) );
    assertEquals( "F", BTools.getSBln( false ) );
    assertEquals( "TFFT", BTools.getSBln( true, false, false, true ) );
    assertEquals( "FTFFT", BTools.getSBln( false, true, false, false, true ) );
    //
}
|
/**
 * A job is supported when an activator is present, the job does not reference a static
 * field, and the activator can resolve an instance of the job's class.
 */
@Override
public boolean supports(Job job) {
    if (jobActivator == null) {
        return false;
    }
    final JobDetails details = job.getJobDetails();
    if (details.hasStaticFieldName()) {
        return false;
    }
    return jobActivator.activateJob(toClass(details.getClassName())) != null;
}
|
// A job whose class the IoC container can activate (and which uses no static field
// reference) must be reported as supported by the IoC-backed runner.
@Test
void supportsJobIfJobClassIsKnownInIoC() {
    Job job = anEnqueuedJob()
        .withJobDetails(defaultJobDetails())
        .build();
    assertThat(backgroundIoCJobWithIocRunner.supports(job)).isTrue();
}
|
/**
 * Builds a closed range from the two endpoints, tolerating endpoints of mixed numeric
 * types: when the direct construction fails with a ClassCastException, both endpoints
 * are re-parsed into a common numeric type and the range is retried.
 *
 * @throws ClassCastException when the endpoints cannot be unified to a numeric type
 */
public static Range<Comparable<?>> safeClosed(final Comparable<?> lowerEndpoint, final Comparable<?> upperEndpoint) {
    try {
        return Range.closed(lowerEndpoint, upperEndpoint);
    } catch (final ClassCastException ex) {
        final Optional<Class<?>> targetType = getTargetNumericType(Arrays.asList(lowerEndpoint, upperEndpoint));
        if (!targetType.isPresent()) {
            // Not a numeric-type mismatch we can repair — propagate the original failure.
            throw ex;
        }
        return Range.closed(parseNumberByClazz(lowerEndpoint.toString(), targetType.get()), parseNumberByClazz(upperEndpoint.toString(), targetType.get()));
    }
}
|
// Mixed float/double endpoints are unified to double before the range is built.
@Test
void assertSafeClosedForDouble() {
    Range<Comparable<?>> range = SafeNumberOperationUtils.safeClosed(5.12F, 13.75);
    assertThat(range.lowerEndpoint(), is(5.12));
    assertThat(range.upperEndpoint(), is(13.75));
}
|
/**
 * Removes every document from the index: deletes all, refreshes the searcher so readers
 * observe the empty index, then commits.
 *
 * @throws java.io.UncheckedIOException when any of the underlying index operations fail
 */
@Override
public void deleteAll() {
    try {
        this.indexWriter.deleteAll();
        this.searcherManager.maybeRefreshBlocking();
        this.indexWriter.commit();
    } catch (IOException e) {
        // UncheckedIOException is more specific than a bare RuntimeException, preserves
        // the cause, and remains a RuntimeException so existing callers keep working.
        throw new java.io.UncheckedIOException(e);
    }
}
|
// deleteAll must perform, in order: writer.deleteAll, a blocking searcher refresh, and a
// commit — verified via the mocked collaborators.
@Test
void shouldDeleteAll() throws IOException {
    this.searchEngine.deleteAll();
    verify(this.indexWriter).deleteAll();
    verify(this.searcherManager).maybeRefreshBlocking();
    verify(this.indexWriter).commit();
}
|
/**
 * Starts an RDA document-scan session for a foreign travel document (passport or id
 * card): logs receipt, opens the remote RDA session with the supplied MRZ data, stores
 * the session handles on the app session, and returns the scan URL and session id.
 * Responds NOK for unsupported document types or when the RDA session cannot be opened.
 */
@Override
public AppResponse process(Flow flow, MrzDocumentRequest params) {
    final String documentType = params.getDocumentType();
    if (!(documentType.equals("PASSPORT") || documentType.equals("ID_CARD"))) {
        return new NokResponse();
    }
    final Map<String, String> travelDocument = Map.of(
        "documentNumber", params.getDocumentNumber(),
        "dateOfBirth", params.getDateOfBirth(),
        "dateOfExpiry", params.getDateOfExpiry());
    digidClient.remoteLog("867", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(), HIDDEN, true));
    appSession.setRdaSessionStatus("DOCUMENTS_RECEIVED");
    final Map<String, String> rdaSession = rdaClient.startSession(
        returnUrl.concat("/iapi/rda/confirm"),
        appSession.getId(), params.getIpAddress(), List.of(travelDocument), List.of());
    if (rdaSession.isEmpty()) {
        digidClient.remoteLog("873", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(), HIDDEN, true));
        return new NokResponse();
    }
    // Persist the remote session handles so the confirm callback can resume.
    appSession.setConfirmSecret(rdaSession.get("confirmSecret"));
    appSession.setUrl(rdaSession.get("url"));
    appSession.setRdaSessionId(rdaSession.get("sessionId"));
    appSession.setRdaSessionTimeoutInSeconds(rdaSession.get("expiration"));
    appSession.setRdaSessionStatus("SCANNING_FOREIGN");
    appSession.setRdaDocumentType(documentType);
    appSession.setRdaDocumentNumber(params.getDocumentNumber());
    digidClient.remoteLog("868", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(), HIDDEN, true));
    return new RdaResponse(appSession.getUrl(), appSession.getRdaSessionId());
}
|
// Happy path for a PASSPORT request: the RDA session starts, the app session moves to
// SCANNING_FOREIGN, and the response exposes the session id and scan URL.
@Test
public void processValid1() {
    //given
    MrzDocumentRequest mrzDocumentRequest = new MrzDocumentRequest();
    mrzDocumentRequest.setDocumentType("PASSPORT");
    mrzDocumentRequest.setDateOfBirth("test");
    mrzDocumentRequest.setDateOfExpiry("test");
    mrzDocumentRequest.setDocumentNumber("dfdf");
    when(rdaClient.startSession(anyString(), anyString(), any(), any(), any())).thenReturn(rdaResponse);
    //when
    AppResponse appResponse = mrzDocumentInitialized.process(mockedFlow, mrzDocumentRequest);
    //then
    assertEquals("SCANNING_FOREIGN", mockedAppSession.getRdaSessionStatus());
    assertEquals(SESSION_ID, ((RdaResponse)appResponse).getSessionId());
    assertEquals(RDA_URL, ((RdaResponse)appResponse).getUrl());
}
|
/**
 * Joins a catalog name, namespace levels, and table name into a fully qualified name.
 * URI-like catalog names (containing '/' or ':') are joined with '/', e.g.
 * {@code thrift://host:port/db.table}; plain names use '.', e.g. {@code prod.db.table}.
 */
public static String fullTableName(String catalogName, TableIdentifier identifier) {
    StringBuilder result = new StringBuilder();
    boolean uriLike = catalogName.contains("/") || catalogName.contains(":");
    if (uriLike) {
        result.append(catalogName);
        if (!catalogName.endsWith("/")) {
            result.append("/");
        }
    } else {
        result.append(catalogName).append(".");
    }
    for (String level : identifier.namespace().levels()) {
        result.append(level).append(".");
    }
    return result.append(identifier.name()).toString();
}
|
// Covers fullTableName: URI-like catalogs (with and without trailing '/') join with '/',
// dotted catalogs join with '.', path-style catalogs ('/test/db') count as URI-like, and
// multi-level namespaces are joined with '.' throughout.
@Test
public void fullTableNameWithDifferentValues() {
    String uriTypeCatalogName = "thrift://host:port/db.table";
    String namespace = "ns";
    String nameSpaceWithTwoLevels = "ns.l2";
    String tableName = "tbl";
    TableIdentifier tableIdentifier = TableIdentifier.of(namespace, tableName);
    assertThat(CatalogUtil.fullTableName(uriTypeCatalogName, tableIdentifier))
        .isEqualTo(uriTypeCatalogName + "/" + namespace + "." + tableName);
    tableIdentifier = TableIdentifier.of(nameSpaceWithTwoLevels, tableName);
    assertThat(CatalogUtil.fullTableName(uriTypeCatalogName, tableIdentifier))
        .isEqualTo(uriTypeCatalogName + "/" + nameSpaceWithTwoLevels + "." + tableName);
    assertThat(CatalogUtil.fullTableName(uriTypeCatalogName + "/", tableIdentifier))
        .isEqualTo(uriTypeCatalogName + "/" + nameSpaceWithTwoLevels + "." + tableName);
    String nonUriCatalogName = "test.db.catalog";
    assertThat(CatalogUtil.fullTableName(nonUriCatalogName, tableIdentifier))
        .isEqualTo(nonUriCatalogName + "." + nameSpaceWithTwoLevels + "." + tableName);
    String pathStyleCatalogName = "/test/db";
    assertThat(CatalogUtil.fullTableName(pathStyleCatalogName, tableIdentifier))
        .isEqualTo(pathStyleCatalogName + "/" + nameSpaceWithTwoLevels + "." + tableName);
}
|
/** Static factory for a fresh {@link FactoryBuilder} with default settings. */
public static FactoryBuilder newFactoryBuilder() {
    return new FactoryBuilder();
}
|
// equals/hashCode contract for B3 propagation factories: a factory equals itself and any
// factory built with the same inject formats (with matching hash codes), and differs
// from the default B3Propagation.FACTORY.
@Test void equalsAndHashCode() {
    // same instance are equivalent
    Propagation.Factory factory = B3Propagation.newFactoryBuilder()
        .injectFormat(Span.Kind.CLIENT, Format.SINGLE_NO_PARENT)
        .injectFormat(Span.Kind.SERVER, Format.SINGLE_NO_PARENT)
        .build();
    assertThat(factory).isEqualTo(factory);
    // same formats are equivalent
    Propagation.Factory sameFields = B3Propagation.newFactoryBuilder()
        .injectFormat(Span.Kind.CLIENT, Format.SINGLE_NO_PARENT)
        .injectFormat(Span.Kind.SERVER, Format.SINGLE_NO_PARENT)
        .build();
    assertThat(factory.equals(sameFields)).isTrue();
    assertThat(sameFields).isEqualTo(factory);
    assertThat(sameFields).hasSameHashCodeAs(factory);
    // different formats are not equivalent
    assertThat(factory).isNotEqualTo(B3Propagation.FACTORY);
    assertThat(B3Propagation.FACTORY).isNotEqualTo(factory);
    assertThat(factory.hashCode()).isNotEqualTo(B3Propagation.FACTORY.hashCode());
}
|
/**
 * Closes a discount activity: validates it exists, rejects the request when the activity
 * is already closed, and otherwise persists the CLOSE status.
 *
 * @param id id of the discount activity to close
 */
@Override
public void closeDiscountActivity(Long id) {
    // Validate the activity exists.
    DiscountActivityDO activity = validateDiscountActivityExists(id);
    // An already-closed activity must not be closed again. The status written below is
    // PromotionActivityStatusEnum.CLOSE, so that is the status to guard against — the
    // previous check compared against CommonStatusEnum.DISABLE, a different enum, and
    // let already-closed activities through.
    if (activity.getStatus().equals(PromotionActivityStatusEnum.CLOSE.getStatus())) {
        throw exception(DISCOUNT_ACTIVITY_CLOSE_FAIL_STATUS_CLOSED);
    }
    // Persist the status change.
    DiscountActivityDO updateObj = new DiscountActivityDO().setId(id).setStatus(PromotionActivityStatusEnum.CLOSE.getStatus());
    discountActivityMapper.updateById(updateObj);
}
|
// Closing a WAIT-status activity succeeds and the persisted row ends up in CLOSE status.
@Test
public void testCloseDiscountActivity() {
    // mock data: insert an existing activity in WAIT status
    DiscountActivityDO dbDiscountActivity = randomPojo(DiscountActivityDO.class,
        o -> o.setStatus(PromotionActivityStatusEnum.WAIT.getStatus()));
    discountActivityMapper.insert(dbDiscountActivity);// @Sql: 先插入出一条存在的数据
    // prepare the parameter
    Long id = dbDiscountActivity.getId();
    // invoke
    discountActivityService.closeDiscountActivity(id);
    // verify the persisted status flipped to CLOSE
    DiscountActivityDO discountActivity = discountActivityMapper.selectById(id);
    assertEquals(discountActivity.getStatus(), PromotionActivityStatusEnum.CLOSE.getStatus());
}
|
/**
 * Computes the relative path from {@code canonicalBaseFile} to
 * {@code canonicalFileToRelativize}. When the two paths have different roots (e.g.
 * different Windows drives) no relative path exists and the target's canonical path is
 * returned; identical paths yield ".".
 */
static String getRelativeFileInternal(File canonicalBaseFile, File canonicalFileToRelativize) {
    List<String> base = getPathComponents(canonicalBaseFile);
    List<String> target = getPathComponents(canonicalFileToRelativize);
    // Different roots: cannot relativize, fall back to the canonical path.
    if (!base.get(0).equals(target.get(0))) {
        return canonicalFileToRelativize.getPath();
    }
    // Count the shared leading components (root at index 0 is already known equal).
    int common = 1;
    while (common < base.size() && common < target.size()
            && base.get(common).equals(target.get(common))) {
        common++;
    }
    StringBuilder result = new StringBuilder();
    // One ".." per base component below the common prefix...
    for (int i = common; i < base.size(); i++) {
        if (result.length() > 0) {
            result.append(File.separatorChar);
        }
        result.append("..");
    }
    // ...then descend into the target's remaining components.
    for (int i = common; i < target.size(); i++) {
        if (result.length() > 0) {
            result.append(File.separatorChar);
        }
        result.append(target.get(i));
    }
    return result.length() == 0 ? "." : result.toString();
}
|
// Relativizing a strict descendant of the base yields only the descent components
// ("dir/dir2"), with no ".." segments.
@Test
public void pathUtilTest14() {
    File[] roots = File.listRoots();
    File basePath = new File(roots[0] + "some" + File.separatorChar);
    File relativePath = new File(roots[0] + "some" + File.separatorChar + "dir" + File.separatorChar + "dir2");
    String path = PathUtil.getRelativeFileInternal(basePath, relativePath);
    Assert.assertEquals(path, "dir" + File.separatorChar + "dir2");
}
|
/**
 * Fetches windowed rows for a key from the local materialized window store, keeping only
 * windows whose start and derived end both fall inside the requested bounds.
 *
 * @throws MaterializationException wrapping any failure from the underlying store
 */
@Override
public KsMaterializedQueryResult<WindowedRow> get(
    final GenericKey key,
    final int partition,
    final Range<Instant> windowStartBounds,
    final Range<Instant> windowEndBounds,
    final Optional<Position> position
) {
  try {
    final ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>> store =
        stateStore.store(QueryableStoreTypes.timestampedWindowStore(), partition);
    // The store is queried over a widened range; exact bound checks happen per row below.
    final Instant fetchLower = calculateLowerBound(windowStartBounds, windowEndBounds);
    final Instant fetchUpper = calculateUpperBound(windowStartBounds, windowEndBounds);
    final Builder<WindowedRow> rows = ImmutableList.builder();
    try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> iterator
        = cacheBypassFetcher.fetch(store, key, fetchLower, fetchUpper)) {
      while (iterator.hasNext()) {
        final KeyValue<Long, ValueAndTimestamp<GenericRow>> entry = iterator.next();
        final Instant start = Instant.ofEpochMilli(entry.key);
        final Instant end = start.plus(windowSize);
        // Keep only rows whose window start AND end satisfy the requested bounds.
        if (windowStartBounds.contains(start) && windowEndBounds.contains(end)) {
          final TimeWindow window = new TimeWindow(start.toEpochMilli(), end.toEpochMilli());
          rows.add(WindowedRow.of(
              stateStore.schema(),
              new Windowed<>(key, window),
              entry.value.value(),
              entry.value.timestamp()));
        }
      }
    }
    return KsMaterializedQueryResult.rowIterator(rows.build().iterator());
  } catch (final Exception e) {
    throw new MaterializationException("Failed to get value from materialized table", e);
  }
}
|
// When only end bounds are given, the store fetch range is derived by shifting the end
// bounds back by the window size (start = end - windowSize).
@Test
public void shouldFetchWithOnlyEndBounds() {
    // When:
    table.get(A_KEY, PARTITION, Range.all(), WINDOW_END_BOUNDS);
    // Then:
    verify(cacheBypassFetcher).fetch(
        eq(tableStore),
        any(),
        eq(WINDOW_END_BOUNDS.lowerEndpoint().minus(WINDOW_SIZE)),
        eq(WINDOW_END_BOUNDS.upperEndpoint().minus(WINDOW_SIZE))
    );
}
|
/**
 * Determines a file type (extension) from the hex string of the file's leading
 * bytes.
 *
 * <p>User-registered hex-prefix mappings take precedence; if none match, the
 * hex head is decoded and handed to the built-in magic-number detector.
 *
 * @param fileStreamHexHead hex string of the file's first bytes
 * @return the detected extension, or {@code null} if the input is blank
 */
public static String getType(String fileStreamHexHead) {
  // Blank input cannot be matched against any signature.
  if (StrUtil.isBlank(fileStreamHexHead)) {
    return null;
  }
  // First consult the registered hex-prefix table (case-insensitive match).
  if (MapUtil.isNotEmpty(FILE_TYPE_MAP)) {
    for (final Entry<String, String> typeEntry : FILE_TYPE_MAP.entrySet()) {
      final String hexPrefix = typeEntry.getKey();
      if (StrUtil.startWithIgnoreCase(fileStreamHexHead, hexPrefix)) {
        return typeEntry.getValue();
      }
    }
  }
  // Fall back to magic-number detection on the decoded head bytes.
  final byte[] headBytes = HexUtil.decodeHex(fileStreamHexHead);
  return FileMagicNumber.getMagicNumber(headBytes).getExtension();
}
|
// Manual check for OFD (Open Fixed-layout Document) detection. Disabled
// because it depends on a local file (e:/test.ofd) existing on the machine.
@Test
@Disabled
public void ofdTest() {
  final File file = FileUtil.file("e:/test.ofd");
  // The hex head is logged purely for diagnostics; detection below goes
  // through the File overload directly.
  final String hex = IoUtil.readHex64Upper(FileUtil.getInputStream(file));
  Console.log(hex);
  final String type = FileTypeUtil.getType(file);
  Console.log(type);
  assertEquals("ofd", type);
}
|
/**
 * Imports all classes reachable from the given URL.
 *
 * <p>Convenience overload that delegates to {@link #importUrls} with a
 * single-element list.
 *
 * @param url location to import classes from
 * @return the imported classes
 */
@PublicAPI(usage = ACCESS)
public JavaClasses importUrl(URL url) {
  return importUrls(singletonList(url));
}
|
// Verifies that reflect() on imported JavaClass/JavaField/JavaConstructor/
// JavaMethod/JavaCodeUnit instances resolves back to the corresponding
// java.lang.reflect objects, for both a top-level class and an inner class.
@Test
public void reflect_works() {
  JavaClasses classes = new ClassFileImporter().importUrl(getClass().getResource("testexamples/innerclassimport"));
  JavaClass calledClass = classes.get(CalledClass.class);
  assertThat(calledClass.reflect()).isEqualTo(CalledClass.class);
  assertThat(calledClass.getField("someString").reflect()).isEqualTo(field(CalledClass.class, "someString"));
  assertThat(calledClass.getConstructor().reflect()).isEqualTo(constructor(CalledClass.class));
  assertThat(calledClass.getConstructor(String.class).reflect()).isEqualTo(constructor(CalledClass.class, String.class));
  // Constructors are also reachable via the generic code-unit lookup.
  assertThat(calledClass.getCodeUnitWithParameterTypes(CONSTRUCTOR_NAME, String.class).reflect())
      .isEqualTo(constructor(CalledClass.class, String.class));
  JavaClass innerClass = classes.get(ClassWithInnerClass.Inner.class);
  assertThat(innerClass.reflect()).isEqualTo(ClassWithInnerClass.Inner.class);
  assertThat(innerClass.getMethod("call").reflect())
      .isEqualTo(method(ClassWithInnerClass.Inner.class, "call"));
}
|
/**
 * Writes the value to the wrapped store, then records the write in the changelog.
 *
 * <p>A {@code null} value carries no embedded timestamp, so the current record
 * context's timestamp is logged instead of one extracted from the payload.
 */
@Override
public void put(final Bytes key,
                final byte[] valueAndTimestamp) {
  wrapped().put(key, valueAndTimestamp);
  final long logTimestamp = valueAndTimestamp == null
      ? context.timestamp()
      : timestamp(valueAndTimestamp);
  log(key, rawValue(valueAndTimestamp), logTimestamp);
}
|
// delete() should remove the mapping and return the previously stored raw value.
@Test
public void shouldReturnOldValueOnDelete() {
  store.put(hi, rawThere);
  assertThat(store.delete(hi), equalTo(rawThere));
}
|
/**
 * Converts a parsed configuration value back to its canonical string form.
 *
 * @param parsedValue the parsed value; may be {@code null}
 * @param type        the config type the value was parsed as; if {@code null},
 *                    {@code toString()} is used directly
 * @return the string form, or {@code null} if {@code parsedValue} is {@code null};
 *         lists are joined with commas, classes rendered by fully-qualified name
 * @throws IllegalStateException if {@code type} is not a recognized type
 */
public static String convertToString(Object parsedValue, Type type) {
    if (parsedValue == null) {
        return null;
    }
    if (type == null) {
        return parsedValue.toString();
    }
    switch (type) {
        case BOOLEAN:
        case SHORT:
        case INT:
        case LONG:
        case DOUBLE:
        case STRING:
        case PASSWORD:
            return parsedValue.toString();
        case LIST:
            // Render as comma-separated values, mirroring the LIST parse format.
            List<?> valueList = (List<?>) parsedValue;
            return valueList.stream().map(Object::toString).collect(Collectors.joining(","));
        case CLASS:
            Class<?> clazz = (Class<?>) parsedValue;
            return clazz.getName();
        default:
            // Include the offending type in the message to aid debugging
            // (the previous message was just "Unknown type.").
            throw new IllegalStateException("Unknown type: " + type);
    }
}
|
// LIST values should round-trip to a comma-joined string; null stays null.
@Test
public void testConvertValueToStringList() {
  assertEquals("a,bc,d", ConfigDef.convertToString(Arrays.asList("a", "bc", "d"), Type.LIST));
  assertNull(ConfigDef.convertToString(null, Type.LIST));
}
|
/**
 * Returns a future of buffered pages for the given output buffer, starting at
 * {@code startingSequenceId} and bounded by approximately {@code maxSize} bytes.
 *
 * <p>Must NOT be called while holding this object's monitor — the checkState
 * below enforces that precondition before any other validation.
 *
 * @throws IllegalStateException    if the caller holds this object's lock
 * @throws NullPointerException     if {@code outputBufferId} is null
 * @throws IllegalArgumentException if {@code maxSize} is zero bytes
 */
@Override
public ListenableFuture<BufferResult> get(OutputBufferId outputBufferId, long startingSequenceId, DataSize maxSize)
{
  checkState(!Thread.holdsLock(this), "Can not get pages while holding a lock on this");
  requireNonNull(outputBufferId, "outputBufferId is null");
  checkArgument(maxSize.toBytes() > 0, "maxSize must be at least 1 byte");
  return getBuffer(outputBufferId).getPages(startingSequenceId, maxSize);
}
|
// In BROADCAST mode every buffer holds a reference to each page; a blocked
// writer is only released once ALL buffers have acknowledged the pages.
@Test
public void testAcknowledgementFreesWriters()
{
  BroadcastOutputBuffer buffer = createBroadcastBuffer(
      createInitialEmptyOutputBuffers(BROADCAST)
          .withBuffer(FIRST, BROADCAST_PARTITION_ID)
          .withBuffer(SECOND, BROADCAST_PARTITION_ID)
          .withNoMoreBufferIds(),
      sizeOfPages(2));
  // Add two pages, buffer is full
  addPage(buffer, createPage(1));
  addPage(buffer, createPage(2));
  assertQueueState(buffer, FIRST, 2, 0);
  // third page is blocked
  ListenableFuture<?> future = enqueuePage(buffer, createPage(3));
  // we should be blocked
  assertFalse(future.isDone());
  assertQueueState(buffer, FIRST, 3, 0);
  assertQueueState(buffer, SECOND, 3, 0);
  // acknowledge pages for first buffer, no space is freed
  // (get with startingSequenceId=2 implicitly acks pages 0 and 1)
  buffer.get(FIRST, 2, sizeOfPages(10)).cancel(true);
  assertFalse(future.isDone());
  // acknowledge pages for second buffer, which makes space in the buffer
  buffer.get(SECOND, 2, sizeOfPages(10)).cancel(true);
  // writer should not be blocked
  assertFutureIsDone(future);
  assertQueueState(buffer, SECOND, 1, 2);
}
|
/**
 * Performs a source split request using the default bundle-count limit and the
 * default Dataflow split-response API size limit.
 *
 * <p>Convenience wrapper around {@code performSplitWithApiLimit}.
 */
public static SourceOperationResponse performSplit(
    SourceSplitRequest request, PipelineOptions options) throws Exception {
  return performSplitWithApiLimit(
      request, options, DEFAULT_NUM_BUNDLES_LIMIT, DATAFLOW_SPLIT_RESPONSE_API_SIZE_LIMIT);
}
|
// A split whose serialized response exceeds the configured API size limit
// (here artificially set to 100) should fail with a message naming both the
// source range and the limit.
@Test
public void testTooLargeSplitResponseFails() throws Exception {
  com.google.api.services.dataflow.model.Source source =
      translateIOToCloudSource(CountingSource.upTo(1000), options);
  expectedException.expectMessage("[0, 1000)");
  expectedException.expectMessage("larger than the limit 100");
  performSplit(source, options, 8L, 10, 100L);
}
|
/**
 * Loads a UDTF (table function) from an annotated class and registers a
 * {@link TableFunctionFactory} for it with the function registry.
 *
 * <p>The class must carry {@code @UdtfDescription}; each method annotated with
 * {@code @Udtf} becomes one overload. A valid UDTF method must return a
 * parameterized {@code List}.
 *
 * @param theClass the class containing {@code @Udtf}-annotated methods
 * @param path     the path the class was loaded from (recorded in metadata)
 * @throws KsqlException if the class lacks {@code @UdtfDescription}, or if a
 *                       method is invalid and {@code throwExceptionOnLoadFailure}
 *                       is set; otherwise invalid methods are logged and skipped
 */
public void loadUdtfFromClass(
    final Class<?> theClass,
    final String path
) {
  final UdtfDescription udtfDescriptionAnnotation = theClass.getAnnotation(UdtfDescription.class);
  if (udtfDescriptionAnnotation == null) {
    // Fixed missing space: the old concatenation rendered "mustbe annotated".
    throw new KsqlException(String.format("Cannot load class %s. Classes containing UDTFs must "
        + "be annotated with @UdtfDescription.", theClass.getName()));
  }
  final String functionName = udtfDescriptionAnnotation.name();
  final String sensorName = "ksql-udtf-" + functionName;
  FunctionMetrics.initInvocationSensor(metrics, sensorName, "ksql-udtf", functionName + " udtf");
  final UdfMetadata metadata = new UdfMetadata(
      udtfDescriptionAnnotation.name(),
      udtfDescriptionAnnotation.description(),
      udtfDescriptionAnnotation.author(),
      udtfDescriptionAnnotation.version(),
      udtfDescriptionAnnotation.category(),
      path
  );
  final TableFunctionFactory factory = new TableFunctionFactory(metadata);
  for (final Method method : theClass.getMethods()) {
    if (method.getAnnotation(Udtf.class) != null) {
      final Udtf annotation = method.getAnnotation(Udtf.class);
      try {
        // UDTFs must return List...
        if (method.getReturnType() != List.class) {
          throw new KsqlException(String
              .format("UDTF functions must return a List. Class %s Method %s",
                  theClass.getName(), method.getName()
              ));
        }
        // ...and the List must be parameterized so the element type is known.
        final Type ret = method.getGenericReturnType();
        if (!(ret instanceof ParameterizedType)) {
          throw new KsqlException(String
              .format(
                  "UDTF functions must return a parameterized List. Class %s Method %s",
                  theClass.getName(), method.getName()
              ));
        }
        final Type typeArg = ((ParameterizedType) ret).getActualTypeArguments()[0];
        final ParamType returnType = FunctionLoaderUtils
            .getReturnType(method, typeArg, annotation.schema(), typeParser);
        final List<ParameterInfo> parameters = FunctionLoaderUtils
            .createParameters(method, functionName, typeParser);
        final KsqlTableFunction tableFunction =
            createTableFunction(method, FunctionName.of(functionName), returnType,
                parameters,
                annotation.description(),
                annotation
            );
        factory.addFunction(tableFunction);
      } catch (final KsqlException e) {
        // Load-failure policy: either fail fast or log-and-skip this method.
        if (throwExceptionOnLoadFailure) {
          throw e;
        } else {
          LOGGER.warn(
              "Failed to add UDTF to the MetaStore. name={} method={}",
              udtfDescriptionAnnotation.name(),
              method,
              e
          );
        }
      }
    }
  }
  functionRegistry.addTableFunctionFactory(factory);
}
|
// A UDTF method whose return type is not List must be rejected with a
// KsqlException (loader constructed with throwExceptionOnLoadFailure=true).
@Test
public void shouldNotLoadUdtfWithWrongReturnValue() {
  // Given:
  final MutableFunctionRegistry functionRegistry = new InternalFunctionRegistry();
  final SqlTypeParser typeParser = create(EMPTY);
  final UdtfLoader udtfLoader = new UdtfLoader(
      functionRegistry, empty(), typeParser, true
  );
  // When:
  final Exception e = assertThrows(
      KsqlException.class,
      () -> udtfLoader.loadUdtfFromClass(UdtfBadReturnValue.class, INTERNAL_PATH)
  );
  // Then:
  assertThat(e.getMessage(), containsString(
      "UDTF functions must return a List. Class io.confluent.ksql"
          + ".function.UdtfLoaderTest$UdtfBadReturnValue Method badReturn"));
}
|
/**
 * Unsupported: this node vector is read-only.
 *
 * <p>Fixed: the body previously referenced an undefined variable {@code e};
 * the companion test expects a {@code ReadOnlyException} to be thrown.
 *
 * @throws ReadOnlyException always
 */
public boolean addAll(Collection<? extends NODE> c) {
    throw new ReadOnlyException();
}
|
// The indexed addAll overload must also reject mutation with ReadOnlyException.
@Test
void require_that_addAllindex_throws_exception() {
  assertThrows(NodeVector.ReadOnlyException.class, () -> new TestNodeVector("foo").addAll(0, List.of(barNode())));
}
|
/**
 * Sends a consumer-offset update to the broker as a one-way request
 * (fire-and-forget; no response is awaited).
 *
 * @param addr          broker address
 * @param requestHeader offset-update request header
 * @param timeoutMillis send timeout in milliseconds
 */
public void updateConsumerOffsetOneway(
    final String addr,
    final UpdateConsumerOffsetRequestHeader requestHeader,
    final long timeoutMillis
) throws RemotingConnectException, RemotingTooMuchRequestException, RemotingTimeoutException, RemotingSendRequestException,
    InterruptedException {
    // Route through the VIP channel when enabled for this client config.
    final String brokerAddr = MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr);
    final RemotingCommand request =
        RemotingCommand.createRequestCommand(RequestCode.UPDATE_CONSUMER_OFFSET, requestHeader);
    this.remotingClient.invokeOneway(brokerAddr, request, timeoutMillis);
}
|
// Smoke test: the one-way offset update should complete without throwing
// against the mocked broker transport.
@Test
public void testUpdateConsumerOffsetOneway() throws RemotingException, InterruptedException {
  UpdateConsumerOffsetRequestHeader requestHeader = mock(UpdateConsumerOffsetRequestHeader.class);
  mqClientAPI.updateConsumerOffsetOneway(defaultBrokerAddr, requestHeader, defaultTimeout);
}
|
/**
 * Creates a dynamic proxy implementing {@code clazz} that forwards each call to
 * a method of the same name and parameter types on {@code delegate}.
 *
 * <p>If the delegate has no matching method, the proxy returns the zero value
 * for the method's return type ({@code null} for reference types, via
 * {@code PRIMITIVE_RETURN_VALUES}). Exceptions thrown by the delegate are
 * unwrapped from {@link InvocationTargetException} and rethrown as-is.
 *
 * <p>Fixed: replaced the raw {@code Class} local and raw {@code Class[]} with
 * {@code Class<?>}, and scoped the unavoidable proxy cast suppression.
 *
 * @param clazz    interface type the proxy must implement
 * @param delegate object that receives the forwarded calls
 * @return a proxy instance of {@code clazz}
 */
@SuppressWarnings("unchecked")
public static <T> T createDelegatingProxy(Class<T> clazz, final Object delegate) {
  final Class<?> delegateClass = delegate.getClass();
  return (T)
      Proxy.newProxyInstance(
          clazz.getClassLoader(),
          new Class<?>[] {clazz},
          (proxy, method, args) -> {
            try {
              Method delegateMethod =
                  findDelegateMethod(delegateClass, method.getName(), method.getParameterTypes());
              delegateMethod.setAccessible(true);
              return delegateMethod.invoke(delegate, args);
            } catch (NoSuchMethodException e) {
              // No matching delegate method: return the type's zero value
              // (null for reference return types).
              return PRIMITIVE_RETURN_VALUES.get(method.getReturnType().getName());
            } catch (InvocationTargetException e) {
              // Required to propagate the correct throwable.
              throw e.getTargetException();
            }
          });
}
|
// A delegate method whose parameter types do not match must NOT be selected;
// the proxy then falls back to the reference-type zero value (null).
@Test
public void createDelegatingProxy_wrongParamType() {
  DelegatingProxyFixture fixture =
      ReflectionHelpers.createDelegatingProxy(DelegatingProxyFixture.class, new Delegate());
  // verify the mismatched delegate method doesn't get matched
  assertThat(fixture.delegateMethodWrongParamType("value")).isNull();
}
|
/**
 * Starts a query clause on the given field name — shorthand entry point
 * building on a select-all-from-all-sources base query.
 *
 * @param fieldName the field to query on
 * @return a {@link Field} to chain conditions onto
 */
public static Field p(String fieldName) {
  return SELECT_ALL_FROM_SOURCES_ALL.where(fieldName);
}
|
// Builds a query mixing and/andnot at several nesting levels, then checks that
// hasPositiveSearchField / hasNegativeSearchField classify each field correctly
// — note that a field negated inside an already-negated group (e.g. nk3, k6)
// counts as positive overall.
@Test
void validate_positive_search_term_of_strings() {
  Query q = Q.p(Q.p("k1").contains("v1").and("k2").contains("v2").andnot("k3").contains("v3"))
      .andnot(Q.p("nk1").contains("nv1").and("nk2").contains("nv2").andnot("nk3").contains("nv3"))
      .and(Q.p("k4").contains("v4")
          .andnot(Q.p("k5").contains("v5").andnot("k6").contains("v6"))
      );
  assertTrue(q.hasPositiveSearchField("k1"));
  assertTrue(q.hasPositiveSearchField("k2"));
  assertTrue(q.hasPositiveSearchField("nk3"));
  assertTrue(q.hasPositiveSearchField("k6"));
  // The value-qualified overloads must match only the exact field/value pair.
  assertTrue(q.hasPositiveSearchField("k6", "v6"));
  assertFalse(q.hasPositiveSearchField("k6", "v5"));
  assertTrue(q.hasNegativeSearchField("k3"));
  assertTrue(q.hasNegativeSearchField("nk1"));
  assertTrue(q.hasNegativeSearchField("nk2"));
  assertTrue(q.hasNegativeSearchField("k5"));
  assertTrue(q.hasNegativeSearchField("k5", "v5"));
  assertFalse(q.hasNegativeSearchField("k5", "v4"));
}
|
/**
 * Fuses the given pipeline into runner-executed transforms plus executable
 * stages, using the greedy fusion algorithm.
 *
 * @param p the pipeline proto to fuse
 * @return the fused pipeline
 */
public static FusedPipeline fuse(Pipeline p) {
  return new GreedyPipelineFuser(p).fusedPipeline;
}
|
// A ParDo that declares a timer spec cannot be fused into an upstream stage:
// it must root its own executable stage. Expected fusion:
//   (impulse.out) -> parDo -> (parDo.out)
//   (parDo.out)   -> timer -> timer.out
@Test
public void parDoWithTimerRootsStage() {
  // (impulse.out) -> parDo -> (parDo.out)
  // (parDo.out) -> timer -> timer.out
  // timer has a timer spec which prevents it from fusing with an upstream ParDo
  PTransform parDoTransform =
      PTransform.newBuilder()
          .setUniqueName("ParDo")
          .putInputs("input", "impulse.out")
          .putOutputs("output", "parDo.out")
          .setSpec(
              FunctionSpec.newBuilder()
                  .setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
                  .setPayload(
                      ParDoPayload.newBuilder()
                          .setDoFn(FunctionSpec.newBuilder())
                          .build()
                          .toByteString()))
          .setEnvironmentId("common")
          .build();
  // Timers appear as both an input and an output of the declaring transform.
  PTransform timerTransform =
      PTransform.newBuilder()
          .setUniqueName("TimerParDo")
          .putInputs("input", "parDo.out")
          .putInputs("timer", "timer.out")
          .putOutputs("timer", "timer.out")
          .putOutputs("output", "output.out")
          .setSpec(
              FunctionSpec.newBuilder()
                  .setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
                  .setPayload(
                      ParDoPayload.newBuilder()
                          .setDoFn(FunctionSpec.newBuilder())
                          .putTimerFamilySpecs("timer", TimerFamilySpec.getDefaultInstance())
                          .build()
                          .toByteString()))
          .setEnvironmentId("common")
          .build();
  Components components =
      partialComponents
          .toBuilder()
          .putTransforms("parDo", parDoTransform)
          .putPcollections("parDo.out", pc("parDo.out"))
          .putTransforms("timer", timerTransform)
          .putPcollections("timer.out", pc("timer.out"))
          .putPcollections("output.out", pc("output.out"))
          .putEnvironments("common", Environments.createDockerEnvironment("common"))
          .build();
  FusedPipeline fused =
      GreedyPipelineFuser.fuse(
          Pipeline.newBuilder()
              .setComponents(components)
              .addRequirements(ParDoTranslation.REQUIRES_STATEFUL_PROCESSING_URN)
              .build());
  // Only the impulse stays runner-executed; everything else lands in stages.
  assertThat(
      fused.getRunnerExecutedTransforms(),
      containsInAnyOrder(
          PipelineNode.pTransform("impulse", components.getTransformsOrThrow("impulse"))));
  // The timer transform roots its own stage, consuming parDo.out.
  assertThat(
      fused.getFusedStages(),
      containsInAnyOrder(
          ExecutableStageMatcher.withInput("impulse.out")
              .withOutputs("parDo.out")
              .withTransforms("parDo"),
          ExecutableStageMatcher.withInput("parDo.out").withNoOutputs().withTransforms("timer")));
}
|
/**
 * Sorts the given array in the default (ascending) direction.
 *
 * <p>Delegates to {@code arraySortWithDirection} with direction "ASC".
 *
 * @param input the array to sort
 * @return the sorted list
 */
@Udf
public <T extends Comparable<? super T>> List<T> arraySortDefault(@UdfParameter(
    description = "The array to sort") final List<T> input) {
  return arraySortWithDirection(input, "ASC");
}
|
// The default sort direction is ascending, including for BIGINT (Long) arrays.
@Test
public void shouldSortBigInts() {
  // Given an unsorted list of longs:
  final List<Long> unsorted = Arrays.asList(1L, 3L, -2L);
  // When sorted with the default direction:
  final List<Long> sorted = udf.arraySortDefault(unsorted);
  // Then the elements come back ascending:
  assertThat(sorted, contains(-2L, 1L, 3L));
}
|
/**
 * Registers the given metric under the given name.
 *
 * <p>Convenience overload that builds a {@code MetricName} from the plain
 * string and delegates to the {@code MetricName}-based overload.
 *
 * @param name   the metric's name
 * @param metric the metric to register
 * @return {@code metric}, for chaining
 * @throws IllegalArgumentException if the name is already registered
 */
@SuppressWarnings("unchecked")
public <T extends Metric> T register(String name, T metric) throws IllegalArgumentException {
  return register(MetricName.build(name), metric);
}
|
// Registering a gauge should return the same instance and notify listeners
// via onGaugeAdded with the registered name and gauge.
@Test
public void registeringAGaugeTriggersANotification() throws Exception {
  assertThat(registry.register(THING, gauge))
      .isEqualTo(gauge);
  verify(listener).onGaugeAdded(THING, gauge);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.