Dataset preview header — two string columns pairing each focal method with its test case:

| focal_method (string, length 13–60.9k) | test_case (string, length 25–109k) |
|---|---|
/**
 * Builds the next set of fetch requests and delegates to {@code pollInternal},
 * wiring in this class's success and failure handlers.
 *
 * @param currentTimeMs current time in milliseconds (part of the poll
 *                      contract; not read directly by this implementation)
 * @return the {@code PollResult} produced by {@code pollInternal}
 */
@Override
public PollResult poll(long currentTimeMs) {
    return pollInternal(
        prepareFetchRequests(),
        this::handleFetchSuccess,
        this::handleFetchFailure
    );
}
|
@Test
public void testFetchOffsetOutOfRangeException() {
    // OffsetResetStrategy.NONE means out-of-range offsets are surfaced to the
    // caller instead of triggering an automatic reset.
    buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(),
        new ByteArrayDeserializer(), 2, IsolationLevel.READ_UNCOMMITTED);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    sendFetches();
    // Broker answers the fetch with OFFSET_OUT_OF_RANGE for tp0.
    client.prepareResponse(fullFetchResponse(tidp0, records, Errors.OFFSET_OUT_OF_RANGE, 100L, 0));
    networkClientDelegate.poll(time.timer(0));
    // With strategy NONE no reset may be scheduled.
    assertFalse(subscriptions.isOffsetResetNeeded(tp0));
    // The exception must be re-thrown on every subsequent collect, not just once.
    for (int i = 0; i < 2; i++) {
        OffsetOutOfRangeException e = assertThrows(OffsetOutOfRangeException.class, () ->
            collectFetch());
        assertEquals(singleton(tp0), e.offsetOutOfRangePartitions().keySet());
        assertEquals(0L, e.offsetOutOfRangePartitions().get(tp0).longValue());
    }
}
|
/**
 * Key-unaware overload: wraps the value-only mapper via {@code withKey} and
 * delegates to the key-aware {@code flatMapValues} variant.
 *
 * @param mapper maps each value to zero or more new values
 * @return a stream with the mapped values
 */
@Override
public <VR> KStream<K, VR> flatMapValues(final ValueMapper<? super V, ? extends Iterable<? extends VR>> mapper) {
    return flatMapValues(withKey(mapper));
}
|
@Test
public void shouldNotAllowNullNameOnFlatMapValuesWithKey() {
    // Passing a null Named argument must fail fast with a descriptive NPE.
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.flatMapValues((k, v) -> Collections.emptyList(), null));
    assertThat(exception.getMessage(), equalTo("named can't be null"));
}
|
/**
 * Parses a range string of the form {@code "<from>-<to>"}, or a single
 * {@code "<date>"} (interpreted as a one-day range), into a {@link DateRange}.
 * Returns {@code null} for null/empty input, for malformed input (more than
 * one '-' separator), or when {@link DateRange} rejects the parsed endpoints.
 *
 * @param dateRangeString the raw range text, may be {@code null} or empty
 * @return the parsed range, or {@code null} if the input cannot form one
 * @throws ParseException if an individual date token cannot be parsed
 */
DateRange getRange(String dateRangeString) throws ParseException {
    if (dateRangeString == null || dateRangeString.isEmpty()) {
        return null;
    }
    String[] dateArr = dateRangeString.split("-");
    if (dateArr.length > 2 || dateArr.length < 1) {
        // Malformed input (e.g. "a-b-c", or a bare "-" which splits to an
        // empty array) is treated as "no range" rather than an error.
        return null;
    }
    ParsedCalendar from = parseDateString(dateArr[0]);
    ParsedCalendar to;
    if (dateArr.length == 2) {
        to = parseDateString(dateArr[1]);
    } else {
        // Single date: parse the token again so 'from' and 'to' do not share
        // mutable calendar state.
        to = parseDateString(dateArr[0]);
    }
    try {
        return new DateRange(from, to);
    } catch (IllegalArgumentException ex) {
        // DateRange rejected the endpoint combination; treat as "no range".
        return null;
    }
}
|
@Test
public void testParseSimpleDateRangeWithoutYear() throws ParseException {
    // Range without a year should match any year; endpoints are inclusive.
    DateRange dateRange = dateRangeParser.getRange("Aug 10-Aug 14");
    assertFalse(dateRange.isInRange(getCalendar(2014, Calendar.AUGUST, 9)));   // day before
    assertTrue(dateRange.isInRange(getCalendar(2014, Calendar.AUGUST, 10)));   // lower bound
    assertTrue(dateRange.isInRange(getCalendar(2014, Calendar.AUGUST, 12)));   // interior
    assertTrue(dateRange.isInRange(getCalendar(2014, Calendar.AUGUST, 14)));   // upper bound
    assertFalse(dateRange.isInRange(getCalendar(2014, Calendar.AUGUST, 15)));  // day after
}
|
/**
 * Runs the given job via its background worker, then surfaces a pending
 * thread interrupt as an {@link InterruptedException}.
 *
 * @param job the job to execute
 * @throws Exception whatever the worker throws, or InterruptedException if
 *         the current thread was interrupted during the run
 */
@Override
public void run(Job job) throws Exception {
    getBackgroundJobWorker(job).run();
    // isInterrupted() does not clear the interrupt flag, so the flag remains
    // set for callers even though we throw here.
    if (Thread.currentThread().isInterrupted()) throw new InterruptedException();
}
|
@Test
void invokeJobMethodAlwaysResetsJobContextThreadLocal() {
final Job job = aJobInProgress().withJobDetails(this::throwingJobContext).build();
final AbstractBackgroundJobRunner backgroundJobRunner = getJobRunner(BackgroundJobWorker::new);
assertThatCode(() -> backgroundJobRunner.run(job)).isInstanceOf(Exception.class);
assertThat(ThreadLocalJobContext.getJobContext()).isNull();
}
|
/**
 * Returns the distinct elements of the given collection in first-seen
 * encounter order. A null or empty input yields an empty list.
 *
 * @param collection source collection, may be {@code null}
 * @param <T> element type
 * @return a new mutable {@code ArrayList} of the distinct elements
 */
public static <T> ArrayList<T> distinct(Collection<T> collection) {
    if (isEmpty(collection)) {
        return new ArrayList<>();
    }
    // A Set already contains no duplicates, so it can be copied directly.
    if (collection instanceof Set) {
        return new ArrayList<>(collection);
    }
    // LinkedHashSet drops duplicates while preserving encounter order.
    return new ArrayList<>(new LinkedHashSet<>(collection));
}
|
@Test
public void distinctTest() {
    // Duplicates (5, 10, 9) are removed; first-seen order is preserved.
    final ArrayList<Integer> distinct = CollUtil.distinct(ListUtil.of(5, 3, 10, 9, 0, 5, 10, 9));
    assertEquals(ListUtil.of(5, 3, 10, 9, 0), distinct);
}
|
/**
 * Returns a short diagnostic form: the simple class name followed by the
 * current session count, e.g. {@code Listener[sessionCount=3]}.
 */
@Override
public String toString() {
    final StringBuilder text = new StringBuilder(getClass().getSimpleName());
    text.append("[sessionCount=").append(getSessionCount()).append(']');
    return text.toString();
}
|
@Test
public void testToString() {
    // Only checks the contract loosely: non-null, non-empty representation.
    final String string = sessionListener.toString();
    assertNotNull("toString not null", string);
    assertFalse("toString not empty", string.isEmpty());
}
|
/**
 * Creates a {@code Serde<GenericKey>} by forwarding all arguments, in order,
 * to {@code createInner}; this method adds no behavior of its own.
 *
 * @param format serialization format info
 * @param schema persistence schema for the key
 * @param ksqlConfig server configuration
 * @param schemaRegistryClientFactory supplies the schema-registry client
 * @param loggerNamePrefix prefix for processing-log logger names
 * @param processingLogContext processing-log context
 * @param tracker optional callback for tracking serde operations
 * @return the created serde
 */
@Override
public Serde<GenericKey> create(
    final FormatInfo format,
    final PersistenceSchema schema,
    final KsqlConfig ksqlConfig,
    final Supplier<SchemaRegistryClient> schemaRegistryClientFactory,
    final String loggerNamePrefix,
    final ProcessingLogContext processingLogContext,
    final Optional<TrackedCallback> tracker
) {
    return createInner(
        format,
        schema,
        ksqlConfig,
        schemaRegistryClientFactory,
        loggerNamePrefix,
        processingLogContext,
        tracker
    );
}
|
@Test
public void shouldWrapInLoggingSerdeNonWindowed() {
    // When:
    factory.create(format, schema, config, srClientFactory, LOGGER_PREFIX, processingLogCxt,
        Optional.empty());
    // Then: the inner factory must have wrapped the serde for processing-log output.
    verify(innerFactory).wrapInLoggingSerde(any(), eq(LOGGER_PREFIX), eq(processingLogCxt), eq(Optional.of(queryId)));
}
|
/**
 * Evaluates the configured tag expressions against the pickle's tags.
 * With no expressions configured every pickle matches; otherwise all
 * expressions must evaluate to true.
 *
 * @param pickle the pickle to check
 * @return true if the pickle satisfies every expression
 */
@Override
public boolean test(Pickle pickle) {
    if (!expressions.isEmpty()) {
        final List<String> pickleTags = pickle.getTags();
        return expressions.stream()
            .allMatch(expr -> expr.evaluate(pickleTags));
    }
    return true;
}
|
@Test
void single_tag_predicate_matches_pickle_with_more_tags() {
    // Extra tags on the pickle must not prevent a match on the requested tag.
    Pickle pickle = createPickleWithTags("@FOO", "@BAR");
    TagPredicate predicate = createPredicate("@FOO");
    assertTrue(predicate.test(pickle));
}
|
/**
 * Creates a product SPU together with its SKUs in a single transaction.
 *
 * @param createReqVO the SPU creation request, including its SKU list
 * @return the id of the newly created SPU
 */
@Override
@Transactional(rollbackFor = Exception.class)
public Long createSpu(ProductSpuSaveReqVO createReqVO) {
    // Validate category and brand
    validateCategory(createReqVO.getCategoryId());
    brandService.validateProductBrand(createReqVO.getBrandId());
    // Validate the SKU list
    List<ProductSkuSaveReqVO> skuSaveReqList = createReqVO.getSkus();
    productSkuService.validateSkuList(skuSaveReqList, createReqVO.getSpecType());
    ProductSpuDO spu = BeanUtils.toBean(createReqVO, ProductSpuDO.class);
    // Initialize the SKU-derived attributes on the SPU
    initSpuFromSkus(spu, skuSaveReqList);
    // Insert the SPU
    productSpuMapper.insert(spu);
    // Insert the SKUs
    productSkuService.createSkuList(spu.getId(), skuSaveReqList);
    // Return the new SPU id
    return spu.getId();
}
|
@Test
public void testCreateSpu_success() {
    // Prepare arguments
    ProductSkuSaveReqVO skuCreateOrUpdateReqVO = randomPojo(ProductSkuSaveReqVO.class, o->{
        // Restrict to positive integers
        o.setCostPrice(generaInt());
        o.setPrice(generaInt());
        o.setMarketPrice(generaInt());
        o.setStock(generaInt());
        o.setFirstBrokeragePrice(generaInt());
        o.setSecondBrokeragePrice(generaInt());
        // Restrict fractions to two decimal places
        o.setWeight(RandomUtil.randomDouble(10,2, RoundingMode.HALF_UP));
        o.setVolume(RandomUtil.randomDouble(10,2, RoundingMode.HALF_UP));
    });
    ProductSpuSaveReqVO createReqVO = randomPojo(ProductSpuSaveReqVO.class,o->{
        o.setCategoryId(generateId());
        o.setBrandId(generateId());
        o.setSort(RandomUtil.randomInt(1,100)); // Restrict the sort range
        o.setGiveIntegral(generaInt()); // Restrict to positive integers
        o.setVirtualSalesCount(generaInt()); // Restrict to positive integers
        o.setSkus(newArrayList(skuCreateOrUpdateReqVO,skuCreateOrUpdateReqVO,skuCreateOrUpdateReqVO));
    });
    // Stub the category level so category validation passes.
    when(categoryService.getCategoryLevel(eq(createReqVO.getCategoryId()))).thenReturn(2);
    Long spu = productSpuService.createSpu(createReqVO);
    // The persisted SPU row must mirror the request.
    ProductSpuDO productSpuDO = productSpuMapper.selectById(spu);
    assertPojoEquals(createReqVO, productSpuDO);
}
|
/**
 * Delegates entirely to the superclass equality assertion; overridden only
 * to re-expose it on this subject type.
 *
 * @param expected the expected value, may be {@code null}
 */
@Override
public void isEqualTo(@Nullable Object expected) {
    super.isEqualTo(expected);
}
|
@Test
public void isEqualTo_WithoutToleranceParameter_Fail_PlusMinusZero() {
    // Exact (tolerance-free) comparison must distinguish 0.0f from -0.0f.
    expectFailureWhenTestingThat(array(0.0f)).isEqualTo(array(-0.0f));
    assertFailureValue("expected", "[-0.0]");
    assertFailureValue("but was", "[0.0]");
}
|
/**
 * Returns the distinct values of the given field, delegating directly to the
 * wrapped collection.
 *
 * @param fieldName the document field to collect distinct values for
 * @param tResultClass the type the values are mapped to
 * @return an iterable over the distinct values
 */
public <TResult> Iterable<TResult> distinct(String fieldName, Class<TResult> tResultClass) {
    return delegate.distinct(fieldName, tResultClass);
}
|
@Test
void distinct() {
    // NOTE(review): this test only inserts fixtures and never calls
    // distinct(...) or asserts anything — it exercises none of the behavior
    // its name implies. TODO: add a call to collection.distinct and verify
    // the returned values.
    final var collection = jacksonCollection("simple", Simple.class);
    final List<Simple> items = List.of(
        new Simple("000000000000000000000001", "foo"),
        new Simple("000000000000000000000002", "bar")
    );
    collection.insert(items);
}
|
/**
 * Reroutes the record to a new topic when its current topic matches the
 * configured regex; otherwise returns the record unchanged.
 *
 * @param record the record to (possibly) reroute
 * @return a copy targeting the rewritten topic, or the original record
 */
@Override
public R apply(R record) {
    final Matcher topicMatcher = regex.matcher(record.topic());
    if (!topicMatcher.matches()) {
        log.trace("Not rerouting topic '{}' as it does not match the configured regex", record.topic());
        return record;
    }
    final String reroutedTopic = topicMatcher.replaceFirst(replacement);
    log.trace("Rerouting from topic '{}' to new topic '{}'", record.topic(), reroutedTopic);
    return record.newRecord(reroutedTopic, record.kafkaPartition(), record.keySchema(), record.key(), record.valueSchema(), record.value(), record.timestamp());
}
|
@Test
public void addPrefix() {
    // "(.*)" captures the whole topic; "$1" splices it back after the prefix.
    assertEquals("prefix-orig", apply("(.*)", "prefix-$1", "orig"));
}
|
/**
 * Merges two KLL histogram estimators for a column.
 * When both are non-null and mergeable, the new sketch is folded into the old
 * one and the old estimator is returned. When only the new one exists it is
 * returned as-is. Otherwise (both null, or unmergeable) the old estimator —
 * possibly null — is returned unchanged.
 *
 * @param columnName column the estimators belong to (used for logging only)
 * @param oldEst previously accumulated estimator, may be {@code null}
 * @param newEst newly arrived estimator, may be {@code null}
 * @return the merged/selected estimator, or {@code null} if both inputs were null
 */
protected KllHistogramEstimator mergeHistogramEstimator(
    String columnName, KllHistogramEstimator oldEst, KllHistogramEstimator newEst) {
    if (oldEst != null && newEst != null) {
        if (oldEst.canMerge(newEst)) {
            LOG.trace("Merging old sketch {} with new sketch {}...", oldEst.getSketch(), newEst.getSketch());
            oldEst.mergeEstimators(newEst);
            LOG.trace("Resulting sketch is {}", oldEst.getSketch());
            return oldEst;
        }
        // NOTE(review): this debug line is only reached when canMerge() is
        // FALSE, yet it says "Merging" — looks misplaced/misleading; confirm
        // the intended message.
        LOG.debug("Merging histograms of column {}", columnName);
    } else if (newEst != null) {
        LOG.trace("Old sketch is empty, the new sketch is used {}", newEst.getSketch());
        return newEst;
    }
    return oldEst;
}
|
@Test
public void testMergeNullHistogramEstimators() {
    // Both estimators null: the merge must yield null rather than throw.
    assertNull(MERGER.mergeHistogramEstimator("", null, null));
}
|
/**
 * Extracts the host and port from a remote Pekko RPC URL.
 *
 * @param rpcURL the Pekko RPC URL, e.g. {@code pekko.tcp://system@host:port/...}
 * @return the socket address parsed from the URL
 * @throws Exception if the URL is malformed or lacks a host/port
 */
public static InetSocketAddress getInetSocketAddressFromRpcURL(String rpcURL) throws Exception {
    // Pekko URLs have the form schema://systemName@host:port/.... if it's a remote Pekko URL
    try {
        final Address address = getAddressFromRpcURL(rpcURL);
        if (address.host().isDefined() && address.port().isDefined()) {
            return new InetSocketAddress(address.host().get(), (int) address.port().get());
        } else {
            throw new MalformedURLException();
        }
    } catch (MalformedURLException e) {
        // Preserve the underlying parse failure as the cause instead of
        // discarding it (the original code dropped it entirely).
        throw new Exception("Could not retrieve InetSocketAddress from Pekko URL " + rpcURL, e);
    }
}
|
@Test
void getHostFromRpcURLHandlesIPv6AddressesSsl() throws Exception {
final String ipv6Address = "2001:db8:10:11:12:ff00:42:8329";
final int port = 1234;
final InetSocketAddress address = new InetSocketAddress(ipv6Address, port);
final String url =
"pekko.ssl.tcp://flink@[" + ipv6Address + "]:" + port + "/user/jobmanager";
final InetSocketAddress result = PekkoUtils.getInetSocketAddressFromRpcURL(url);
assertThat(result).isEqualTo(address);
}
|
/**
 * Derives the {@link QueryId} for a statement. Precedence:
 * <ol>
 *   <li>an explicit WITH query id (validated, upper-cased);</li>
 *   <li>source {@code CREATE TABLE}: CST prefix + table name + generated id;</li>
 *   <li>no sink: a "transient_" id with a random suffix;</li>
 *   <li>INSERT (not CREATE ... AS): INSERT prefix + generated id;</li>
 *   <li>CREATE OR REPLACE over an existing query: reuse that query's id;</li>
 *   <li>otherwise: CTAS/CSAS prefix + output node id + generated id.</li>
 * </ol>
 *
 * @param statement the statement being executed
 * @param engineContext used to look up existing queries writing to the sink
 * @param idGenerator generates monotonically increasing id suffixes
 * @param outputNode the physical plan's output node
 * @param createOrReplaceEnabled whether CREATE OR REPLACE is allowed
 * @param withQueryId explicit query id from the WITH clause, if any
 * @return the query id to use
 * @throws KsqlException if multiple queries already write to the sink
 * @throws UnsupportedOperationException if the sink exists and replacement is disabled
 */
static QueryId buildId(
    final Statement statement,
    final EngineContext engineContext,
    final QueryIdGenerator idGenerator,
    final OutputNode outputNode,
    final boolean createOrReplaceEnabled,
    final Optional<String> withQueryId) {
    if (withQueryId.isPresent()) {
        final String queryId = withQueryId.get().toUpperCase();
        validateWithQueryId(queryId);
        return new QueryId(queryId);
    }
    if (statement instanceof CreateTable && ((CreateTable) statement).isSource()) {
        // Use the CST name as part of the QueryID
        final String suffix = ((CreateTable) statement).getName().text().toUpperCase()
            + "_" + idGenerator.getNext().toUpperCase();
        return new QueryId(ReservedQueryIdsPrefixes.CST + suffix);
    }
    if (!outputNode.getSinkName().isPresent()) {
        // Transient (unsunk) query: random suffix keeps concurrent ids unique.
        final String prefix =
            "transient_" + outputNode.getSource().getLeftmostSourceNode().getAlias().text() + "_";
        return new QueryId(prefix + Math.abs(ThreadLocalRandom.current().nextLong()));
    }
    final KsqlStructuredDataOutputNode structured = (KsqlStructuredDataOutputNode) outputNode;
    if (!structured.createInto()) {
        return new QueryId(ReservedQueryIdsPrefixes.INSERT + idGenerator.getNext());
    }
    final SourceName sink = outputNode.getSinkName().get();
    final Set<QueryId> queriesForSink = engineContext.getQueryRegistry().getQueriesWithSink(sink);
    if (queriesForSink.size() > 1) {
        throw new KsqlException("REPLACE for sink " + sink + " is not supported because there are "
            + "multiple queries writing into it: " + queriesForSink);
    } else if (!queriesForSink.isEmpty()) {
        if (!createOrReplaceEnabled) {
            final String type = outputNode.getNodeOutputType().getKsqlType().toLowerCase();
            throw new UnsupportedOperationException(
                String.format(
                    "Cannot add %s '%s': A %s with the same name already exists",
                    type,
                    sink.text(),
                    type));
        }
        // Replacing the single existing query: keep its id.
        return Iterables.getOnlyElement(queriesForSink);
    }
    final String suffix = outputNode.getId().toString().toUpperCase()
        + "_" + idGenerator.getNext().toUpperCase();
    return new QueryId(
        outputNode.getNodeOutputType() == DataSourceType.KTABLE
            ? ReservedQueryIdsPrefixes.CTAS + suffix
            : ReservedQueryIdsPrefixes.CSAS + suffix
    );
}
|
@Test
public void shouldThrowIfWithQueryIdIsNotValid() {
    // When: the WITH query id contains a space (invalid character).
    final Exception e = assertThrows(
        Exception.class,
        () -> QueryIdUtil.buildId(statement, engineContext, idGenerator, plan,
            false, Optional.of("with space"))
    );
    // Then: the message reports the upper-cased, rejected id.
    assertThat(e.getMessage(), containsString(
        "Query IDs may contain only alphanumeric characters and '_'. Got: 'WITH SPACE'"));
}
|
/**
 * Returns the configured name of this instance.
 *
 * @return the name supplied at construction
 */
@Override
public String getName() {
    return name;
}
|
@Test
public void namePropagation() {
    // The name given at construction must be returned unchanged.
    then(timeLimiter.getName()).isEqualTo(NAME);
}
|
/**
 * Issues a CLUSTER INFO command, blocks for the reply, and repackages the
 * returned key/value pairs as a {@link ClusterInfo}.
 *
 * @return cluster state parsed from the CLUSTER INFO reply
 */
@Override
public ClusterInfo clusterGetClusterInfo() {
    RFuture<Map<String, String>> future =
        executorService.readAsync((String) null, StringCodec.INSTANCE, RedisCommands.CLUSTER_INFO);
    Map<String, String> rawInfo = syncFuture(future);
    Properties clusterProps = new Properties();
    rawInfo.forEach(clusterProps::setProperty);
    return new ClusterInfo(clusterProps);
}
|
@Test
public void testClusterGetClusterInfo() {
    testInCluster(connection -> {
        ClusterInfo info = connection.clusterGetClusterInfo();
        // A healthy cluster: no failed slots, all slots assigned and ok.
        assertThat(info.getSlotsFail()).isEqualTo(0);
        assertThat(info.getSlotsOk()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT);
        assertThat(info.getSlotsAssigned()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT);
    });
}
|
/**
 * Resolves a field reference against the given map: walks to the parent
 * container, then fetches the referenced key from it.
 *
 * @param data the converted event data to search
 * @param field the field reference to resolve
 * @return the referenced value, or {@code null} if the parent path is absent
 */
public static Object get(final ConvertedMap data, final FieldReference field) {
    final Object parent = findParent(data, field);
    if (parent == null) {
        return null;
    }
    return fetch(parent, field.getKey());
}
|
@Test
public void testAbsentBareGet() throws Exception {
    // Looking up a key that is not present ("baz") must yield null, not throw.
    Map<Serializable, Object> data = new HashMap<>();
    data.put("foo", "bar");
    String reference = "baz";
    assertNull(get(ConvertedMap.newFromMap(data), reference));
}
|
/**
 * Normalizes an endpoint URI, preferring the fast Camel parser and falling
 * back to the legacy normalizer for URIs it cannot handle.
 *
 * @param uri the endpoint URI to normalize
 * @return the normalized URI
 * @throws URISyntaxException if the URI cannot be parsed
 */
public static String normalizeUri(String uri) throws URISyntaxException {
    // try to parse using the simpler and faster Camel URI parser
    String[] parts = CamelURIParser.fastParseUri(uri);
    if (parts == null) {
        // the uri is complex and may have unsafe URL characters; fall back to
        // the legacy normalizer
        return doComplexNormalizeUri(uri);
    }
    // a special sentinel array marks a URI that needs no work at all
    if (parts == URI_ALREADY_NORMALIZED) {
        return uri;
    }
    // use the faster and more simple normalizer
    return doFastNormalizeUri(parts);
}
|
@Test
public void testNormalizeEndpointWithEqualSignInParameter() throws Exception {
    // Parameters are sorted (foo before selector) and unsafe characters in
    // the selector value are percent-encoded.
    String out = URISupport.normalizeUri("jms:queue:foo?selector=somekey='somevalue'&foo=bar");
    assertNotNull(out);
    // Camel will safe encode the URI
    assertEquals("jms://queue:foo?foo=bar&selector=somekey%3D%27somevalue%27", out);
}
|
/**
 * Builds an {@code InitWriterConfig} from environment-style key/value pairs,
 * ignoring any keys this config does not declare.
 *
 * @param map raw key/value pairs (e.g. environment variables)
 * @return the parsed configuration
 */
static InitWriterConfig fromMap(Map<String, String> map) {
    // Work on a copy restricted to the keys this config understands.
    Map<String, String> relevant = new HashMap<>(map);
    relevant.keySet().retainAll(InitWriterConfig.keyNames());
    return new InitWriterConfig(ConfigParameter.define(relevant, CONFIG_VALUES));
}
|
@Test
public void testFromMap() {
    InitWriterConfig config = InitWriterConfig.fromMap(ENV_VARS);
    assertThat(config.getNodeName(), is("localhost"));
    assertThat(config.getRackTopologyKey(), is("failure-domain.beta.kubernetes.io/zone"));
    assertThat(config.isExternalAddress(), is(true));
    // Address type was not supplied in ENV_VARS, so it stays unset.
    assertThat(config.getAddressType(), is(nullValue()));
}
|
/**
 * Un-named overload: delegates to the named {@code outerJoin} variant with an
 * empty internal name.
 *
 * @param other the table to join with
 * @param joiner combines values from both sides
 * @return the joined table
 */
@Override
public <V1, R> KTable<K, R> outerJoin(final KTable<K, V1> other,
    final ValueJoiner<? super V, ? super V1, ? extends R> joiner) {
    return outerJoin(other, joiner, NamedInternal.empty());
}
|
@Test
public void shouldNotAllowNullJoinerOnOuterJoin() {
    // A null joiner must be rejected eagerly with an NPE.
    assertThrows(NullPointerException.class, () -> table.outerJoin(table, null));
}
|
/**
 * Computes this broker's local share of a resource-group quota from the
 * configured limit, this broker's usage, and the usage reported by all
 * brokers. Returns -1 when the limit is unconfigured, the configured value
 * when total usage is zero, and otherwise the local usage adjusted by its
 * proportional share of the residual (configured minus total), floored at 1.
 *
 * @param confUsage configured limit; negative means "not configured"
 * @param myUsage this broker's observed usage
 * @param allUsages per-broker usage reports (summed into the total)
 * @return the new local quota, or -1 if the limit is not configured
 * @throws PulsarAdminException if local or total usage is negative
 */
@Override
public long computeLocalQuota(long confUsage, long myUsage, long[] allUsages) throws PulsarAdminException {
    // ToDo: work out the initial conditions: we may allow a small number of "first few iterations" to go
    // unchecked as we get some history of usage, or follow some other "TBD" method.
    if (confUsage < 0) {
        // This can happen if the RG is not configured with this particular limit (message or byte count) yet.
        val retVal = -1;
        if (log.isDebugEnabled()) {
            log.debug("Configured usage ({}) is not set; returning a special value ({}) for calculated quota",
                confUsage, retVal);
        }
        return retVal;
    }
    long totalUsage = 0;
    for (long usage : allUsages) {
        totalUsage += usage;
    }
    if (myUsage < 0 || totalUsage < 0) {
        String errMesg = String.format("Local usage (%d) or total usage (%d) is negative",
            myUsage, totalUsage);
        log.error(errMesg);
        throw new PulsarAdminException(errMesg);
    }
    // If the total usage is zero (which may happen during initial transients), just return the configured value.
    // The caller is expected to check the value returned, or not call here with a zero global usage.
    // [This avoids a division by zero when calculating the local share.]
    if (totalUsage == 0) {
        if (log.isDebugEnabled()) {
            log.debug("computeLocalQuota: totalUsage is zero; "
                    + "returning the configured usage ({}) as new local quota",
                confUsage);
        }
        return confUsage;
    }
    if (myUsage > totalUsage) {
        String errMesg = String.format("Local usage (%d) is greater than total usage (%d)",
            myUsage, totalUsage);
        // Log as a warning [in case this can happen transiently (?)].
        log.warn(errMesg);
    }
    // How much unused capacity is left over?
    float residual = confUsage - totalUsage;
    // New quota is the old usage incremented by any residual as a ratio of the local usage to the total usage.
    // This should result in the calculatedQuota increasing proportionately if total usage is less than the
    // configured usage, and reducing proportionately if the total usage is greater than the configured usage.
    // Capped to 1, to prevent negative or zero setting of quota.
    // the rate limiter code assumes that rate value of 0 or less to mean that no rate limit should be applied
    float myUsageFraction = (float) myUsage / totalUsage;
    float calculatedQuota = max(myUsage + residual * myUsageFraction, 1);
    val longCalculatedQuota = (long) calculatedQuota;
    if (log.isDebugEnabled()) {
        log.debug("computeLocalQuota: myUsage={}, totalUsage={}, myFraction={}; newQuota returned={} [long: {}]",
            myUsage, totalUsage, myUsageFraction, calculatedQuota, longCalculatedQuota);
    }
    return longCalculatedQuota;
}
|
@Test
public void testRQCalcGlobUsedLessThanConfigTest() throws PulsarAdminException {
    // Total usage (40) is below the configured limit (100), so the local
    // quota must grow above the current local usage.
    final long config = 100;
    final long localUsed = 20;
    final long[] allUsage = { 40 };
    final long newQuota = this.rqCalc.computeLocalQuota(config, localUsed, allUsage);
    Assert.assertTrue(newQuota > localUsed);
}
|
/**
 * Computes the sample standard deviation of the recorded values using a
 * two-pass algorithm (mean first, then squared deviations), which avoids
 * numeric overflow. A sample of size zero or one has no spread.
 *
 * @return the sample standard deviation, or 0 for fewer than two values
 */
@Override
public double getStdDev() {
    if (values.length <= 1) {
        return 0;
    }
    final double mean = getMean();
    double sumOfSquares = 0;
    for (int i = 0; i < values.length; i++) {
        final double deviation = values[i] - mean;
        sumOfSquares += deviation * deviation;
    }
    // Bessel's correction: divide by (n - 1) for the sample variance.
    return Math.sqrt(sumOfSquares / (values.length - 1));
}
|
@Test
public void calculatesAStdDevOfZeroForAnEmptySnapshot() {
    // Zero samples: no spread is defined, so the std-dev must be 0.
    final Snapshot emptySnapshot = new UniformSnapshot(new long[]{});
    assertThat(emptySnapshot.getStdDev())
        .isZero();
}
|
/**
 * Stream overload: materializes the stream's iterator and delegates to the
 * iterator-based {@code join}.
 *
 * @param parts the items to join
 * @return the joined string
 */
public String join(final Stream<?> parts) {
    return join(parts.iterator());
}
|
@Test
public void shouldHandleTwoItems() {
    // Two items are joined with the configured final separator ("or").
    assertThat(joiner.join(ImmutableList.of(1, 2)), is("1 or 2"));
}
|
/**
 * Creates an alarm id of the form {@code <deviceId>:<uniqueIdentifier>}.
 * Arguments are validated BEFORE they are used to build the super-class id;
 * previously the checks ran after {@code super(...)}, so a null device id
 * produced an unexplained NPE from {@code id.toString()} and a null/empty
 * identifier was concatenated before being rejected.
 *
 * @param id the device the alarm belongs to; must not be null
 * @param uniqueIdentifier alarm-unique suffix; must be non-null and non-empty
 */
private AlarmId(DeviceId id, String uniqueIdentifier) {
    super(checkedDeviceId(id) + ":" + checkedIdentifier(uniqueIdentifier));
}

// Validates the device id before it is dereferenced in the super() call.
private static String checkedDeviceId(DeviceId id) {
    checkNotNull(id, "device id must not be null");
    return id.toString();
}

// Validates the identifier before it is concatenated in the super() call.
private static String checkedIdentifier(String uniqueIdentifier) {
    checkNotNull(uniqueIdentifier, "unique identifier must not be null");
    checkArgument(!uniqueIdentifier.isEmpty(), "unique identifier must not be empty");
    return uniqueIdentifier;
}
|
@Test
public void testNonEquality() {
    // Same device but different unique identifiers must produce unequal ids.
    final AlarmId id1 = AlarmId.alarmId(DEVICE_ID, UNIQUE_ID_1);
    final AlarmId id2 = AlarmId.alarmId(DEVICE_ID, UNIQUE_ID_2);
    assertThat(id1, is(not(id2)));
}
|
/**
 * Creates a folder. A top-level (container) path is created as a new GCS
 * bucket; any deeper path is represented by uploading a zero-byte
 * placeholder object via the touch feature.
 *
 * @param folder the folder to create
 * @param status transfer status carrying region/storage-class for buckets
 * @return the created path with updated type flags and attributes
 * @throws BackgroundException if the service call fails
 */
@Override
public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException {
    try {
        if(containerService.isContainer(folder)) {
            // Top-level folder: create an actual bucket in the account.
            final Storage.Buckets.Insert request = session.getClient().buckets().insert(session.getHost().getCredentials().getUsername(),
                new Bucket()
                    .setLocation(status.getRegion())
                    .setStorageClass(status.getStorageClass())
                    .setName(containerService.getContainer(folder).getName()));
            final Bucket bucket = request.execute();
            final EnumSet<Path.Type> type = EnumSet.copyOf(folder.getType());
            type.add(Path.Type.volume);
            return folder.withType(type).withAttributes(new GoogleStorageAttributesFinderFeature(session).toAttributes(bucket));
        }
        else {
            final EnumSet<Path.Type> type = EnumSet.copyOf(folder.getType());
            type.add(Path.Type.placeholder);
            // Add placeholder object
            return new GoogleStorageTouchFeature(session).withWriter(writer).touch(folder.withType(type),
                status.withMime(MIMETYPE));
        }
    }
    catch(IOException e) {
        throw new GoogleStorageExceptionMappingService().map("Cannot create folder {0}", e, folder);
    }
}
|
@Test
public void testMakeBucket() throws Exception {
    // Create a bucket with a random lowercase name in region "us".
    final Path test = new Path(new DefaultHomeFinderService(session).find(),
        new AsciiRandomStringService().random().toLowerCase(Locale.ROOT), EnumSet.of(Path.Type.directory, Path.Type.volume));
    new GoogleStorageDirectoryFeature(session).mkdir(test, new TransferStatus().withRegion("us"));
    assertTrue(new GoogleStorageFindFeature(session).find(test));
    // Creating the same bucket again must conflict.
    assertThrows(ConflictException.class, () -> new GoogleStorageDirectoryFeature(session).mkdir(test, new TransferStatus()));
    // Clean up the bucket created by this test.
    new GoogleStorageDeleteFeature(session).delete(Collections.<Path>singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Merges several node-to-attributes responses into one. Null responses and
 * null maps are skipped. When the same node appears in more than one
 * response its attribute sets are UNIONED — the previous implementation used
 * {@code putAll}, which silently discarded the earlier response's attributes
 * for that node.
 *
 * @param responses responses from sub-clusters; entries may be {@code null}
 * @return a single response covering all nodes
 */
public static GetNodesToAttributesResponse mergeNodesToAttributesResponse(
    Collection<GetNodesToAttributesResponse> responses) {
  Map<String, Set<NodeAttribute>> attributesMap = new HashMap<>();
  for (GetNodesToAttributesResponse response : responses) {
    if (response != null && response.getNodeToAttributes() != null) {
      response.getNodeToAttributes().forEach((node, attributes) ->
          attributesMap.merge(node, attributes, (existing, incoming) -> {
            // Union rather than overwrite when a node is reported twice.
            Set<NodeAttribute> union = new java.util.HashSet<>(existing);
            union.addAll(incoming);
            return union;
          }));
    }
  }
  return GetNodesToAttributesResponse.newInstance(attributesMap);
}
|
@Test
public void testMergeNodesToAttributesResponse() {
    // normal response1
    NodeAttribute gpu = NodeAttribute.newInstance(NodeAttribute.PREFIX_CENTRALIZED, "GPU",
        NodeAttributeType.STRING, "nvida");
    NodeAttribute os = NodeAttribute.newInstance(NodeAttribute.PREFIX_CENTRALIZED, "OS",
        NodeAttributeType.STRING, "windows64");
    NodeAttribute dist = NodeAttribute.newInstance(NodeAttribute.PREFIX_DISTRIBUTED, "VERSION",
        NodeAttributeType.STRING, "3_0_2");
    Map<String, Set<NodeAttribute>> node1Map = new HashMap<>();
    node1Map.put("node1", ImmutableSet.of(gpu, os, dist));
    GetNodesToAttributesResponse response1 = GetNodesToAttributesResponse.newInstance(node1Map);
    // normal response2
    NodeAttribute docker = NodeAttribute.newInstance(NodeAttribute.PREFIX_DISTRIBUTED, "DOCKER",
        NodeAttributeType.STRING, "docker0");
    Map<String, Set<NodeAttribute>> node2Map = new HashMap<>();
    node2Map.put("node2", ImmutableSet.of(docker));
    GetNodesToAttributesResponse response2 = GetNodesToAttributesResponse.newInstance(node2Map);
    // empty response3
    GetNodesToAttributesResponse response3 =
        GetNodesToAttributesResponse.newInstance(new HashMap<>());
    // null response4
    GetNodesToAttributesResponse response4 = null;
    List<GetNodesToAttributesResponse> responses = new ArrayList<>();
    responses.add(response1);
    responses.add(response2);
    responses.add(response3);
    responses.add(response4);
    // Merge must tolerate the empty and null entries and keep both nodes.
    GetNodesToAttributesResponse response =
        RouterYarnClientUtils.mergeNodesToAttributesResponse(responses);
    Assert.assertNotNull(response);
    Map<String, Set<NodeAttribute>> hostToAttrs = response.getNodeToAttributes();
    Assert.assertNotNull(hostToAttrs);
    Assert.assertEquals(2, hostToAttrs.size());
    Assert.assertTrue(hostToAttrs.get("node1").contains(dist));
    Assert.assertTrue(hostToAttrs.get("node1").contains(gpu));
    Assert.assertTrue(hostToAttrs.get("node1").contains(os));
    Assert.assertTrue(hostToAttrs.get("node2").contains(docker));
}
|
/**
 * Handles a GetTelemetrySubscriptions request: resolves (or lazily creates)
 * the client instance id, fetches/creates the client instance, validates the
 * request timing, and returns the current subscription.
 *
 * @param request the subscription request
 * @param requestContext context of the originating request
 * @return the subscription response, or an error response if validation fails
 */
public GetTelemetrySubscriptionsResponse processGetTelemetrySubscriptionRequest(
    GetTelemetrySubscriptionsRequest request, RequestContext requestContext) {
    long now = time.milliseconds();
    // orElseGet keeps id generation lazy: the previous orElse(...) called
    // generateNewClientId() eagerly on EVERY request, even when the client
    // already supplied a valid instance id.
    Uuid clientInstanceId = Optional.ofNullable(request.data().clientInstanceId())
        .filter(id -> !id.equals(Uuid.ZERO_UUID))
        .orElseGet(() -> generateNewClientId());
    /*
    Get the client instance from the cache or create a new one. If subscription has changed
    since the last request, then the client instance will be re-evaluated. Validation of the
    request will be done after the client instance is created. If client issues another get
    telemetry request prior to push interval, then the client should get a throttle error but if
    the subscription has changed since the last request then the client should get the updated
    subscription immediately.
    */
    ClientMetricsInstance clientInstance = clientInstance(clientInstanceId, requestContext);
    try {
        // Validate the get request parameters for the client instance.
        validateGetRequest(request, clientInstance, now);
    } catch (ApiException exception) {
        return request.getErrorResponse(0, exception);
    }
    clientInstance.lastKnownError(Errors.NONE);
    return createGetSubscriptionResponse(clientInstanceId, clientInstance);
}
|
@Test
public void testGetTelemetrySameClientImmediateRetryFail() throws Exception {
    // First request without an instance id: broker assigns one successfully.
    GetTelemetrySubscriptionsRequest request = new GetTelemetrySubscriptionsRequest.Builder(
        new GetTelemetrySubscriptionsRequestData(), true).build();
    GetTelemetrySubscriptionsResponse response = clientMetricsManager.processGetTelemetrySubscriptionRequest(
        request, ClientMetricsTestUtils.requestContext());
    Uuid clientInstanceId = response.data().clientInstanceId();
    assertNotNull(clientInstanceId);
    assertEquals(Errors.NONE, response.error());
    // Immediate retry with the same instance id must be throttled.
    request = new GetTelemetrySubscriptionsRequest.Builder(
        new GetTelemetrySubscriptionsRequestData().setClientInstanceId(clientInstanceId), true).build();
    response = clientMetricsManager.processGetTelemetrySubscriptionRequest(
        request, ClientMetricsTestUtils.requestContext());
    assertEquals(Errors.THROTTLING_QUOTA_EXCEEDED, response.error());
    assertEquals((double) 1, getMetric(ClientMetricsManager.ClientMetricsStats.INSTANCE_COUNT).metricValue());
    // Should register 1 throttle metric.
    assertEquals((double) 1, getMetric(ClientMetricsManager.ClientMetricsStats.THROTTLE + "-count").metricValue());
    assertTrue((double) getMetric(ClientMetricsManager.ClientMetricsStats.THROTTLE + "-rate").metricValue() > 0);
}
|
/**
 * Fetches the status of a job by requesting its report and converting it to
 * the old-API {@code JobStatus}. Returns {@code null} if no report exists.
 *
 * @param oldJobID the old-API job id
 * @return the job status, or {@code null} when no report is available
 * @throws IOException if the remote invocation fails
 */
public JobStatus getJobStatus(JobID oldJobID) throws IOException {
    // Translate the old-API id into the YARN representation.
    org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
        TypeConverter.toYarn(oldJobID);
    GetJobReportRequest reportRequest =
        recordFactory.newRecordInstance(GetJobReportRequest.class);
    reportRequest.setJobId(jobId);
    GetJobReportResponse reportResponse = (GetJobReportResponse) invoke("getJobReport",
        GetJobReportRequest.class, reportRequest);
    JobReport report = reportResponse.getJobReport();
    if (report == null) {
        return null;
    }
    if (StringUtils.isEmpty(report.getJobFile())) {
        // Backfill the job file path when the report does not carry one.
        report.setJobFile(MRApps.getJobFile(conf, report.getUser(), oldJobID));
    }
    String historyTrackingUrl = report.getTrackingUrl();
    String effectiveUrl = StringUtils.isNotEmpty(historyTrackingUrl)
        ? historyTrackingUrl : trackingUrl;
    return TypeConverter.fromYarn(report, effectiveUrl);
}
|
@Test
public void testJobReportFromHistoryServer() throws Exception {
    // RM knows nothing about the app, so the report must come from the
    // history server proxy.
    MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);
    when(historyServerProxy.getJobReport(getJobReportRequest())).thenReturn(
        getJobReportResponseFromHistoryServer());
    ResourceMgrDelegate rm = mock(ResourceMgrDelegate.class);
    when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId()))
        .thenReturn(null);
    ClientServiceDelegate clientServiceDelegate = getClientServiceDelegate(
        historyServerProxy, rm);
    JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
    Assert.assertNotNull(jobStatus);
    Assert.assertEquals("TestJobFilePath", jobStatus.getJobFile());
    Assert.assertEquals("http://TestTrackingUrl", jobStatus.getTrackingUrl());
    Assert.assertEquals(1.0f, jobStatus.getMapProgress(), 0.0f);
    Assert.assertEquals(1.0f, jobStatus.getReduceProgress(), 0.0f);
}
|
/**
 * Cancels the Dataflow job, absorbing duplicate cancel() calls: only the
 * first caller performs the cancellation (via an atomically installed
 * FutureTask); later and concurrent callers block on the same result.
 *
 * @return the job state after the cancel attempt
 * @throws IOException if the cancel could not be performed or the wait was
 *         interrupted
 */
@Override
public State cancel() throws IOException {
    // Enforce that a cancel() call on the job is done at most once - as
    // a workaround for Dataflow service's current bugs with multiple
    // cancellation, where it may sometimes return an error when cancelling
    // a job that was already cancelled, but still report the job state as
    // RUNNING.
    // To partially work around these issues, we absorb duplicate cancel()
    // calls. This, of course, doesn't address the case when the job terminates
    // externally almost concurrently to calling cancel(), but at least it
    // makes it possible to safely call cancel() multiple times and from
    // multiple threads in one program.
    FutureTask<State> tentativeCancelTask =
        new FutureTask<>(
            () -> {
                Job content = new Job();
                content.setProjectId(getProjectId());
                String currentJobId = getJobId();
                content.setId(currentJobId);
                content.setRequestedState("JOB_STATE_CANCELLED");
                try {
                    Job job = dataflowClient.updateJob(currentJobId, content);
                    return MonitoringUtil.toState(job.getCurrentState());
                } catch (IOException e) {
                    State state = getState();
                    if (state.isTerminal()) {
                        LOG.warn("Cancel failed because job is already terminated. State is {}", state);
                        return state;
                    } else if (e.getMessage().contains("has terminated")) {
                        // This handles the case where the getState() call above returns RUNNING but the
                        // cancel was rejected because the job is in fact done. Hopefully, someday we can
                        // delete this code if there is better consistency between the State and whether
                        // Cancel succeeds.
                        //
                        // Example message:
                        // Workflow modification failed. Causes: (7603adc9e9bff51e): Cannot perform
                        // operation 'cancel' on Job: 2017-04-01_22_50_59-9269855660514862348. Job has
                        // terminated in state SUCCESS: Workflow job:
                        // 2017-04-01_22_50_59-9269855660514862348 succeeded.
                        LOG.warn("Cancel failed because job is already terminated.", e);
                        return state;
                    } else {
                        String errorMsg =
                            String.format(
                                "Failed to cancel job in state %s, "
                                    + "please go to the Developers Console to cancel it manually: %s",
                                state,
                                MonitoringUtil.getJobMonitoringPageURL(
                                    getProjectId(), getRegion(), getJobId()));
                        LOG.warn(errorMsg);
                        throw new IOException(errorMsg, e);
                    }
                }
            });
    if (cancelState.compareAndSet(null, tentativeCancelTask)) {
        // This thread should perform cancellation, while others will
        // only wait for the result.
        cancelState.get().run();
    }
    try {
        // All callers converge on the single installed task's result.
        return cancelState.get().get();
    } catch (InterruptedException | ExecutionException e) {
        throw new IOException(e);
    }
}
|
@Test
public void testCancelTerminatedJob() throws IOException {
    // The job is already FAILED and the update (cancel) call throws; cancel()
    // must fall back to reporting the terminal state instead of failing.
    Dataflow.Projects.Locations.Jobs.Get statusRequest =
        mock(Dataflow.Projects.Locations.Jobs.Get.class);
    Job statusResponse = new Job();
    statusResponse.setCurrentState("JOB_STATE_FAILED");
    when(mockJobs.get(PROJECT_ID, REGION_ID, JOB_ID)).thenReturn(statusRequest);
    when(statusRequest.execute()).thenReturn(statusResponse);
    Dataflow.Projects.Locations.Jobs.Update update =
        mock(Dataflow.Projects.Locations.Jobs.Update.class);
    when(mockJobs.update(eq(PROJECT_ID), eq(REGION_ID), eq(JOB_ID), any(Job.class)))
        .thenReturn(update);
    when(update.execute()).thenThrow(new IOException());
    DataflowPipelineJob job =
        new DataflowPipelineJob(DataflowClient.create(options), JOB_ID, options, null);
    assertEquals(State.FAILED, job.cancel());
    // Verify exactly one cancel update and one status fetch were issued.
    Job content = new Job();
    content.setProjectId(PROJECT_ID);
    content.setId(JOB_ID);
    content.setRequestedState("JOB_STATE_CANCELLED");
    verify(mockJobs).update(eq(PROJECT_ID), eq(REGION_ID), eq(JOB_ID), eq(content));
    verify(mockJobs).get(PROJECT_ID, REGION_ID, JOB_ID);
    verifyNoMoreInteractions(mockJobs);
}
|
/**
 * Factory for a {@code ConfigRepoConfig} with the given material, plugin and
 * id already applied.
 *
 * @param repo the material backing the config repository
 * @param pluginId the config plugin to parse the repository with
 * @param id the repository's unique id
 * @return the populated config-repo configuration
 */
public static ConfigRepoConfig createConfigRepoConfig(MaterialConfig repo, String pluginId, String id) {
    final ConfigRepoConfig configRepo = new ConfigRepoConfig();
    configRepo.setRepo(repo);
    configRepo.setPluginId(pluginId);
    configRepo.setId(id);
    return configRepo;
}
|
@Test
public void validateTree_configRepoShouldBeInvalidIfMaterialConfigHasErrors() {
    CruiseConfig cruiseConfig = new BasicCruiseConfig();
    MaterialConfig materialConfig = new GitMaterialConfig(); // should be invalid since URL is not set
    ConfigRepoConfig configRepoConfig = ConfigRepoConfig.createConfigRepoConfig(materialConfig, "plug", "id");
    cruiseConfig.setConfigRepos(new ConfigReposConfig(configRepoConfig));
    ConfigSaveValidationContext validationContext = ConfigSaveValidationContext.forChain(cruiseConfig);
    configRepoConfig.validateTree(validationContext);
    // Errors land on the nested material, not on the config-repo itself.
    assertThat(configRepoConfig.errors().isEmpty()).isTrue();
    assertThat(configRepoConfig.getRepo().errors().isEmpty()).isFalse();
}
|
/**
 * Decrements only the reserved capacity tracked for the given transaction,
 * leaving the node-wide used-capacity counter untouched (the {@code false}
 * flag passed to decrement0).
 */
@Override
public void decrementOnlyReserved(@Nonnull UUID txnId) {
    decrement0(txnId, false);
}
|
/**
 * Verifies that decrementOnlyReserved() clears the per-transaction reserved
 * count but does NOT reduce the node-wide used-capacity counter.
 */
@Test
public void decrementOnlyReserved() {
    UUID txnId = UuidUtil.newSecureUUID();
    for (int i = 0; i < 11; i++) {
        counter.increment(txnId, false);
    }
    for (int i = 0; i < 11; i++) {
        counter.decrementOnlyReserved(txnId);
    }
    Map<UUID, Long> countPerTxnId = counter.getReservedCapacityCountPerTxnId();
    // Reserved count for the txn is fully released...
    assertNull(countPerTxnId.get(txnId));
    // ...but node-wide used capacity is unchanged.
    assertEquals(11L, nodeWideUsedCapacityCounter.currentValue());
}
|
/**
 * Guard used by public operations: fails fast once {@code close()} has run.
 *
 * @throws IllegalStateException if this connection manager was already closed
 */
protected final void checkNotClosed() {
    if (!closed) {
        return;
    }
    throw new IllegalStateException("This connection manager has already been closed.");
}
|
/**
 * Verifies that checkNotClosed() is a no-op through the whole
 * connect/disconnect lifecycle and only throws after close().
 */
@Test
void testCheckNotClosed() {
    AbstractServiceConnectionManager<Object> connectionManager =
            new TestServiceConnectionManager();
    connectionManager.checkNotClosed();
    connectionManager.connect(new Object());
    connectionManager.checkNotClosed();
    connectionManager.disconnect();
    connectionManager.checkNotClosed();
    connectionManager.close();
    assertThatThrownBy(connectionManager::checkNotClosed)
            .as("checkNotClosed() did not fail for a closed connection manager")
            .isInstanceOf(IllegalStateException.class);
}
|
/**
 * Builds an optimized conjunction of the given predicates.
 * Simplifications applied: an empty input yields TRUE, any FALSE operand
 * collapses the whole expression to FALSE, TRUE operands are dropped, and
 * nested AND predicates are flattened into a single sorted operand list.
 */
public static FilePredicate create(Collection<FilePredicate> predicates) {
    // Empty conjunction is vacuously true.
    if (predicates.isEmpty()) {
        return TruePredicate.TRUE;
    }
    AndPredicate and = new AndPredicate();
    for (FilePredicate operand : predicates) {
        if (operand == FalsePredicate.FALSE) {
            // Any false operand makes the whole conjunction false.
            return FalsePredicate.FALSE;
        }
        if (operand == TruePredicate.TRUE) {
            // Neutral element: contributes nothing.
            continue;
        }
        if (operand instanceof AndPredicate nested) {
            // Flatten nested conjunctions into this one.
            and.predicates.addAll(nested.predicates);
        } else {
            and.predicates.add(OptimizedFilePredicateAdapter.create(operand));
        }
    }
    Collections.sort(and.predicates);
    return and;
}
|
/** Verifies that an empty AND expression simplifies to the TRUE predicate. */
@Test
public void simplifyAndExpressionsWhenEmpty() {
    FilePredicate andPredicate = AndPredicate.create(Arrays.asList());
    assertThat(andPredicate).isEqualTo(TruePredicate.TRUE);
}
|
/**
 * Inspects a jar for licensing problems and returns the number of severe
 * issues found: missing/invalid META-INF NOTICE or LICENSE files, stray
 * license files outside META-INF, and files under incompatible licenses.
 * Empty test jars are exempt and report zero issues.
 *
 * @param file path to the jar on the local filesystem
 * @return count of severe licensing issues (0 means the jar is clean)
 */
@VisibleForTesting
static int checkJar(Path file) throws Exception {
    final URI uri = file.toUri();
    int numSevereIssues = 0;
    // Mount the jar as a zip filesystem so its entries can be walked like paths.
    try (final FileSystem fileSystem =
            FileSystems.newFileSystem(
                    new URI("jar:file", uri.getHost(), uri.getPath(), uri.getFragment()),
                    Collections.emptyMap())) {
        if (isTestJarAndEmpty(file, fileSystem.getPath("/"))) {
            return 0;
        }
        if (!noticeFileExistsAndIsValid(fileSystem.getPath("META-INF", "NOTICE"), file)) {
            numSevereIssues++;
        }
        if (!licenseFileExistsAndIsValid(fileSystem.getPath("META-INF", "LICENSE"), file)) {
            numSevereIssues++;
        }
        numSevereIssues +=
                getNumLicenseFilesOutsideMetaInfDirectory(file, fileSystem.getPath("/"));
        numSevereIssues += getFilesWithIncompatibleLicenses(file, fileSystem.getPath("/"));
    }
    return numSevereIssues;
}
|
/**
 * Verifies that a bundled third-party-licenses file under "web/" (as emitted
 * by frontend builds) is not flagged as a stray license file.
 */
@Test
void testIgnoreWebThirdPartyLicenses(@TempDir Path tempDir) throws Exception {
    assertThat(
                    JarFileChecker.checkJar(
                            createJar(
                                    tempDir,
                                    Entry.fileEntry(VALID_NOTICE_CONTENTS, VALID_NOTICE_PATH),
                                    Entry.fileEntry(VALID_LICENSE_CONTENTS, VALID_LICENSE_PATH),
                                    Entry.fileEntry(
                                            "class contents",
                                            Arrays.asList("web", "3rdpartylicenses.txt")))))
            .isEqualTo(0);
}
|
/**
 * Attempts to place the given reservation on the plan via the configured
 * planner, logging the request and its outcome. A PlanningException from the
 * planner is logged as a failure and rethrown to the caller.
 *
 * @param reservationId id the reservation will be registered under
 * @param user          user submitting the reservation
 * @param plan          plan the reservation is placed on
 * @param contract      the reservation definition being requested
 * @return true if the planner accepted the reservation, false otherwise
 * @throws PlanningException if the planner fails while placing the reservation
 */
@Override
public boolean createReservation(ReservationId reservationId, String user,
    Plan plan, ReservationDefinition contract) throws PlanningException {
  LOG.info("placing the following ReservationRequest: " + contract);
  try {
    boolean res =
        planner.createReservation(reservationId, user, plan, contract);
    logOutcome(res, reservationId, contract);
    return res;
  } catch (PlanningException e) {
    // Log the failure before propagating so the outcome trail is complete.
    logOutcome(false, reservationId, contract);
    throw e;
  }
}

/** Logs the reservation outcome in a single place (was duplicated 3x). */
private void logOutcome(boolean success, ReservationId reservationId,
    ReservationDefinition contract) {
  LOG.info("OUTCOME: " + (success ? "SUCCESS" : "FAILURE")
      + ", Reservation ID: " + reservationId.toString()
      + ", Contract: " + contract.toString());
}
|
/**
 * Verifies R_ORDER placement: a chain of four reservation requests must be
 * allocated in order, flowing around a fully-utilized segment at t=30..32,
 * with the expected windows depending on left vs right allocation direction.
 */
@Test
public void testOrder() throws PlanningException {
    prepareBasicPlan();
    // create a completely utilized segment around time 30
    int[] f = { 100, 100 };
    ReservationDefinition rDef =
        ReservationSystemTestUtil.createSimpleReservationDefinition(30 * step,
            30 * step + f.length * step, f.length * step, 1,
            recurrenceExpression);
    assertTrue(plan.toString(),
        plan.addReservation(new InMemoryReservationAllocation(
            ReservationSystemTestUtil.getNewReservationId(), rDef, "u1",
            "dedicated", 30 * step, 30 * step + f.length * step,
            ReservationSystemTestUtil.generateAllocation(30 * step, step, f),
            res, minAlloc), false));
    // create a chain of 4 RR, mixing gang and non-gang
    ReservationDefinition rr = new ReservationDefinitionPBImpl();
    rr.setArrival(0 * step);
    rr.setDeadline(70 * step);
    rr.setRecurrenceExpression(recurrenceExpression);
    ReservationRequests reqs = new ReservationRequestsPBImpl();
    reqs.setInterpreter(ReservationRequestInterpreter.R_ORDER);
    ReservationRequest r = ReservationRequest.newInstance(
        Resource.newInstance(2048, 2), 10, 1, 10 * step);
    ReservationRequest r2 = ReservationRequest.newInstance(
        Resource.newInstance(1024, 1), 10, 10, 20 * step);
    List<ReservationRequest> list = new ArrayList<ReservationRequest>();
    list.add(r);
    list.add(r2);
    list.add(r);
    list.add(r2);
    reqs.setReservationResources(list);
    rr.setReservationRequests(reqs);
    // submit to agent
    ReservationId reservationID = ReservationSystemTestUtil
        .getNewReservationId();
    agent.createReservation(reservationID, "u1", plan, rr);
    // validate
    assertTrue("Agent-based allocation failed", reservationID != null);
    assertTrue("Agent-based allocation failed", plan.getAllReservations()
        .size() == 4);
    ReservationAllocation cs = plan.getReservationById(reservationID);
    // Expected windows differ by allocation direction; the blocked segment
    // at 30..32 shifts the later requests right (or packs them left).
    if (allocateLeft) {
      assertTrue(cs.toString(), check(cs, 0 * step, 10 * step, 20, 1024, 1));
      assertTrue(cs.toString(), check(cs, 10 * step, 30 * step, 10, 1024, 1));
      assertTrue(cs.toString(), check(cs, 32 * step, 42 * step, 20, 1024, 1));
      assertTrue(cs.toString(), check(cs, 42 * step, 62 * step, 10, 1024, 1));
    } else {
      assertTrue(cs.toString(), check(cs, 0 * step, 10 * step, 20, 1024, 1));
      assertTrue(cs.toString(), check(cs, 10 * step, 30 * step, 10, 1024, 1));
      assertTrue(cs.toString(), check(cs, 40 * step, 50 * step, 20, 1024, 1));
      assertTrue(cs.toString(), check(cs, 50 * step, 70 * step, 10, 1024, 1));
    }
    System.out.println("--------AFTER ORDER ALLOCATION (queue: "
        + reservationID + ")----------");
    System.out.println(plan.toString());
    System.out.println(plan.toCumulativeString());
}
|
/**
 * Parses a single CSV cell into the Java value dictated by the schema field's
 * type. Parse failures are rethrown as IllegalArgumentException with the
 * field name appended so the offending column can be identified.
 *
 * @param cell  raw cell text from the CSV record
 * @param field schema field describing the expected type
 * @return the parsed value (String, numeric, Boolean, BigDecimal or Instant)
 * @throws UnsupportedOperationException for types with no built-in parser
 * @throws IllegalArgumentException on a value/type mismatch
 */
static Object parseCell(String cell, Schema.Field field) {
  Schema.FieldType fieldType = field.getType();
  try {
    switch (fieldType.getTypeName()) {
      case STRING:
        return cell;
      case INT16:
        return Short.parseShort(cell);
      case INT32:
        return Integer.parseInt(cell);
      case INT64:
        return Long.parseLong(cell);
      case BOOLEAN:
        return Boolean.parseBoolean(cell);
      case BYTE:
        return Byte.parseByte(cell);
      case DECIMAL:
        return new BigDecimal(cell);
      case DOUBLE:
        return Double.parseDouble(cell);
      case FLOAT:
        return Float.parseFloat(cell);
      case DATETIME:
        // NOTE(review): if this is joda-time Instant, parse failures throw
        // IllegalArgumentException and are wrapped below; java.time.Instant
        // would throw DateTimeParseException and bypass the catch — confirm
        // which Instant is imported.
        return Instant.parse(cell);
      default:
        throw new UnsupportedOperationException(
            "Unsupported type: " + fieldType + ", consider using withCustomRecordParsing");
    }
  } catch (IllegalArgumentException e) {
    // Re-wrap with the field name so callers can locate the bad column.
    throw new IllegalArgumentException(
        e.getMessage() + " field " + field.getName() + " was received -- type mismatch");
  }
}
|
/** Verifies that a well-formed decimal string parses into a Double for a DOUBLE field. */
@Test
public void givenValidDoubleCell_parses() {
  DefaultMapEntry cellToExpectedValue = new DefaultMapEntry("10.05", 10.05);
  Schema schema = Schema.builder().addDoubleField("a_double").addStringField("a_string").build();
  assertEquals(
      cellToExpectedValue.getValue(),
      CsvIOParseHelpers.parseCell(
          cellToExpectedValue.getKey().toString(), schema.getField("a_double")));
}
|
/**
 * Convenience overload: transforms a catalog column type to its OLAP table
 * equivalent, delegating with conversion enabled ({@code true}).
 */
public static Type transformTableColumnType(Type srcType) {
    return transformTableColumnType(srcType, true);
}
|
/** Verifies the default catalog string type is capped at the OLAP max varchar length. */
@Test
public void testConvertCatalogMaxStringToOlapMaxString() {
    ScalarType catalogString = ScalarType.createDefaultCatalogString();
    ScalarType convertedString = (ScalarType) AnalyzerUtils.transformTableColumnType(catalogString);
    Assert.assertEquals(ScalarType.getOlapMaxVarcharLength(), convertedString.getLength());
}
|
/**
 * Returns a copy of the array with the first element that is the same
 * reference ({@code ==}) as {@code elementToRemove} removed. If the reference
 * is not present, delegates with {@link #UNKNOWN_INDEX} (array unchanged).
 * <p>
 * Fix: the scan now stops at the first identical reference instead of walking
 * the whole array; with identity matching the removed content is the same
 * either way, and the scan no longer does unnecessary work.
 *
 * @param oldElements     source array (not modified)
 * @param elementToRemove reference to remove
 * @return a new array without the element, or the original contents if absent
 */
public static <T> T[] remove(final T[] oldElements, final T elementToRemove)
{
    final int length = oldElements.length;
    int index = UNKNOWN_INDEX;
    for (int i = 0; i < length; i++)
    {
        if (oldElements[i] == elementToRemove)
        {
            index = i;
            break; // first identical reference suffices
        }
    }
    return remove(oldElements, index);
}
|
/** Verifies the index-based overload drops the element at position 0. */
@Test
void shouldRemoveByIndex()
{
    final Integer[] result = ArrayUtil.remove(values, 0);
    assertArrayEquals(new Integer[]{ TWO }, result);
}
|
/**
 * Completes this future successfully with the given value and fires success
 * listeners. RuntimeException values are rejected (use raise() for failures),
 * and double-completion is an error. The completion latch is always counted
 * down, even when an exception is thrown, so waiters are never left hanging.
 *
 * @param value the result value (must not be a RuntimeException instance)
 * @throws IllegalArgumentException if value is a RuntimeException
 * @throws IllegalStateException if the future was already completed
 */
public void complete(T value) {
    try {
        if (value instanceof RuntimeException)
            throw new IllegalArgumentException("The argument to complete can not be an instance of RuntimeException");
        if (!result.compareAndSet(INCOMPLETE_SENTINEL, value))
            throw new IllegalStateException("Invalid attempt to complete a request future which is already complete");
        fireSuccess();
    } finally {
        completedLatch.countDown();
    }
}
|
/** Verifies complete() rejects RuntimeException values (raise() must be used instead). */
@Test
public void testRuntimeExceptionInComplete() {
    RequestFuture<Exception> future = new RequestFuture<>();
    assertThrows(IllegalArgumentException.class, () -> future.complete(new RuntimeException()));
}
|
/**
 * Casts an Integer to a BigDecimal with the given precision and scale by
 * delegating to the long overload. Null maps to null.
 */
public static BigDecimal cast(final Integer value, final int precision, final int scale) {
    if (value == null) {
        return null;
    }
    return cast(value.longValue(), precision, scale);
}
|
/** Verifies that casting a non-numeric string to decimal fails with NumberFormatException. */
@Test
public void shouldNotCastStringNonNumber() {
    // When:
    assertThrows(
        NumberFormatException.class,
        () -> cast("abc", 2, 1)
    );
}
|
/**
 * Decides whether a request header may be forwarded: everything except
 * X-Forwarded-For (compared case-insensitively, ignoring surrounding
 * whitespace) is included.
 */
@VisibleForTesting
static boolean shouldIncludeRequestMessageHeader(final String header) {
    final String normalizedHeader = header.trim();
    return !HttpHeaders.X_FORWARDED_FOR.equalsIgnoreCase(normalizedHeader);
}
|
/** Verifies only X-Forwarded-For is excluded; all other headers pass through. */
@Test
void testShouldIncludeRequestMessageHeader() {
    assertThat(WebSocketResourceProvider.shouldIncludeRequestMessageHeader(HttpHeaders.X_FORWARDED_FOR)).isFalse();
    assertThat(WebSocketResourceProvider.shouldIncludeRequestMessageHeader(HttpHeaders.USER_AGENT)).isTrue();
    assertThat(WebSocketResourceProvider.shouldIncludeRequestMessageHeader("X-Signal-Receive-Stories")).isTrue();
}
|
/**
 * Returns the data-group digests, keyed by data-group number.
 * Serialized as an ASN.1 SEQUENCE (tag 0x30) via {@code DigestsConverter}.
 */
@Asn1Property(tagNo = 0x30, converter = DigestsConverter.class)
public Map<Integer, byte[]> getDigests() {
    return digests;
}
|
/** Verifies the RVIG 2014 CMS fixture decodes with digests for the expected data groups. */
@Test
public void readRvig2014Cms() throws Exception {
    final LdsSecurityObject ldsSecurityObject = mapper.read(
            readFromCms("rvig2014"), LdsSecurityObject.class);
    assertEquals(ImmutableSet.of(1, 2, 3, 14, 15), ldsSecurityObject.getDigests().keySet());
}
|
/** Returns the tunnel GPE next-protocol value carried by this extension. */
public byte tunGpeNp() {
    return tunGpeNp;
}
|
/** Verifies construction stores the next-protocol byte and yields a non-null object. */
@Test
public void testConstruction() {
    final NiciraTunGpeNp tunGpeNp1 = new NiciraTunGpeNp(np1);
    assertThat(tunGpeNp1, is(notNullValue()));
    assertThat(tunGpeNp1.tunGpeNp(), is(np1));
}
|
/** Non-instantiable: static utility methods only. */
private ListUtil() {
}
|
/**
 * Smoke-tests the ListUtil helpers: emptyIsDefault falls back for empty
 * input, and findFirst returns null when no element matches (or the list is
 * empty).
 */
@Test
public void testListUtil() {
    Assertions.assertNotNull(ListUtil.emptyIsDefault(Collections.emptyList(), Collections.singletonList(1)));
    Assertions.assertNotNull(ListUtil.findFirst(Collections.singletonList(1), res -> res == 1));
    Assertions.assertNull(ListUtil.findFirst(Collections.singletonList(1), res -> false));
    Assertions.assertNull(ListUtil.findFirst(Collections.emptyList(), res -> false));
}
|
/**
 * Hashes a password with the OpenBSD bcrypt scheme.
 * The salt string uses modular-crypt format: "$2$" or "$2a$", a two-digit
 * cost factor, "$", then 22 base64 salt characters. The returned string
 * contains version, cost, salt and digest, and can be fed back to check
 * a candidate password.
 *
 * @param password the plaintext password to hash
 * @param salt     a bcrypt salt string as described above (not null)
 * @return the encoded bcrypt hash
 * @throws IllegalArgumentException if the salt is null, too short, or has an
 *                                  unsupported version/revision or missing
 *                                  cost digits
 */
public static String hashpw(String password, String salt) throws IllegalArgumentException {
    BCrypt B;
    String real_salt;
    byte passwordb[], saltb[], hashed[];
    char minor = (char) 0;
    int rounds, off = 0;
    StringBuilder rs = new StringBuilder();
    if (salt == null) {
        throw new IllegalArgumentException("salt cannot be null");
    }
    int saltLength = salt.length();
    // Too short to contain version prefix, cost digits and 22-char salt.
    if (saltLength < 28) {
        throw new IllegalArgumentException("Invalid salt");
    }
    if (salt.charAt(0) != '$' || salt.charAt(1) != '2') {
        throw new IllegalArgumentException("Invalid salt version");
    }
    if (salt.charAt(2) == '$') {
        // "$2$" form: no revision letter.
        off = 3;
    } else {
        // Only the "a" revision is accepted here ("$2a$").
        minor = salt.charAt(2);
        if (minor != 'a' || salt.charAt(3) != '$') {
            throw new IllegalArgumentException("Invalid salt revision");
        }
        off = 4;
    }
    // Must still hold 2 cost digits + '$' + 22 salt characters after the prefix.
    if (saltLength - off < 25) {
        throw new IllegalArgumentException("Invalid salt");
    }
    // Extract number of rounds
    if (salt.charAt(off + 2) > '$') {
        throw new IllegalArgumentException("Missing salt rounds");
    }
    rounds = Integer.parseInt(salt.substring(off, off + 2));
    real_salt = salt.substring(off + 3, off + 25);
    try {
        // Revisions >= 'a' append a NUL terminator to the password bytes.
        passwordb = (password + (minor >= 'a' ? "\000" : "")).getBytes("UTF-8");
    } catch (UnsupportedEncodingException uee) {
        throw new AssertionError("UTF-8 is not supported");
    }
    saltb = decode_base64(real_salt, BCRYPT_SALT_LEN);
    B = new BCrypt();
    hashed = B.crypt_raw(passwordb, saltb, rounds);
    // Re-encode: version, optional revision, zero-padded cost, salt, digest.
    rs.append("$2");
    if (minor >= 'a') {
        rs.append(minor);
    }
    rs.append("$");
    if (rounds < 10) {
        rs.append("0");
    }
    rs.append(rounds);
    rs.append("$");
    encode_base64(saltb, saltb.length, rs);
    encode_base64(hashed, bf_crypt_ciphertext.length * 4 - 1, rs);
    return rs.toString();
}
|
/** Verifies that a null salt is rejected with IllegalArgumentException. */
@Test
public void testHashpwSaltIsNull() throws IllegalArgumentException {
    thrown.expect(IllegalArgumentException.class);
    BCrypt.hashpw("foo", null);
}
|
/** Returns a fresh builder for an integer column definition. */
public static Builder newIntegerColumnDefBuilder() {
    return new Builder();
}
|
/** Verifies the builder propagates setDefaultValue() into the built column def. */
@Test
public void builder_setDefaultValue_sets_default_value_field_of_IntegerColumnDef() {
    assertThat(newIntegerColumnDefBuilder().setColumnName("a").setDefaultValue(42).build().getDefaultValue()).isEqualTo(42);
}
|
/**
 * Tool entry point. Expects exactly two arguments: an action
 * (load / create / resource / printresource) and a target name, and
 * dispatches to the matching operation. Anything else prints usage.
 *
 * @param args action and target name
 * @return the operation's result code, or the usage code on bad input
 */
@Override
public int run(String[] args) throws Exception {
    if (args.length != 2) {
        return usage(args);
    }
    final String action = args[0];
    final String name = args[1];
    if (A_LOAD.equals(action)) {
        return loadClass(name);
    }
    if (A_CREATE.equals(action)) {
        // Load first so load errors are reported separately from create errors.
        final int loadResult = loadClass(name);
        return loadResult == SUCCESS ? createClassInstance(name) : loadResult;
    }
    if (A_RESOURCE.equals(action)) {
        return loadResource(name);
    }
    if (A_PRINTRESOURCE.equals(action)) {
        return dumpResource(name);
    }
    // Unknown action.
    return usage(args);
}
|
/** Verifies "create" fails with E_CREATE_FAILED for a class lacking a no-arg constructor. */
@Test
public void testCreateFailsNoEmptyConstructor() throws Throwable {
    run(FindClass.E_CREATE_FAILED,
        FindClass.A_CREATE,
        "org.apache.hadoop.util.TestFindClass$NoEmptyConstructor");
}
|
/**
 * Sets the number of queues for the protocol and returns this builder for
 * chaining.
 */
public ProtocolBuilder queues(Integer queues) {
    this.queues = queues;
    return getThis();
}
|
/** Verifies the queues setting survives into the built protocol config. */
@Test
void queues() {
    ProtocolBuilder builder = new ProtocolBuilder();
    builder.queues(30);
    Assertions.assertEquals(30, builder.build().getQueues());
}
|
/** Creates a new empty value for the window slot at the given time. */
public abstract T newEmptyValue(long timeMillis);
|
/** Verifies a freshly created empty window value has a zero sum. */
@Test
void testNewEmptyValue() {
    assertEquals(0L, window.newEmptyValue(System.currentTimeMillis()).sum());
}
|
/** Returns the root URI of the Bonita web application on the configured host and port. */
public String getBaseBonitaURI() {
    StringBuilder uri = new StringBuilder("http://");
    uri.append(hostname).append(':').append(port).append("/bonita");
    return uri.toString();
}
|
/** Verifies the base URI is assembled as http://host:port/bonita. */
@Test
public void testBaseBonitaURL() {
    BonitaAPIConfig config = new BonitaAPIConfig("host", "port", "username", "password");
    assertEquals("http://host:port/bonita", config.getBaseBonitaURI());
}
|
/**
 * Assembles the ordered Netty child-channel pipeline for this TCP input:
 * registration, traffic/connection counters, optional TLS, input-specific
 * custom handlers, optional codec aggregation, raw-message handling, and a
 * trailing exception logger. Map insertion order defines pipeline order.
 */
@Override
protected LinkedHashMap<String, Callable<? extends ChannelHandler>> getChildChannelHandlers(MessageInput input) {
    final LinkedHashMap<String, Callable<? extends ChannelHandler>> handlers = new LinkedHashMap<>();
    final CodecAggregator aggregator = getAggregator();
    handlers.put("channel-registration", () -> new ChannelRegistrationHandler(childChannels));
    handlers.put("traffic-counter", () -> throughputCounter);
    handlers.put("connection-counter", () -> connectionCounter);
    if (tlsEnable) {
        // TLS must come before any handler that reads decoded bytes.
        LOG.info("Enabled TLS for input {}. key-file=\"{}\" cert-file=\"{}\"", input.toIdentifier(), tlsKeyFile, tlsCertFile);
        handlers.put("tls", getSslHandlerCallable(input));
    }
    handlers.putAll(getCustomChildChannelHandlers(input));
    if (aggregator != null) {
        LOG.debug("Adding codec aggregator {} to channel pipeline", aggregator);
        handlers.put("codec-aggregator", () -> new ByteBufMessageAggregationHandler(aggregator, localRegistry));
    }
    handlers.put("rawmessage-handler", () -> new RawMessageHandler(input));
    handlers.put("exception-logger", () -> new ExceptionLoggingChannelHandler(input, LOG, this.tcpKeepalive));
    return handlers;
}
|
/**
 * Verifies that enabling TLS with an unwritable java.io.tmpdir fails fast
 * with an IllegalStateException naming the missing temp directory.
 */
@Test
public void getChildChannelHandlersFailsIfTempDirDoesNotExist() throws IOException {
    final File tmpDir = temporaryFolder.newFolder();
    // Delete the folder so the temp dir genuinely does not exist.
    assumeTrue(tmpDir.delete());
    System.setProperty("java.io.tmpdir", tmpDir.getAbsolutePath());
    final Configuration configuration = new Configuration(ImmutableMap.of(
            "bind_address", "localhost",
            "port", 12345,
            "tls_enable", true)
    );
    final AbstractTcpTransport transport = new AbstractTcpTransport(
            configuration, throughputCounter, localRegistry, eventLoopGroup, eventLoopGroupFactory, nettyTransportConfiguration, tlsConfiguration) {};
    expectedException.expect(IllegalStateException.class);
    expectedException.expectMessage("Couldn't write to temporary directory: " + tmpDir.getAbsolutePath());
    transport.getChildChannelHandlers(input);
}
|
/**
 * FEEL context() function: builds a context (map) from a list of entries.
 * Each entry must itself be a context with a String "key" and a "value"
 * member; duplicate keys, missing members, null input and non-context
 * entries each produce an InvalidParametersEvent error (1-based index in
 * the message).
 *
 * @param entries list of {key, value} contexts
 * @return the assembled map, or an error result describing the bad entry
 */
public FEELFnResult<Map<String, Object>> invoke(@ParameterName("entries") List<Object> entries) {
    if (entries == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "entries", "cannot be null"));
    }
    Map<String, Object> result = new HashMap<>();
    for (int i = 0; i < entries.size(); i++) {
        // Human-readable, 1-based index used in error messages.
        final int h_index = i + 1;
        if (entries.get(i) instanceof Map) {
            Map<?, ?> map = (Map<?, ?>) entries.get(i);
            String key;
            Object value;
            if (map.get("key") instanceof String) {
                key = (String) map.get("key");
            } else {
                return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "entry of index " + (h_index) + " is missing a `key` entry"));
            }
            // containsKey (not get) so an explicit null value is accepted.
            if (map.containsKey("value")) {
                value = map.get("value");
            } else {
                return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "entry of index " + (h_index) + " is missing a `value` entry"));
            }
            if (result.containsKey(key)) {
                return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "entry of index " + (h_index) + " contains duplicate key"));
            }
            result.put(key, value);
        } else {
            return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "entry of index " + (h_index) + " is not a valid context"));
        }
    }
    return FEELFnResult.ofResult(result);
}
|
/** Verifies entries lacking a "key" or a "value" member are reported as invalid parameters. */
@Test
void invokeContainsNoKeyAndValue() {
    FunctionTestUtil.assertResultError(contextFunction.invoke(List.of(
            Map.of("test", "name", "value", "John Doe"),
            Map.of("key", "name", "test", "John Doe"))), InvalidParametersEvent.class);
}
|
/**
 * Resolves key/value metadata for a Java-serialized map entry. The type is
 * taken from the top-level field type when present, otherwise from the
 * configured key/value class option. Non-OBJECT types and custom types get
 * primitive metadata; plain OBJECT types get reflective object metadata.
 */
@Override
public KvMetadata resolveMetadata(
        boolean isKey,
        List<MappingField> resolvedFields,
        Map<String, String> options,
        InternalSerializationService serializationService
) {
    Map<QueryPath, MappingField> fieldsByPath = extractFields(resolvedFields, isKey);
    // Pair of (resolved query type, backing Java class), from the top-level
    // field if declared, else from the class named in the mapping options.
    Entry<QueryDataType, Class<?>> entry = getTopLevelType(fieldsByPath)
            .<Entry<QueryDataType, Class<?>>>map(type -> entry(type, loadClass(type.getObjectTypeMetadata())))
            .orElseGet(() -> {
                Class<?> typeClass = loadClass(options, isKey);
                return entry(QueryDataTypeUtils.resolveTypeForClass(typeClass), typeClass);
            });
    QueryDataType type = entry.getKey();
    Class<?> typeClass = entry.getValue();
    if (type.getTypeFamily() != QueryDataTypeFamily.OBJECT || type.isCustomType()) {
        return resolvePrimitiveMetadata(isKey, resolvedFields, fieldsByPath, type);
    } else {
        return resolveObjectMetadata(isKey, resolvedFields, fieldsByPath, typeClass);
    }
}
|
/**
 * Verifies primitive (int) key/value metadata resolution: single field at
 * the canonical path, generic query target and primitive upsert target.
 */
@Test
@Parameters({
        "true, __key",
        "false, this"
})
public void test_resolvePrimitiveMetadata(boolean key, String path) {
    Map<String, String> options = Map.of(
            (key ? OPTION_KEY_FORMAT : OPTION_VALUE_FORMAT), JAVA_FORMAT,
            (key ? OPTION_KEY_CLASS : OPTION_VALUE_CLASS), int.class.getName()
    );
    KvMetadata metadata = INSTANCE.resolveMetadata(
            key,
            singletonList(field(path, QueryDataType.INT, path)),
            options,
            null
    );
    assertThat(metadata.getFields()).containsExactly(
            new MapTableField(path, QueryDataType.INT, false, QueryPath.create(path))
    );
    assertThat(metadata.getQueryTargetDescriptor()).isEqualTo(GenericQueryTargetDescriptor.DEFAULT);
    assertThat(metadata.getUpsertTargetDescriptor()).isEqualTo(PrimitiveUpsertTargetDescriptor.INSTANCE);
}
|
/**
 * Applies an update function to a Hive table's statistics and writes the
 * result back through the metastore's alterTable. The current common stats
 * are read from the table parameters; per-partition stats are passed to the
 * updater as an empty map. Also refreshes the last-DDL-time parameter.
 *
 * @param dbName    Hive database name
 * @param tableName Hive table name
 * @param update    transformation from current to new partition stats
 * @throws StarRocksConnectorException if the table does not exist
 */
public void updateTableStatistics(String dbName, String tableName, Function<HivePartitionStats, HivePartitionStats> update) {
    org.apache.hadoop.hive.metastore.api.Table originTable = client.getTable(dbName, tableName);
    if (originTable == null) {
        throw new StarRocksConnectorException("Table '%s.%s' not found", dbName, tableName);
    }
    // Work on a copy so the cached/original table object is not mutated.
    org.apache.hadoop.hive.metastore.api.Table newTable = originTable.deepCopy();
    HiveCommonStats curCommonStats = toHiveCommonStats(originTable.getParameters());
    HivePartitionStats curPartitionStats = new HivePartitionStats(curCommonStats, new HashMap<>());
    HivePartitionStats updatedStats = update.apply(curPartitionStats);
    HiveCommonStats commonStats = updatedStats.getCommonStats();
    Map<String, String> originParams = newTable.getParameters();
    // Seconds since epoch, as Hive expects for transient_lastDdlTime.
    originParams.put(TRANSIENT_LAST_DDL_TIME, String.valueOf(System.currentTimeMillis() / 1000));
    newTable.setParameters(updateStatisticsParameters(originParams, commonStats));
    client.alterTable(dbName, tableName, newTable);
    //TODO(stephen): update table column statistics
}
|
/** Smoke test: updating statistics via the mocked metastore client completes without error. */
@Test
public void testUpdateTableStatistics() {
    HiveMetaClient client = new MockedHiveMetaClient();
    HiveMetastore metastore = new HiveMetastore(client, "hive_catalog", MetastoreType.HMS);
    HivePartitionStats partitionStats = HivePartitionStats.empty();
    metastore.updateTableStatistics("db", "table", ignore -> partitionStats);
}
|
/**
 * Schedules teardown of a component generation. Collects, in component
 * order, everything that needs explicit destruction — deconstructable
 * AbstractComponents, Providers, and SharedResources (wrapped in a
 * releaser) — then hands the batch plus bundles to the executor. Nothing
 * is scheduled when there is no work.
 */
@Override
public void deconstruct(long generation, List<Object> components, Collection<Bundle> bundles) {
    List<Deconstructable> deconstructables = new ArrayList<>();
    for (Object component : components) {
        if (component instanceof AbstractComponent abstractComponent) {
            if (abstractComponent.isDeconstructable()) {
                deconstructables.add(abstractComponent);
            }
        } else if (component instanceof Provider) {
            deconstructables.add((Deconstructable) component);
        } else if (component instanceof SharedResource) {
            // Release shared resources in same order as other components in case of usage without reference counting
            deconstructables.add(new SharedResourceReleaser(component));
        }
    }
    boolean nothingToDo = deconstructables.isEmpty() && bundles.isEmpty();
    if (!nothingToDo) {
        executor.execute(new DestructComponentTask(generation, deconstructables, bundles));
    }
}
|
/** Verifies a deconstructable AbstractComponent is eventually destructed by the deconstructor. */
@Test
void require_abstract_component_destructed() throws InterruptedException {
    TestAbstractComponent abstractComponent = new TestAbstractComponent();
    deconstructor.deconstruct(0, List.of(abstractComponent), List.of());
    waitForDeconstructToComplete(() -> abstractComponent.destructed);
    assertTrue(abstractComponent.destructed);
}
|
/**
 * Routes the inbox to the handler for the given ordinal: ordinals 0-4 have
 * dedicated dispatch methods; anything higher falls through to processAny().
 * Checked exceptions are rethrown unchecked via sneakyThrow.
 */
@Override
@SuppressWarnings("checkstyle:magicnumber")
public void process(int ordinal, @Nonnull Inbox inbox) {
    try {
        switch (ordinal) {
            case 0:
                process0(inbox);
                break;
            case 1:
                process1(inbox);
                break;
            case 2:
                process2(inbox);
                break;
            case 3:
                process3(inbox);
                break;
            case 4:
                process4(inbox);
                break;
            default:
                processAny(ordinal, inbox);
        }
    } catch (Exception e) {
        throw sneakyThrow(e);
    }
}
|
/** Verifies ordinals above 4 are routed through the generic processAny() path. */
@Test
public void when_processInbox5_then_tryProcessCalled() {
    // When
    tryProcessP.process(ORDINAL_5, inbox);
    // Then
    tryProcessP.validateReceptionOfItem(ORDINAL_5, MOCK_ITEM);
}
|
/**
 * Records a metric delta for (command, user) into the rolling window at the
 * given timestamp.
 */
public void recordMetric(long time, String command,
    String user, long delta) {
  RollingWindow window = getRollingWindow(command, user);
  window.incAt(time, delta);
}
|
/**
 * Verifies that a recorded metric remains visible within its window period
 * and is reset (reads as zero) once the window rolls past it.
 */
@Test
public void windowReset() throws Exception {
    Configuration config = new Configuration();
    config.setInt(DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY, 1);
    config.setInt(DFSConfigKeys.NNTOP_NUM_USERS_KEY, N_TOP_USERS);
    int period = 2;
    RollingWindowManager rollingWindowManager =
        new RollingWindowManager(config, period);
    rollingWindowManager.recordMetric(0, "op1", users[0], 3);
    checkValues(rollingWindowManager, 0, "op1", 3, 3);
    checkValues(rollingWindowManager, period - 1, "op1", 3, 3);
    // At t == period the window has rolled over and the value is gone.
    checkValues(rollingWindowManager, period, "op1", 0, 0);
}
|
/**
 * Returns the Redis Cluster hash slot for the given key by issuing a
 * KEYSLOT command and blocking on the result.
 */
@Override
public Integer clusterGetSlotForKey(byte[] key) {
    RFuture<Integer> f = executorService.readAsync((String)null, StringCodec.INSTANCE, RedisCommands.KEYSLOT, key);
    return syncFuture(f);
}
|
/** Verifies KEYSLOT returns a non-null slot for an arbitrary key. */
@Test
public void testClusterGetSlotForKey() {
    Integer slot = connection.clusterGetSlotForKey("123".getBytes());
    assertThat(slot).isNotNull();
}
|
/**
 * Takes one sample on every active execution-state tracker, attributing
 * the given elapsed milliseconds to the current state of each.
 */
@VisibleForTesting
public void doSampling(long millisSinceLastSample) {
  for (ExecutionStateTracker tracker : activeTrackers) {
    tracker.takeSample(millisSinceLastSample);
  }
}
|
/** Verifies that a 6-minute sample inside a single state triggers lull reporting. */
@Test
public void testLullDetectionOccurs() throws Exception {
  ExecutionStateTracker tracker1 = createTracker();
  try (Closeable t1 = tracker1.activate(new Thread())) {
    try (Closeable c = tracker1.enterState(step1act1)) {
      sampler.doSampling(TimeUnit.MINUTES.toMillis(6));
    }
  }
  assertThat(step1act1.lullReported, equalTo(true));
}
|
/**
 * Super-admin-only endpoint that creates or updates a user, depending on the
 * {@code isCreate} flag. Rejects empty username/password and weak passwords
 * with a 400; only supported when the configured user service is the Spring
 * Security implementation.
 *
 * @param isCreate true to create a new user, false to update an existing one
 * @param user     the user payload (username and password required)
 * @throws BadRequestException on empty credentials or a weak password
 * @throws UnsupportedOperationException if the user service cannot manage users
 */
@PreAuthorize(value = "@permissionValidator.isSuperAdmin()")
@PostMapping("/users")
public void createOrUpdateUser(
    @RequestParam(value = "isCreate", defaultValue = "false") boolean isCreate,
    @RequestBody UserPO user) {
  if (StringUtils.isContainEmpty(user.getUsername(), user.getPassword())) {
    throw new BadRequestException("Username and password can not be empty.");
  }
  CheckResult pwdCheckRes = passwordChecker.checkWeakPassword(user.getPassword());
  if (!pwdCheckRes.isSuccess()) {
    throw new BadRequestException(pwdCheckRes.getMessage());
  }
  if (userService instanceof SpringSecurityUserService) {
    if (isCreate) {
      ((SpringSecurityUserService) userService).create(user);
    } else {
      ((SpringSecurityUserService) userService).update(user);
    }
  } else {
    throw new UnsupportedOperationException("Create or update user operation is unsupported");
  }
}
|
/**
 * Verifies that a weak password is rejected with a BadRequestException
 * carrying the checker's message.
 */
@Test(expected = BadRequestException.class)
public void testCreateOrUpdateUserFailed() {
    UserPO user = new UserPO();
    user.setUsername("username");
    user.setPassword("password");
    String msg = "fake error message";
    Mockito.when(userPasswordChecker.checkWeakPassword(Mockito.anyString()))
        .thenReturn(new CheckResult(Boolean.FALSE, msg));
    try {
        userInfoController.createOrUpdateUser(true, user);
    } catch (BadRequestException e) {
        // Assert the message, then rethrow so the expected-exception check passes.
        Assert.assertEquals(msg, e.getMessage());
        throw e;
    }
}
|
/**
 * Deletes a tenant: refuses the 'default' tenant and unknown tenants, then
 * — under the per-tenant lock — closes the tenant and removes its subtree
 * from ZooKeeper.
 *
 * @param name the tenant to delete
 * @throws IllegalArgumentException for the default tenant or a non-existent one
 */
public void deleteTenant(TenantName name) {
    if (name.equals(DEFAULT_TENANT))
        throw new IllegalArgumentException("Deleting 'default' tenant is not allowed");
    if ( ! tenants.containsKey(name))
        throw new IllegalArgumentException("Deleting '" + name + "' failed, tenant does not exist");
    log.log(Level.INFO, "Deleting tenant '" + name + "'");
    // Deletes the tenant tree from ZooKeeper (application and session status for the tenant)
    // and triggers Tenant.close().
    try (Lock lock = tenantLocks.lock(name)) {
        Path path = tenants.get(name).getPath();
        closeTenant(name);
        curator.delete(path);
    }
}
|
/**
 * Verifies a known tenant can be deleted and disappears from the repository,
 * and that deleting a non-existent tenant fails with IllegalArgumentException.
 */
@Test
public void testDeleteTenant() throws Exception {
    assertZooKeeperTenantPathExists(tenant1);
    tenantRepository.deleteTenant(tenant1);
    assertFalse(tenantRepository.getAllTenantNames().contains(tenant1));
    try {
        tenantRepository.deleteTenant(TenantName.from("non-existing"));
        fail("deletion of non-existing tenant should fail");
    } catch (IllegalArgumentException e) {
        // expected
    }
}
|
/**
 * Aggregates another subscription's stats into this one (in place) and
 * returns {@code this}. Rates/counters are summed, booleans OR-ed, and the
 * last-seen subscription type wins. Consumer stats are merged pairwise when
 * the lists are the same size; otherwise the other side's consumers are
 * appended as fresh entries. The earliest backlog publish time keeps the
 * minimum when both sides have one, else whichever side is non-zero.
 *
 * @param stats the stats to fold in (must not be null)
 * @return this instance, after aggregation
 */
public SubscriptionStatsImpl add(SubscriptionStatsImpl stats) {
    Objects.requireNonNull(stats);
    this.msgRateOut += stats.msgRateOut;
    this.msgThroughputOut += stats.msgThroughputOut;
    this.bytesOutCounter += stats.bytesOutCounter;
    this.msgOutCounter += stats.msgOutCounter;
    this.msgRateRedeliver += stats.msgRateRedeliver;
    this.messageAckRate += stats.messageAckRate;
    this.chunkedMessageRate += stats.chunkedMessageRate;
    this.msgBacklog += stats.msgBacklog;
    this.backlogSize += stats.backlogSize;
    this.msgBacklogNoDelayed += stats.msgBacklogNoDelayed;
    this.msgDelayed += stats.msgDelayed;
    this.unackedMessages += stats.unackedMessages;
    this.type = stats.type;
    this.msgRateExpired += stats.msgRateExpired;
    this.totalMsgExpired += stats.totalMsgExpired;
    this.isReplicated |= stats.isReplicated;
    this.isDurable |= stats.isDurable;
    // Mismatched consumer lists: append copies; matched: merge index-by-index.
    if (this.consumers.size() != stats.consumers.size()) {
        for (int i = 0; i < stats.consumers.size(); i++) {
            ConsumerStatsImpl consumerStats = new ConsumerStatsImpl();
            this.consumers.add(consumerStats.add(stats.consumers.get(i)));
        }
    } else {
        for (int i = 0; i < stats.consumers.size(); i++) {
            this.consumers.get(i).add(stats.consumers.get(i));
        }
    }
    this.allowOutOfOrderDelivery |= stats.allowOutOfOrderDelivery;
    this.consumersAfterMarkDeletePosition.putAll(stats.consumersAfterMarkDeletePosition);
    this.nonContiguousDeletedMessagesRanges += stats.nonContiguousDeletedMessagesRanges;
    this.nonContiguousDeletedMessagesRangesSerializedSize += stats.nonContiguousDeletedMessagesRangesSerializedSize;
    // Zero means "unset": take min when both set, otherwise the set one.
    if (this.earliestMsgPublishTimeInBacklog != 0 && stats.earliestMsgPublishTimeInBacklog != 0) {
        this.earliestMsgPublishTimeInBacklog = Math.min(
                this.earliestMsgPublishTimeInBacklog,
                stats.earliestMsgPublishTimeInBacklog
        );
    } else {
        this.earliestMsgPublishTimeInBacklog = Math.max(
                this.earliestMsgPublishTimeInBacklog,
                stats.earliestMsgPublishTimeInBacklog
        );
    }
    this.delayedMessageIndexSizeInBytes += stats.delayedMessageIndexSizeInBytes;
    this.subscriptionProperties.putAll(stats.subscriptionProperties);
    this.filterProcessedMsgCount += stats.filterProcessedMsgCount;
    this.filterAcceptedMsgCount += stats.filterAcceptedMsgCount;
    this.filterRejectedMsgCount += stats.filterRejectedMsgCount;
    this.filterRescheduledMsgCount += stats.filterRescheduledMsgCount;
    stats.bucketDelayedIndexStats.forEach((k, v) -> {
        TopicMetricBean topicMetricBean =
                this.bucketDelayedIndexStats.computeIfAbsent(k, __ -> new TopicMetricBean());
        topicMetricBean.name = v.name;
        topicMetricBean.labelsAndValues = v.labelsAndValues;
        topicMetricBean.value += v.value;
    });
    return this;
}
|
/** Verifies aggregation keeps the earlier (minimum) backlog publish time when both are set. */
@Test
public void testAdd_EarliestMsgPublishTimeInBacklogs_Earliest() {
    SubscriptionStatsImpl stats1 = new SubscriptionStatsImpl();
    stats1.earliestMsgPublishTimeInBacklog = 10L;
    SubscriptionStatsImpl stats2 = new SubscriptionStatsImpl();
    stats2.earliestMsgPublishTimeInBacklog = 20L;
    SubscriptionStatsImpl aggregate = stats1.add(stats2);
    assertEquals(aggregate.earliestMsgPublishTimeInBacklog, 10L);
}
|
/** Returns a publisher over all keys of the map (no pattern filter). */
public Publisher<K> keyIterator() {
    return keyIterator(null);
}
|
/** Verifies putAll() merges entries and the key iterator sees both old and new keys. */
@Test
public void testPutAll() {
    RMapRx<Integer, String> map = redisson.getMap("simple");
    sync(map.put(1, "1"));
    sync(map.put(2, "2"));
    sync(map.put(3, "3"));
    Map<Integer, String> joinMap = new HashMap<Integer, String>();
    joinMap.put(4, "4");
    joinMap.put(5, "5");
    joinMap.put(6, "6");
    sync(map.putAll(joinMap));
    assertThat(toIterator(map.keyIterator())).toIterable().contains(1, 2, 3, 4, 5, 6);
}
|
/** Returns whether the request asks the broker for only stable (committed) offsets. */
public boolean requireStable() {
    return data.requireStable();
}
|
/**
 * Verifies that requireStable is rejected (UnsupportedVersionException) for
 * protocol versions below 7 and honored from version 7 onward.
 */
@Test
public void testBuildThrowForUnsupportedRequireStable() {
    for (int version : listOfVersionsNonBatchOffsetFetch) {
        builder = new OffsetFetchRequest.Builder(group1, true, null, true);
        if (version < 7) {
            final short finalVersion = (short) version;
            assertThrows(UnsupportedVersionException.class, () -> builder.build(finalVersion));
        } else {
            OffsetFetchRequest request = builder.build((short) version);
            assertTrue(request.requireStable());
        }
    }
}
|
/**
 * Atomically updates the value with the result of applying {@code f} to the
 * current value and {@code x}, retrying the CAS until it wins, and returns
 * the updated value. The operator may be re-invoked under contention, so it
 * should be side-effect free.
 */
public long accumulateAndGet(long x, LongBinaryOperator f) {
    long prev, next;
    do {
        prev = lvVal();
        next = f.applyAsLong(prev, x);
    } while (!casVal(prev, next));
    return next;
}
|
/**
 * Verifies accumulateAndGet() both returns the updated value and publishes
 * it to subsequent reads.
 */
@Test
public void testAccumulateAndGet() {
    PaddedAtomicLong counter = new PaddedAtomicLong(10);
    long value = counter.accumulateAndGet(1, Long::sum);
    // Fix: JUnit's assertEquals takes (expected, actual) — the original
    // passed the actual value in the expected slot.
    assertEquals(11, value);
    assertEquals(11, counter.get());
}
|
/**
 * Renders an annotation as "SimpleName(memberList)": the package prefix is
 * stripped from the annotation type name (nested-class '$' becomes '.'),
 * and the member list is taken verbatim from the annotation's toString().
 */
public static String formatAnnotation(Annotation annotation) {
  String fullName = annotation.annotationType().getName();
  int lastDot = fullName.lastIndexOf('.');
  String simpleName = fullName.substring(lastDot + 1).replace('$', '.');
  String rendered = annotation.toString();
  String memberList = rendered.substring(rendered.indexOf('('));
  return simpleName + memberList;
}
|
// Verifies formatAnnotation strips the package and keeps the member values of
// the first annotation found on Options.getObject().
@Test
public void testFormatAnnotationJsonIgnore() throws Exception {
    assertEquals(
        "JsonIgnore(value=true)",
        ReflectHelpers.formatAnnotation(Options.class.getMethod("getObject").getAnnotations()[0]));
}
|
@Override
public void run() {
    // Scenario execution lifecycle. The try/finally layout is deliberate:
    // afterRun() and the latch/abort handling must run even if a step crashes,
    // otherwise a parallel runner could hang waiting on countdown latches.
    try { // make sure we call afterRun() even on crashes
        // and operate countdown latches, else we may hang the parallel runner
        // steps == null means beforeRun() has not been invoked yet (first entry).
        if (steps == null) {
            beforeRun();
        }
        if (skipped) {
            return;
        }
        int count = steps.size();
        int index = 0;
        // nextStepIndex() drives iteration so that debug step-back / hooks can
        // reposition the cursor between steps.
        while ((index = nextStepIndex()) < count) {
            currentStep = steps.get(index);
            execute(currentStep);
            if (currentStepResult != null) { // can be null if debug step-back or hook skip
                result.addStepResult(currentStepResult);
            }
        }
    } catch (Exception e) {
        // Preserve any partial result for the failing step, then record a
        // synthetic step result carrying the exception.
        if (currentStepResult != null) {
            result.addStepResult(currentStepResult);
        }
        logError("scenario [run] failed\n" + StringUtils.throwableToString(e));
        currentStepResult = result.addFakeStepResult("scenario [run] failed", e);
    } finally {
        if (!skipped) {
            afterRun();
            // Optionally abort the whole suite on first failure (fail-fast mode).
            if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) {
                featureRuntime.suite.abort();
            }
        }
        // Top-level scenarios (no caller) own their log appender; close it to
        // reclaim memory.
        if (caller.isNone()) {
            logAppender.close(); // reclaim memory
        }
    }
}
|
// Verifies karate.jsonPath supports deep-scan ('$..a') and filter
// ('[?(@.id == 1)]') expressions.
@Test
void testJsonPath() {
    run(
        "def foo = { a: 1, b: { a: 2 } }",
        "def res1 = karate.jsonPath(foo, '$..a')",
        "def arr = [ {'id': 1, 'name': 'test', 'age': 10}, {'id': 2, 'name': 'test2', 'age': 20} ]",
        "def res2 = karate.jsonPath(arr, '$.[?(@.id == 1)].name')[0]"
    );
    matchVar("res1", "[1, 2]");
    matchVar("res2", "test");
}
|
/**
 * Entry point for the migration command: verifies the config file exists,
 * loads it, and delegates to the directory-based overload.
 *
 * @return 0 on success, 1 if the config file is missing or fails to load
 */
@Override
protected int command() {
    if (!validateConfigFilePresent()) {
        return 1;
    }
    final MigrationConfig loadedConfig;
    try {
        loadedConfig = MigrationConfig.load(getConfigFile());
    } catch (KsqlException | MigrationException e) {
        LOGGER.error(e.getMessage());
        return 1;
    }
    return command(getMigrationsDir(getConfigFile(), loadedConfig));
}
|
// Creating a migration for a version that already exists must fail (exit code 1).
@Test
public void shouldFailIfVersionAlreadyExists() throws Exception {
    // Given:
    givenVersionsExist("12");
    command = PARSER.parse(DESCRIPTION, "-v", "12");
    // When:
    final int result = command.command(migrationsDir);
    // Then:
    assertThat(result, is(1));
}
|
/**
 * Creates and launches a batch of foreach iteration instances.
 *
 * <p>For a fresh run, brand-new instances are created; for a restart, restart
 * instances are derived from the foreach artifact. The created instances are
 * then handed to the instance DAO to run in batches of {@code batchSize}.
 *
 * @param workflow the inline foreach workflow definition
 * @param internalId internal id of the foreach workflow
 * @param workflowVersionId version id of the foreach workflow
 * @param runProperties run properties propagated to each iteration
 * @param foreachStepId id of the enclosing foreach step
 * @param artifact foreach artifact carrying run id / policy state
 * @param requests run requests, one per iteration instance
 * @param instanceIds instance ids, positionally paired with {@code requests}
 * @param batchSize max number of instances started per DAO batch
 * @return error details from the DAO, or empty when nothing to run / no errors
 */
public Optional<Details> runForeachBatch(
    Workflow workflow,
    Long internalId,
    long workflowVersionId,
    RunProperties runProperties,
    String foreachStepId,
    ForeachArtifact artifact,
    List<RunRequest> requests,
    List<Long> instanceIds,
    int batchSize) {
  // Nothing requested: nothing to do.
  if (ObjectHelper.isCollectionEmptyOrNull(requests)) {
    return Optional.empty();
  }
  // requests and instanceIds are parallel lists and must line up 1:1.
  Checks.checkTrue(
      requests.size() == instanceIds.size(),
      "Run request list size [%s] must match instance id list size [%s]",
      requests.size(),
      instanceIds.size());
  List<WorkflowInstance> instances;
  if (artifact.isFreshRun()) {
    instances =
        createStartForeachInstances(
            workflow,
            internalId,
            workflowVersionId,
            artifact.getForeachRunId(),
            runProperties,
            requests,
            instanceIds);
  } else {
    // Restart path: rebuild instances from the prior run recorded in the artifact.
    instances =
        createRestartForeachInstances(
            workflow,
            internalId,
            workflowVersionId,
            runProperties,
            foreachStepId,
            artifact,
            requests,
            instanceIds);
  }
  // Creation may legitimately yield nothing (e.g. all iterations filtered out).
  if (ObjectHelper.isCollectionEmptyOrNull(instances)) {
    return Optional.empty();
  }
  return instanceDao.runWorkflowInstances(workflow.getId(), instances, batchSize);
}
|
// Restart path: a non-fresh foreach artifact must produce restart instances
// that keep the artifact's workflow id / run id and the given instance id.
@Test
public void testRestartRunForeachBatch() {
    ForeachArtifact artifact = new ForeachArtifact();
    artifact.setRunPolicy(RunPolicy.RESTART_FROM_INCOMPLETE);
    artifact.setTotalLoopCount(10);
    artifact.setForeachWorkflowId(instance.getWorkflowId());
    artifact.setForeachRunId(3L);
    RunRequest request =
        RunRequest.builder()
            .initiator(new ManualInitiator())
            .currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
            .build();
    Optional<Details> errors =
        actionHandler.runForeachBatch(
            definition.getWorkflow(),
            123L,
            10L,
            new RunProperties(),
            "foreach-step",
            artifact,
            Collections.singletonList(request),
            Collections.singletonList(5L),
            1);
    assertFalse(errors.isPresent());
    ArgumentCaptor<List<WorkflowInstance>> captor = ArgumentCaptor.forClass(List.class);
    verify(instanceDao, times(1))
        .runWorkflowInstances(eq(artifact.getForeachWorkflowId()), captor.capture(), eq(1));
    List<WorkflowInstance> res = captor.getValue();
    assertEquals(1, res.size());
    assertEquals(artifact.getForeachWorkflowId(), res.get(0).getWorkflowId());
    assertEquals(5L, res.get(0).getWorkflowInstanceId());
    assertEquals(3L, res.get(0).getWorkflowRunId());
    assertEquals(10L, res.get(0).getWorkflowVersionId());
}
|
/**
 * Rewrites a Parquet file, encrypting the given column paths with the supplied
 * encryption properties.
 *
 * @param inputFile source Parquet file path
 * @param outputFile destination path for the rewritten (encrypted) file
 * @param paths dot-separated column paths to encrypt
 * @param fileEncryptionProperties encryption configuration for the output file
 * @throws IOException if reading, rewriting, or closing fails
 */
public void encryptColumns(
        String inputFile, String outputFile, List<String> paths, FileEncryptionProperties fileEncryptionProperties)
        throws IOException {
    Path inPath = new Path(inputFile);
    Path outPath = new Path(outputFile);
    RewriteOptions options = new RewriteOptions.Builder(conf, inPath, outPath)
            .encrypt(paths)
            .encryptionProperties(fileEncryptionProperties)
            .build();
    ParquetRewriter rewriter = new ParquetRewriter(options);
    try {
        rewriter.processBlocks();
    } finally {
        // Always release the rewriter; previously it leaked when processBlocks() threw.
        rewriter.close();
    }
}
|
// Column encryption must round-trip correctly regardless of the codec used
// for the source file.
@Test
public void testDifferentCompression() throws IOException {
    String[] encryptColumns = {"Links.Forward"};
    String[] compressions = {"GZIP", "ZSTD", "SNAPPY", "UNCOMPRESSED"};
    for (String compression : compressions) {
        testSetup(compression);
        columnEncryptor.encryptColumns(
            inputFile.getFileName(),
            outputFile,
            Arrays.asList(encryptColumns),
            EncDecProperties.getFileEncryptionProperties(encryptColumns, ParquetCipher.AES_GCM_CTR_V1, false));
        verifyResultDecryptionWithValidKey();
    }
}
|
/**
 * Starts a workflow instance inside a single retryable transaction, applying
 * the given run strategy.
 *
 * <p>Within the transaction: allocates the next instance id, deduplicates by
 * uuid, completes instance initialization, then dispatches on the run
 * strategy (or persists directly if the instance is already terminal).
 * Finally advances the latest-instance-id watermark when this instance took
 * the allocated id. The statement order matters for consistency; do not
 * reorder.
 *
 * @param instance the workflow instance to start (mutated during init)
 * @param runStrategy the run strategy controlling admission
 * @return number of instances actually started (0 if deduplicated)
 */
public int startWithRunStrategy(
    @NotNull WorkflowInstance instance, @NotNull RunStrategy runStrategy) {
  return withMetricLogError(
      () ->
          withRetryableTransaction(
              conn -> {
                final long nextInstanceId =
                    getLatestInstanceId(conn, instance.getWorkflowId()) + 1;
                // Same uuid already present: treat as a no-op duplicate start.
                if (isDuplicated(conn, instance)) {
                  return 0;
                }
                completeInstanceInit(conn, nextInstanceId, instance);
                int res;
                if (instance.getStatus().isTerminal()) {
                  // Save it directly and send a terminate event
                  res = addTerminatedInstance(conn, instance);
                } else {
                  switch (runStrategy.getRule()) {
                    case SEQUENTIAL:
                    case PARALLEL:
                    case STRICT_SEQUENTIAL:
                      res = insertInstance(conn, instance, true, null);
                      break;
                    case FIRST_ONLY:
                      res = startFirstOnlyInstance(conn, instance);
                      break;
                    case LAST_ONLY:
                      res = startLastOnlyInstance(conn, instance);
                      break;
                    default:
                      throw new MaestroInternalError(
                          "When start, run strategy [%s] is not supported.", runStrategy);
                  }
                }
                // Only bump the watermark if this instance consumed the new id
                // (init may have assigned a pre-existing id on restart paths).
                if (instance.getWorkflowInstanceId() == nextInstanceId) {
                  updateLatestInstanceId(conn, instance.getWorkflowId(), nextInstanceId);
                }
                return res;
              }),
      "startWithRunStrategy",
      "Failed to start a workflow [{}][{}] with run strategy [{}]",
      instance.getWorkflowId(),
      instance.getWorkflowUuid(),
      runStrategy);
}
|
// A new start with the default run strategy must allocate the next instance
// id (2 here), set run id 1, keep the caller's uuid, and publish one event.
@Test
public void testStartWithRunStrategyForNewStart() {
    wfi.setWorkflowInstanceId(0L);
    wfi.setWorkflowRunId(0L);
    wfi.setWorkflowUuid("test-uuid");
    int res = runStrategyDao.startWithRunStrategy(wfi, Defaults.DEFAULT_RUN_STRATEGY);
    assertEquals(1, res);
    assertEquals(2, wfi.getWorkflowInstanceId());
    assertEquals(1, wfi.getWorkflowRunId());
    assertEquals("test-uuid", wfi.getWorkflowUuid());
    WorkflowInstance latestRun =
        dao.getLatestWorkflowInstanceRun(wfi.getWorkflowId(), wfi.getWorkflowInstanceId());
    assertEquals(2, latestRun.getWorkflowInstanceId());
    assertEquals("test-uuid", latestRun.getWorkflowUuid());
    verifyPublish(1, 0, 0, 0, 0);
    MaestroTestHelper.removeWorkflowInstance(dataSource, TEST_WORKFLOW_ID, 2);
}
|
/**
 * Returns the absolute ZooKeeper-style path of the root node,
 * i.e. a leading slash followed by {@code ROOT_NODE}.
 */
public static String getRootNodePath() {
    return "/" + ROOT_NODE;
}
|
// Root node path must be the absolute "/listener_assisted".
// NOTE(review): method name has a typo ("assertRooPath"); left unchanged to
// avoid altering the public test identifier.
@Test
void assertRooPath() {
    assertThat(ListenerAssistedNodePath.getRootNodePath(), is("/listener_assisted"));
}
|
/**
 * FEEL "finishes" built-in: a point finishes a range when the range's high
 * boundary is closed and the point equals the high endpoint.
 *
 * @param point the candidate point; must not be null
 * @param range the range to test against; must not be null
 * @return true/false result, or an invalid-parameters error when an argument
 *         is null or the point cannot be compared to the range endpoint
 */
public FEELFnResult<Boolean> invoke(@ParameterName( "point" ) Comparable point, @ParameterName( "range" ) Range range) {
    if (point == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be null"));
    }
    if (range == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range", "cannot be null"));
    }
    try {
        // An open high boundary can never be "finished" by a point, so the
        // comparison only runs for a closed boundary (short-circuit).
        boolean highIsClosed = range.getHighBoundary() == Range.RangeBoundary.CLOSED;
        boolean finishes = highIsClosed && point.compareTo(range.getHighEndPoint()) == 0;
        return FEELFnResult.ofResult(finishes);
    } catch (Exception e) {
        // points are not comparable
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be compared to range"));
    }
}
|
// "finishes" between two ranges: true when the first range ends exactly where
// the second does with matching closed boundaries, false otherwise.
@Test
void invokeParamRangeAndRange() {
    FunctionTestUtil.assertResult( finishesFunction.invoke(
            new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
            new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ) ),
            Boolean.TRUE );
    FunctionTestUtil.assertResult( finishesFunction.invoke(
            new RangeImpl( Range.RangeBoundary.CLOSED, "c", "f", Range.RangeBoundary.CLOSED ),
            new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ) ),
            Boolean.TRUE );
    FunctionTestUtil.assertResult( finishesFunction.invoke(
            new RangeImpl( Range.RangeBoundary.CLOSED, "c", "f", Range.RangeBoundary.CLOSED ),
            new RangeImpl( Range.RangeBoundary.CLOSED, "e", "f", Range.RangeBoundary.CLOSED ) ),
            Boolean.FALSE );
    FunctionTestUtil.assertResult( finishesFunction.invoke(
            new RangeImpl( Range.RangeBoundary.CLOSED, "c", "f", Range.RangeBoundary.CLOSED ),
            new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.OPEN ) ),
            Boolean.FALSE );
}
|
// Computation step: walks the component tree and applies the duplication
// formula to every component via a path-aware crawler.
@Override
public void execute(ComputationStep.Context context) {
    new PathAwareCrawler<>(
        FormulaExecutorComponentVisitor.newBuilder(metricRepository, measureRepository)
            .buildFor(List.of(duplicationFormula)))
        .visit(treeRootHolder.getRoot());
}
|
// Cross-project duplicates must not inflate the count: only the original
// block's single line is counted.
@Test
public void compute_duplicated_lines_counts_lines_from_original_and_ignores_CrossProjectDuplicate() {
    TextBlock original = new TextBlock(1, 1);
    duplicationRepository.addCrossProjectDuplication(FILE_1_REF, original, SOME_FILE_KEY, new TextBlock(2, 2));
    setNewLines(FILE_1);
    underTest.execute(new TestComputationStepContext());
    assertRawMeasureValue(FILE_1_REF, NEW_DUPLICATED_LINES_KEY, 1);
}
|
/**
 * Serializes an event into a network {@link Buffer}.
 *
 * @param event the event to serialize
 * @param hasPriority whether the resulting buffer should use a priority data type
 * @return a buffer backed by the serialized event bytes
 * @throws IOException if event serialization fails
 */
public static Buffer toBuffer(AbstractEvent event, boolean hasPriority) throws IOException {
    final ByteBuffer serializedEvent = EventSerializer.toSerializedEvent(event);
    MemorySegment data = MemorySegmentFactory.wrap(serializedEvent.array());
    final Buffer buffer =
            new NetworkBuffer(
                    data, FreeingBufferRecycler.INSTANCE, getDataType(event, hasPriority));
    // The wrapped array may be larger than the payload; limit to the readable bytes.
    buffer.setSize(serializedEvent.remaining());
    return buffer;
}
|
// Each event type must serialize into a non-empty event buffer with the
// expected data type (barrier -> blocking upstream, EndOfData/EndOfPartition
// -> their dedicated types, everything else -> plain event buffer).
@Test
void testToBuffer() throws IOException {
    for (AbstractEvent evt : events) {
        Buffer buffer = EventSerializer.toBuffer(evt, false);
        assertThat(buffer.isBuffer()).isFalse();
        assertThat(buffer.readableBytes()).isGreaterThan(0);
        assertThat(buffer.isRecycled()).isFalse();
        if (evt instanceof CheckpointBarrier) {
            assertThat(buffer.getDataType().isBlockingUpstream()).isTrue();
        } else if (evt instanceof EndOfData) {
            assertThat(buffer.getDataType()).isEqualTo(Buffer.DataType.END_OF_DATA);
        } else if (evt instanceof EndOfPartitionEvent) {
            assertThat(buffer.getDataType()).isEqualTo(Buffer.DataType.END_OF_PARTITION);
        } else {
            assertThat(buffer.getDataType()).isEqualTo(Buffer.DataType.EVENT_BUFFER);
        }
    }
}
|
/**
 * Applies a single ZMQ socket option to this options object.
 *
 * <p>The expected runtime type of {@code optval} depends on {@code option}
 * (Integer, Long, Boolean, String, byte[], Class, or a codec/allocator
 * instance). Several options also switch the security {@code mechanism} as a
 * side effect (PLAIN/CURVE/GSSAPI).
 *
 * @param option one of the ZMQ.ZMQ_* option constants
 * @param optval the new value; interpretation depends on the option
 * @return true if the option was recognized and applied; false for
 *         recognized-but-unparseable allocator/chooser values
 * @throws IllegalArgumentException if the option is unknown or the value is
 *         out of range / of the wrong type
 */
@SuppressWarnings("deprecation")
public boolean setSocketOpt(int option, Object optval)
{
    final ValueReference<Boolean> result = new ValueReference<>(false);
    switch (option) {
    case ZMQ.ZMQ_SNDHWM:
        sendHwm = (Integer) optval;
        if (sendHwm < 0) {
            throw new IllegalArgumentException("sendHwm " + optval);
        }
        return true;
    case ZMQ.ZMQ_RCVHWM:
        recvHwm = (Integer) optval;
        if (recvHwm < 0) {
            throw new IllegalArgumentException("recvHwm " + optval);
        }
        return true;
    case ZMQ.ZMQ_AFFINITY:
        affinity = (Long) optval;
        return true;
    case ZMQ.ZMQ_IDENTITY:
        byte[] val = parseBytes(option, optval);
        // NOTE(review): the message wording looks inverted — the check rejects
        // null or length > 255 (i.e. identity must be at MOST 255 bytes).
        if (val == null || val.length > 255) {
            throw new IllegalArgumentException("identity must not be null or less than 255 " + optval);
        }
        identity = Arrays.copyOf(val, val.length);
        identitySize = (short) identity.length;
        return true;
    case ZMQ.ZMQ_RATE:
        rate = (Integer) optval;
        return true;
    case ZMQ.ZMQ_RECOVERY_IVL:
        recoveryIvl = (Integer) optval;
        return true;
    case ZMQ.ZMQ_SNDBUF:
        sndbuf = (Integer) optval;
        return true;
    case ZMQ.ZMQ_RCVBUF:
        rcvbuf = (Integer) optval;
        return true;
    case ZMQ.ZMQ_TOS:
        tos = (Integer) optval;
        return true;
    case ZMQ.ZMQ_LINGER:
        linger = (Integer) optval;
        return true;
    case ZMQ.ZMQ_RECONNECT_IVL:
        reconnectIvl = (Integer) optval;
        // -1 disables reconnection; anything below that is invalid.
        if (reconnectIvl < -1) {
            throw new IllegalArgumentException("reconnectIvl " + optval);
        }
        return true;
    case ZMQ.ZMQ_RECONNECT_IVL_MAX:
        reconnectIvlMax = (Integer) optval;
        if (reconnectIvlMax < 0) {
            throw new IllegalArgumentException("reconnectIvlMax " + optval);
        }
        return true;
    case ZMQ.ZMQ_BACKLOG:
        backlog = (Integer) optval;
        return true;
    case ZMQ.ZMQ_MAXMSGSIZE:
        maxMsgSize = (Long) optval;
        return true;
    case ZMQ.ZMQ_MULTICAST_HOPS:
        multicastHops = (Integer) optval;
        return true;
    case ZMQ.ZMQ_RCVTIMEO:
        recvTimeout = (Integer) optval;
        return true;
    case ZMQ.ZMQ_SNDTIMEO:
        sendTimeout = (Integer) optval;
        return true;
    /* Deprecated in favor of ZMQ_IPV6 */
    case ZMQ.ZMQ_IPV4ONLY:
        return setSocketOpt(ZMQ.ZMQ_IPV6, !parseBoolean(option, optval));
    /* To replace the somewhat surprising IPV4ONLY */
    case ZMQ.ZMQ_IPV6:
        ipv6 = parseBoolean(option, optval);
        return true;
    case ZMQ.ZMQ_SOCKS_PROXY:
        socksProxyAddress = parseString(option, optval);
        return true;
    case ZMQ.ZMQ_TCP_KEEPALIVE:
        tcpKeepAlive = ((Number) optval).intValue();
        if (tcpKeepAlive != -1 && tcpKeepAlive != 0 && tcpKeepAlive != 1) {
            throw new IllegalArgumentException("tcpKeepAlive only accepts one of -1,0,1 " + optval);
        }
        return true;
    case ZMQ.ZMQ_TCP_KEEPALIVE_CNT:
        this.tcpKeepAliveCnt = ((Number) optval).intValue();
        return true;
    case ZMQ.ZMQ_TCP_KEEPALIVE_IDLE:
        this.tcpKeepAliveIdle = ((Number) optval).intValue();
        return true;
    case ZMQ.ZMQ_TCP_KEEPALIVE_INTVL:
        this.tcpKeepAliveIntvl = ((Number) optval).intValue();
        return true;
    case ZMQ.ZMQ_IMMEDIATE:
        immediate = parseBoolean(option, optval);
        return true;
    // DELAY_ATTACH_ON_CONNECT is the logical negation of IMMEDIATE.
    case ZMQ.ZMQ_DELAY_ATTACH_ON_CONNECT:
        immediate = !parseBoolean(option, optval);
        return true;
    case ZMQ.ZMQ_TCP_ACCEPT_FILTER:
        String filterStr = parseString(option, optval);
        // null clears all filters; empty/oversized strings are rejected.
        if (filterStr == null) {
            tcpAcceptFilters.clear();
        }
        else if (filterStr.isEmpty() || filterStr.length() > 255) {
            throw new IllegalArgumentException("tcp_accept_filter " + optval);
        }
        else {
            TcpAddressMask filter = new TcpAddressMask(filterStr, ipv6);
            tcpAcceptFilters.add(filter);
        }
        return true;
    case ZMQ.ZMQ_PLAIN_SERVER:
        asServer = parseBoolean(option, optval);
        mechanism = (asServer ? Mechanisms.PLAIN : Mechanisms.NULL);
        return true;
    case ZMQ.ZMQ_PLAIN_USERNAME:
        // null resets the security mechanism entirely.
        if (optval == null) {
            mechanism = Mechanisms.NULL;
            asServer = false;
            return true;
        }
        plainUsername = parseString(option, optval);
        asServer = false;
        mechanism = Mechanisms.PLAIN;
        return true;
    case ZMQ.ZMQ_PLAIN_PASSWORD:
        // null resets the security mechanism entirely.
        if (optval == null) {
            mechanism = Mechanisms.NULL;
            asServer = false;
            return true;
        }
        plainPassword = parseString(option, optval);
        asServer = false;
        mechanism = Mechanisms.PLAIN;
        return true;
    case ZMQ.ZMQ_ZAP_DOMAIN:
        String domain = parseString(option, optval);
        if (domain != null && domain.length() < 256) {
            zapDomain = domain;
            return true;
        }
        throw new IllegalArgumentException("zap domain length shall be < 256 : " + optval);
    case ZMQ.ZMQ_CURVE_SERVER:
        asServer = parseBoolean(option, optval);
        mechanism = (asServer ? Mechanisms.CURVE : Mechanisms.NULL);
        return true;
    case ZMQ.ZMQ_CURVE_PUBLICKEY:
        curvePublicKey = setCurveKey(option, optval, result);
        return result.get();
    case ZMQ.ZMQ_CURVE_SECRETKEY:
        curveSecretKey = setCurveKey(option, optval, result);
        return result.get();
    case ZMQ.ZMQ_CURVE_SERVERKEY:
        curveServerKey = setCurveKey(option, optval, result);
        if (curveServerKey == null) {
            asServer = false;
        }
        return result.get();
    case ZMQ.ZMQ_CONFLATE:
        conflate = parseBoolean(option, optval);
        return true;
    case ZMQ.ZMQ_GSSAPI_SERVER:
        asServer = parseBoolean(option, optval);
        mechanism = Mechanisms.GSSAPI;
        return true;
    case ZMQ.ZMQ_GSSAPI_PRINCIPAL:
        gssPrincipal = parseString(option, optval);
        mechanism = Mechanisms.GSSAPI;
        return true;
    case ZMQ.ZMQ_GSSAPI_SERVICE_PRINCIPAL:
        gssServicePrincipal = parseString(option, optval);
        mechanism = Mechanisms.GSSAPI;
        return true;
    case ZMQ.ZMQ_GSSAPI_PLAINTEXT:
        gssPlaintext = parseBoolean(option, optval);
        return true;
    case ZMQ.ZMQ_HANDSHAKE_IVL:
        handshakeIvl = (Integer) optval;
        if (handshakeIvl < 0) {
            throw new IllegalArgumentException("handshakeIvl only accept positive values " + optval);
        }
        return true;
    case ZMQ.ZMQ_HEARTBEAT_IVL:
        heartbeatInterval = (Integer) optval;
        if (heartbeatInterval < 0) {
            throw new IllegalArgumentException("heartbeatInterval only accept positive values " + optval);
        }
        return true;
    case ZMQ.ZMQ_HEARTBEAT_TIMEOUT:
        heartbeatTimeout = (Integer) optval;
        if (heartbeatTimeout < 0) {
            throw new IllegalArgumentException("heartbeatTimeout only accept positive values " + optval);
        }
        return true;
    case ZMQ.ZMQ_HEARTBEAT_TTL:
        Integer value = (Integer) optval;
        // Convert this to deciseconds from milliseconds
        value /= 100;
        // TTL is stored in deciseconds; the wire field allows 0..6553,
        // i.e. millisecond inputs in [0..655399] after truncating division.
        if (value >= 0 && value <= 6553) {
            heartbeatTtl = value;
        }
        else {
            throw new IllegalArgumentException("heartbeatTtl is out of range [0..655399]" + optval);
        }
        return true;
    case ZMQ.ZMQ_HEARTBEAT_CONTEXT:
        heartbeatContext = (byte[]) optval;
        if (heartbeatContext == null) {
            throw new IllegalArgumentException("heartbeatContext cannot be null");
        }
        return true;
    case ZMQ.ZMQ_DECODER:
        decoder = checkCustomCodec(optval, IDecoder.class);
        rawSocket = true;
        // failure throws ZError.InstantiationException
        // if that line is reached, everything is fine
        return true;
    case ZMQ.ZMQ_ENCODER:
        encoder = checkCustomCodec(optval, IEncoder.class);
        rawSocket = true;
        // failure throws ZError.InstantiationException
        // if that line is reached, everything is fine
        return true;
    case ZMQ.ZMQ_MSG_ALLOCATOR:
        // Accepts a class name, a Class, or a ready-made MsgAllocator instance.
        if (optval instanceof String) {
            try {
                allocator = allocator(Class.forName((String) optval));
                return true;
            }
            catch (ClassNotFoundException e) {
                throw new IllegalArgumentException(e);
            }
        }
        else if (optval instanceof Class) {
            allocator = allocator((Class<?>) optval);
            return true;
        }
        else if (optval instanceof MsgAllocator) {
            allocator = (MsgAllocator) optval;
            return true;
        }
        return false;
    case ZMQ.ZMQ_MSG_ALLOCATION_HEAP_THRESHOLD:
        Integer allocationHeapThreshold = (Integer) optval;
        allocator = new MsgAllocatorThreshold(allocationHeapThreshold);
        return true;
    case ZMQ.ZMQ_SELECTOR_PROVIDERCHOOSER:
        // Accepts a class name, a Class, or a SelectorProviderChooser instance.
        if (optval instanceof String) {
            try {
                selectorChooser = chooser(Class.forName((String) optval));
                return true;
            }
            catch (ClassNotFoundException e) {
                throw new IllegalArgumentException(e);
            }
        }
        else if (optval instanceof Class) {
            selectorChooser = chooser((Class<?>) optval);
            return true;
        }
        else if (optval instanceof SelectorProviderChooser) {
            selectorChooser = (SelectorProviderChooser) optval;
            return true;
        }
        return false;
    case ZMQ.ZMQ_HELLO_MSG:
        // null or empty clears the message; otherwise a defensive copy is kept.
        if (optval == null) {
            helloMsg = null;
        }
        else {
            byte[] bytes = parseBytes(option, optval);
            if (bytes.length == 0) {
                helloMsg = null;
            }
            else {
                helloMsg = new Msg(Arrays.copyOf(bytes, bytes.length));
            }
        }
        return true;
    case ZMQ.ZMQ_DISCONNECT_MSG:
        if (optval == null) {
            disconnectMsg = null;
        }
        else {
            byte[] bytes = parseBytes(option, optval);
            if (bytes.length == 0) {
                disconnectMsg = null;
            }
            else {
                disconnectMsg = new Msg(Arrays.copyOf(bytes, bytes.length));
            }
        }
        return true;
    case ZMQ.ZMQ_HICCUP_MSG:
        if (optval == null) {
            hiccupMsg = null;
        }
        else {
            byte[] bytes = parseBytes(option, optval);
            if (bytes.length == 0) {
                hiccupMsg = null;
            }
            else {
                hiccupMsg = new Msg(Arrays.copyOf(bytes, bytes.length));
            }
        }
        return true;
    case ZMQ.ZMQ_AS_TYPE:
        this.asType = (Integer) optval;
        return true;
    case ZMQ.ZMQ_SELFADDR_PROPERTY_NAME:
        this.selfAddressPropertyName = parseString(option, optval);
        return true;
    default:
        throw new IllegalArgumentException("Unknown Option " + option);
    }
}
|
// A negative heartbeat interval must be rejected.
@Test(expected = IllegalArgumentException.class)
public void testHeartbeatIvlUnderflow()
{
    options.setSocketOpt(ZMQ.ZMQ_HEARTBEAT_IVL, -1);
}
|
/**
 * Resolves the output schema of an execution step by dispatching to the
 * handler registered for the step's concrete class.
 *
 * @param step the execution step whose schema should be resolved
 * @param schema the input schema
 * @return the resolved output schema
 * @throws IllegalStateException if no handler is registered for the step class
 */
public LogicalSchema resolve(final ExecutionStep<?> step, final LogicalSchema schema) {
    return Optional.ofNullable(HANDLERS.get(step.getClass()))
        .map(h -> h.handle(this, schema, step))
        .orElseThrow(() -> new IllegalStateException("Unhandled step class: " + step.getClass()));
}
|
// Grouping a stream by a single column must yield that column as the key and
// leave the value columns unchanged.
@Test
public void shouldResolveSchemaForStreamGroupBy() {
    // Given:
    final StreamGroupBy<?> step = new StreamGroupBy<>(
        PROPERTIES,
        streamSource,
        formats,
        ImmutableList.of(new UnqualifiedColumnReferenceExp(Optional.empty(), ORANGE_COL))
    );
    // When:
    final LogicalSchema result = resolver.resolve(step, SCHEMA);
    // Then:
    assertThat(result, is(LogicalSchema.builder()
        .keyColumn(ORANGE_COL, SqlTypes.INTEGER)
        .valueColumns(SCHEMA.value())
        .build()));
}
|
/**
 * Parses a raw SAML metadata XML string and, if valid, stores the resulting
 * descriptor on this resolver.
 *
 * @param rawMetadata the metadata document as a UTF-8 string
 * @return true if the metadata was parsed and passed validation, false otherwise
 * @throws UnmarshallingException if the XML cannot be unmarshalled
 */
public boolean addMetadataString(String rawMetadata) throws UnmarshallingException {
    InputStream inputStream = new ByteArrayInputStream(rawMetadata.getBytes(UTF_8));
    XMLObject metadata = super.unmarshallMetadata(inputStream);
    if (!isValid(metadata)) {
        return false;
    }
    // NOTE(review): if the parsed object is neither an EntitiesDescriptor nor
    // an EntityDescriptor, this still returns true without storing anything —
    // confirm whether that is intended.
    if (metadata instanceof EntitiesDescriptor) {
        this.entitiesDescriptor = (EntitiesDescriptor) metadata;
    }
    if (metadata instanceof EntityDescriptor) {
        this.entityDescriptor = (EntityDescriptor) metadata;
    }
    return true;
}
|
// Valid metadata must be accepted by the resolver.
@Test
public void addMetadataStringTest() throws UnmarshallingException {
    assertTrue(stringMetadataResolver.addMetadataString(metadata));
}
|
/**
 * Collects the split segments into a list.
 *
 * @param trim whether each segment should be trimmed via {@link StrUtil#trim}
 * @return the (optionally trimmed) list of segments
 */
public List<String> toList(boolean trim) {
    if (trim) {
        return toList(str -> StrUtil.trim(str));
    }
    return toList(str -> str);
}
|
// Case-insensitive split on 'a' should produce 4 segments (empty segments kept).
@Test
public void splitByCharIgnoreCaseTest(){
    String str1 = "a, ,,eAedsas, ddf,";
    //不忽略""  (do not ignore empty segments)
    SplitIter splitIter = new SplitIter(str1,
            new CharFinder('a', true),
            Integer.MAX_VALUE,
            false
    );
    assertEquals(4, splitIter.toList(false).size());
}
|
// Returns the remote working directory, wrapping checked IOException in the
// library's unchecked IORuntimeException.
@Override
public String pwd() {
    try {
        return client.printWorkingDirectory();
    } catch (IOException e) {
        throw new IORuntimeException(e);
    }
}
|
// Manual/interactive check (disabled): probes exist() against a local SFTP
// server with a variety of path shapes (relative, absolute, trailing slashes).
@Test
@Disabled
public void existSftpTest() throws Exception {
    try (final Sftp ftp = new Sftp("127.0.0.1", 22, "test", "test")) {
        Console.log(ftp.pwd());
        Console.log(ftp.exist(null));
        Console.log(ftp.exist(""));
        Console.log(ftp.exist("."));
        Console.log(ftp.exist(".."));
        Console.log(ftp.exist("/"));
        Console.log(ftp.exist("a"));
        Console.log(ftp.exist("/home/test"));
        Console.log(ftp.exist("/home/test/"));
        Console.log(ftp.exist("/home/test//////"));
        Console.log(ftp.exist("/home/test/file1"));
        Console.log(ftp.exist("/home/test/file1/"));
        Console.log(ftp.exist("///////////"));
        Console.log(ftp.exist("./"));
        Console.log(ftp.exist("./file1"));
        Console.log(ftp.pwd());
    }
}
|
// Validates the router (non-null, non-empty name) and persists it to the store.
@Override
public void createRouter(KubevirtRouter router) {
    checkNotNull(router, ERR_NULL_ROUTER);
    checkArgument(!Strings.isNullOrEmpty(router.name()), ERR_NULL_ROUTER_NAME);
    kubevirtRouterStore.createRouter(router);
    log.info(String.format(MSG_ROUTER, router.name(), MSG_CREATED));
}
|
// Creating the same router twice must be rejected by the store.
@Test(expected = IllegalArgumentException.class)
public void createDuplicateRouter() {
    target.createRouter(ROUTER);
    target.createRouter(ROUTER);
}
|
/**
 * Maps a PMML {@code MissingValueWeights} element to its KiePMML counterpart.
 *
 * @param missingValueWeights the source element, may be null
 * @return the converted weights, or null when the input is null
 */
static KiePMMLMissingValueWeights getKiePMMLMissingValueWeights(MissingValueWeights missingValueWeights) {
    if (missingValueWeights == null) {
        return null;
    }
    return new KiePMMLMissingValueWeights(getMissingValueWeightsDoubleValues(missingValueWeights));
}
|
// null maps to null; an empty element maps to empty values; a populated REAL
// array maps to matching double values.
@Test
void getKiePMMLMissingValueWeights() {
    assertThat(KiePMMLClusteringModelFactory.getKiePMMLMissingValueWeights(null)).isNull();
    KiePMMLMissingValueWeights retrieved =
            KiePMMLClusteringModelFactory.getKiePMMLMissingValueWeights(new MissingValueWeights());
    assertThat(retrieved).isNotNull();
    assertThat(retrieved.getValues()).isNotNull();
    assertThat(retrieved.getValues()).isEmpty();
    MissingValueWeights missingValueWeights = new MissingValueWeights();
    final Random random = new Random();
    final List<Double> doubleValues =
            IntStream.range(0, 3).mapToObj(i -> random.nextDouble()).collect(Collectors.toList());
    final List<String> values = doubleValues.stream().map(String::valueOf).collect(Collectors.toList());
    Array array = getArray(Array.Type.REAL, values);
    missingValueWeights.setArray(array);
    retrieved =
            KiePMMLClusteringModelFactory.getKiePMMLMissingValueWeights(missingValueWeights);
    commonEvaluateKiePMMLMissingValueWeights(retrieved, missingValueWeights);
}
|
// Validates the load balancer (non-null, non-empty name) and persists it.
@Override
public void createLoadBalancer(KubevirtLoadBalancer lb) {
    checkNotNull(lb, ERR_NULL_LOAD_BALANCER);
    checkArgument(!Strings.isNullOrEmpty(lb.name()), ERR_NULL_LOAD_BALANCER_NAME);
    kubevirtLoadBalancerStore.createLoadBalancer(lb);
    log.info(String.format(MSG_LOAD_BALANCER, lb.name(), MSG_CREATED));
}
|
// A null load balancer must be rejected with NullPointerException.
@Test(expected = NullPointerException.class)
public void testCreateNullLoadBalancer() {
    target.createLoadBalancer(null);
}
|
/**
 * Counts the configured outputs, grouped by their type field.
 *
 * <p>Only the type field is projected from MongoDB; documents without a type
 * are skipped.
 *
 * @return a map from output type name to the number of outputs of that type
 */
@Override
public Map<String, Long> countByType() {
    final Map<String, Long> outputsCountByType = new HashMap<>();
    try (DBCursor outputTypes = dbCollection.find(null, new BasicDBObject(OutputImpl.FIELD_TYPE, 1))) {
        for (DBObject outputType : outputTypes) {
            final String type = (String) outputType.get(OutputImpl.FIELD_TYPE);
            if (type != null) {
                // merge() replaces the manual null-check/increment idiom and
                // avoids the mixed int/Long ternary autoboxing of the original.
                outputsCountByType.merge(type, 1L, Long::sum);
            }
        }
    }
    return outputsCountByType;
}
|
// Fixture contains one output of each of two types; the counts must reflect that.
@Test
@MongoDBFixtures("OutputServiceImplTest.json")
public void countByTypeReturnsNumberOfOutputsByType() {
    assertThat(outputService.countByType())
            .hasSize(2)
            .containsEntry("org.graylog2.outputs.LoggingOutput", 1L)
            .containsEntry("org.graylog2.outputs.GelfOutput", 1L);
}
|
// Thin facade over UserAgentParser for parsing a raw User-Agent header string.
public static UserAgent parse(String userAgentString) {
    return UserAgentParser.parse(userAgentString);
}
|
// An IE8 (Trident/4.0) UA on Windows NT 6.1 must be classified as desktop
// MSIE 8.0 on "Windows 7 or Windows Server 2008R2".
@Test
public void parseWindows10WithIe8EmulatorTest() {
    final String uaStr = "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)";
    final UserAgent ua = UserAgentUtil.parse(uaStr);
    assertEquals("MSIE", ua.getBrowser().toString());
    assertEquals("8.0", ua.getVersion());
    assertEquals("Trident", ua.getEngine().toString());
    assertEquals("4.0", ua.getEngineVersion());
    assertEquals("Windows 7 or Windows Server 2008R2", ua.getOs().toString());
    assertEquals("6.1", ua.getOsVersion());
    assertEquals("Windows", ua.getPlatform().toString());
    assertFalse(ua.isMobile());
}
|
/**
 * Converts the argument to its string form, ignoring the type hint.
 *
 * @param arg the value to stringify, may be null
 * @param type unused type hint
 * @return empty when {@code arg} is null, otherwise {@code String.valueOf(arg)}
 */
@Override
public Optional<String> getValue(Object arg, String type) {
    if (arg == null) {
        return Optional.empty();
    }
    return Optional.of(String.valueOf(arg));
}
|
// getValue must stringify non-null args (type hint ignored) and map null to empty.
@Test
public void testValue() {
    TypeStrategy strategy = new EmptyTypeStrategy();
    // normal
    Assert.assertEquals("foo", strategy.getValue("foo", "").orElse(null));
    // normal
    Assert.assertEquals("foo", strategy.getValue("foo", null).orElse(null));
    // the test is not equal
    Assert.assertNotEquals("foo", strategy.getValue("bar", null).orElse(null));
    // test null
    Assert.assertNotEquals("foo", strategy.getValue(null, null).orElse(null));
}
|
// Removes the document with the given id; returns the number of removed documents.
@Override
public int delete(String id) {
    return this.coll.removeById(id).getN();
}
|
// delete() returns 1 for an existing id, 0 for already-deleted or unknown ids.
@Test
@MongoDBFixtures("DecoratorServiceImplTest.json")
public void delete() {
    assertThat(decoratorService.findAll()).hasSize(3);
    assertThat(decoratorService.delete("588bcafebabedeadbeef0001")).isEqualTo(1);
    assertThat(decoratorService.findAll()).hasSize(2);
    assertThat(decoratorService.delete("588bcafebabedeadbeef0001")).isEqualTo(0);
    assertThat(decoratorService.delete("588bcafebabedeadbeef9999")).isEqualTo(0);
}
|
// Convenience overload: registers the service with a single worker (concurrency 1).
@Override
public <T> void register(Class<T> remoteInterface, T object) {
    register(remoteInterface, object, 1);
}
|
// A remote service registered under a namespace must be reachable under that
// exact namespace only; other namespaces (including the default) time out.
@Test
public void testInvocationWithServiceName() {
    RedissonClient server = createInstance();
    RedissonClient client = createInstance();
    server.getRemoteService("MyServiceNamespace").register(RemoteInterface.class, new RemoteImpl());
    RemoteInterface serviceRemoteInterface = client.getRemoteService("MyServiceNamespace").get(RemoteInterface.class);
    RemoteInterface otherServiceRemoteInterface = client.getRemoteService("MyOtherServiceNamespace").get(RemoteInterface.class);
    RemoteInterface defaultServiceRemoteInterface = client.getRemoteService().get(RemoteInterface.class);
    assertThat(serviceRemoteInterface.resultMethod(21L)).isEqualTo(42L);
    // Invoking a service in an unregistered custom services namespace should throw
    Assertions.assertThrows(RemoteServiceAckTimeoutException.class, () -> {
        otherServiceRemoteInterface.resultMethod(21L);
    });
    // Invoking a service in the unregistered default services namespace should throw
    Assertions.assertThrows(RemoteServiceAckTimeoutException.class, () -> {
        defaultServiceRemoteInterface.resultMethod(21L);
    });
    client.shutdown();
    server.shutdown();
}
|
// Prints an empty line to the console output stream.
public static void log() {
    out.println();
}
|
// Smoke test: Console.log must accept no args, an array, and a templated message.
@Test
public void logTest(){
    Console.log();
    String[] a = {"abc", "bcd", "def"};
    Console.log(a);
    Console.log("This is Console log for {}.", "test");
}
|
/**
 * Sets the parent job meta, keeping the current-directory-changed listener
 * registered on exactly the active parent.
 *
 * <p>Fix: the previous implementation only deregistered the listener from the
 * old parent when the new parent was null, so replacing one non-null parent
 * with another leaked the listener on the old parent.
 *
 * @param parentJobMeta the new parent job meta, or null to detach
 */
@Override
public void setParentJobMeta( JobMeta parentJobMeta ) {
  JobMeta previous = getParentJobMeta();
  super.setParentJobMeta( parentJobMeta );
  // Always detach from the old parent when it is being replaced or cleared.
  if ( previous != null && previous != parentJobMeta ) {
    previous.removeCurrentDirectoryChangedListener( currentDirListener );
  }
  if ( parentJobMeta != null ) {
    parentJobMeta.addCurrentDirectoryChangedListener( currentDirListener );
    variables.setParentVariableSpace( parentJobMeta );
  }
}
|
// Setting a parent must register the directory listener once; clearing the
// parent must deregister it once.
@Test
public void testCurrDirListener() throws Exception {
    JobMeta meta = mock( JobMeta.class );
    JobEntryTrans jet = getJobEntryTrans();
    jet.setParentJobMeta( meta );
    jet.setParentJobMeta( null );
    verify( meta, times( 1 ) ).addCurrentDirectoryChangedListener( any() );
    verify( meta, times( 1 ) ).removeCurrentDirectoryChangedListener( any() );
}
|
// Fetches the key schema by delegating to getSchema with isKey = true.
@Override
public SchemaResult getKeySchema(
    final Optional<String> topicName,
    final Optional<Integer> schemaId,
    final FormatInfo expectedFormat,
    final SerdeFeatures serdeFeatures
) {
    return getSchema(topicName, schemaId, expectedFormat, serdeFeatures, true);
}
|
// Key schema lookups must hit the "<topic>-key" subject in Schema Registry.
@Test
public void shouldRequestCorrectSchemaOnGetKeySchema() throws Exception {
    // When:
    supplier.getKeySchema(Optional.of(TOPIC_NAME),
        Optional.empty(), expectedFormat, SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES));
    // Then:
    verify(srClient).getLatestSchemaMetadata(TOPIC_NAME + "-key");
}
|
/**
 * Replaces any present credential values (AWS keys, Azure shared key / SAS
 * token) in the given parameter map with the credential mask. Keys that are
 * absent are left untouched.
 *
 * @param params mutable storage parameter map to sanitize in place
 */
public static void addMaskForCredential(Map<String, String> params) {
    final String[] credentialKeys = {
            CloudConfigurationConstants.AWS_S3_ACCESS_KEY,
            CloudConfigurationConstants.AWS_S3_SECRET_KEY,
            CloudConfigurationConstants.AZURE_BLOB_SHARED_KEY,
            CloudConfigurationConstants.AZURE_BLOB_SAS_TOKEN
    };
    for (String credentialKey : credentialKeys) {
        params.computeIfPresent(credentialKey, (key, value) -> CREDENTIAL_MASK);
    }
}
|
// All four credential parameters must be masked in place.
@Test
public void testAddMaskForCredential() {
    Map<String, String> storageParams = new HashMap<>();
    storageParams.put(AWS_S3_ACCESS_KEY, "accessKey");
    storageParams.put(AWS_S3_SECRET_KEY, "secretKey");
    storageParams.put(AZURE_BLOB_SAS_TOKEN, "sasToken");
    storageParams.put(AZURE_BLOB_SHARED_KEY, "sharedKey");
    Deencapsulation.invoke(StorageVolume.class, "addMaskForCredential", storageParams);
    Assert.assertEquals(StorageVolume.CREDENTIAL_MASK, storageParams.get(AWS_S3_ACCESS_KEY));
    Assert.assertEquals(StorageVolume.CREDENTIAL_MASK, storageParams.get(AWS_S3_SECRET_KEY));
    Assert.assertEquals(StorageVolume.CREDENTIAL_MASK, storageParams.get(AZURE_BLOB_SAS_TOKEN));
    Assert.assertEquals(StorageVolume.CREDENTIAL_MASK, storageParams.get(AZURE_BLOB_SHARED_KEY));
}
|
/**
 * Instantiates a custom {@link Catalog} implementation by class name, wires in
 * the Hadoop configuration (if the implementation accepts one), and
 * initializes it with the given name and properties.
 *
 * @param impl fully qualified class name of the Catalog implementation
 * @param catalogName name passed to {@code Catalog.initialize}
 * @param properties catalog properties passed to {@code Catalog.initialize}
 * @param hadoopConf Hadoop configuration object (kept as Object to avoid a
 *                   hard Hadoop dependency)
 * @return the initialized catalog
 * @throws IllegalArgumentException if the class is missing a no-arg
 *         constructor or does not implement Catalog
 */
public static Catalog loadCatalog(
    String impl, String catalogName, Map<String, String> properties, Object hadoopConf) {
  Preconditions.checkNotNull(impl, "Cannot initialize custom Catalog, impl class name is null");
  DynConstructors.Ctor<Catalog> ctor;
  try {
    ctor = DynConstructors.builder(Catalog.class).impl(impl).buildChecked();
  } catch (NoSuchMethodException e) {
    throw new IllegalArgumentException(
        String.format("Cannot initialize Catalog implementation %s: %s", impl, e.getMessage()),
        e);
  }
  Catalog catalog;
  try {
    catalog = ctor.newInstance();
  } catch (ClassCastException e) {
    // The class exists but is not a Catalog.
    throw new IllegalArgumentException(
        String.format("Cannot initialize Catalog, %s does not implement Catalog.", impl), e);
  }
  // Inject Hadoop conf before initialize() so the catalog can use it during setup.
  configureHadoopConf(catalog, hadoopConf);
  catalog.initialize(catalogName, properties);
  return catalog;
}
|
// A configurable catalog must receive the name, properties, and Hadoop conf
// passed to loadCatalog.
@Test
public void loadCustomCatalog_withHadoopConfig() {
    Map<String, String> options = Maps.newHashMap();
    options.put("key", "val");
    Configuration hadoopConf = new Configuration();
    hadoopConf.set("key", "val");
    String name = "custom";
    Catalog catalog =
        CatalogUtil.loadCatalog(TestCatalogConfigurable.class.getName(), name, options, hadoopConf);
    assertThat(catalog).isInstanceOf(TestCatalogConfigurable.class);
    assertThat(((TestCatalogConfigurable) catalog).catalogName).isEqualTo(name);
    assertThat(((TestCatalogConfigurable) catalog).catalogProperties).isEqualTo(options);
    assertThat(((TestCatalogConfigurable) catalog).configuration).isEqualTo(hadoopConf);
}
|
/**
 * Routes a {@link LogHandlerEvent} to the matching lifecycle action:
 * application start, container completion, application completion, or a
 * log-aggregation token update. All other event types are ignored.
 *
 * @param event the event to dispatch on its type
 */
@Override
public void handle(LogHandlerEvent event) {
  switch (event.getType()) {
    case APPLICATION_STARTED:
      LogHandlerAppStartedEvent started = (LogHandlerAppStartedEvent) event;
      initApp(started.getApplicationId(), started.getUser(),
          started.getCredentials(), started.getApplicationAcls(),
          started.getLogAggregationContext(),
          started.getRecoveredAppLogInitedTime());
      break;
    case CONTAINER_FINISHED:
      LogHandlerContainerFinishedEvent finished =
          (LogHandlerContainerFinishedEvent) event;
      stopContainer(finished.getContainerId(), finished.getContainerType(),
          finished.getExitCode());
      break;
    case APPLICATION_FINISHED:
      LogHandlerAppFinishedEvent appFinished =
          (LogHandlerAppFinishedEvent) event;
      stopApp(appFinished.getApplicationId());
      break;
    case LOG_AGG_TOKEN_UPDATE:
      // Re-check whether app aggregators can be enabled after a token update.
      checkAndEnableAppAggregators();
      break;
    default:
      // Intentionally ignore all other event types.
      break;
  }
}
|
/**
 * Verifies that the remote app-log root directory is created on demand when an
 * application starts, and that the created directory's group matches the login
 * user's primary group.
 */
@Test
public void testRemoteRootLogDirIsCreatedWithCorrectGroupOwner()
    throws IOException {
  this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
  // A unique, not-yet-existing remote root forces the service to create it.
  Path aNewFile = new Path(String.valueOf("tmp"+System.currentTimeMillis()));
  this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, aNewFile.getName());
  LogAggregationService logAggregationService = new LogAggregationService(
      dispatcher, this.context, this.delSrvc, super.dirsHandler);
  logAggregationService.init(this.conf);
  logAggregationService.start();
  ApplicationId appId = ApplicationId.newInstance(
      System.currentTimeMillis(), 1);
  LogAggregationContext contextWithAMAndFailed =
      Records.newRecord(LogAggregationContext.class);
  contextWithAMAndFailed.setLogAggregationPolicyClassName(
      AMOrFailedContainerLogAggregationPolicy.class.getName());
  // Starting an application is what triggers creation of the remote log root.
  logAggregationService.handle(new LogHandlerAppStartedEvent(appId,
      this.user, null, this.acls, contextWithAMAndFailed));
  dispatcher.await();
  String targetGroup =
      UserGroupInformation.getLoginUser().getPrimaryGroupName();
  FileSystem fs = FileSystem.get(this.conf);
  FileStatus fileStatus = fs.getFileStatus(aNewFile);
  Assert.assertEquals("The new aggregate file is not successfully created",
      fileStatus.getGroup(), targetGroup);
  // Clean up the created directory and shut the service down.
  fs.delete(aNewFile, true);
  logAggregationService.stop();
}
|
/**
 * Handles a MySQL COM_STMT_PREPARE command: parses and binds the SQL, registers a
 * server-side prepared statement for this connection, and returns the response packets.
 *
 * @return packets describing the newly prepared statement
 * @throws UnsupportedPreparedStatementException if the statement type may not be prepared
 */
@Override
public Collection<DatabasePacket> execute() {
    failedIfContainsMultiStatements();
    MetaDataContexts metaDataContexts = ProxyContext.getInstance().getContextManager().getMetaDataContexts();
    SQLParserRule sqlParserRule = metaDataContexts.getMetaData().getGlobalRuleMetaData().getSingleRule(SQLParserRule.class);
    DatabaseType databaseType = TypedSPILoader.getService(DatabaseType.class, "MySQL");
    SQLStatement sqlStatement = sqlParserRule.getSQLParserEngine(databaseType).parse(packet.getSQL(), true);
    if (!MySQLComStmtPrepareChecker.isAllowedStatement(sqlStatement)) {
        throw new UnsupportedPreparedStatementException();
    }
    // Reuse the metadata snapshot fetched above instead of re-resolving it from
    // ProxyContext, so parsing and binding observe the same metadata version.
    SQLStatementContext sqlStatementContext = new SQLBindEngine(metaDataContexts.getMetaData(),
            connectionSession.getCurrentDatabaseName(), packet.getHintValueContext()).bind(sqlStatement, Collections.emptyList());
    int statementId = MySQLStatementIdGenerator.getInstance().nextStatementId(connectionSession.getConnectionId());
    MySQLServerPreparedStatement serverPreparedStatement = new MySQLServerPreparedStatement(packet.getSQL(), sqlStatementContext, packet.getHintValueContext(), new CopyOnWriteArrayList<>());
    connectionSession.getServerPreparedStatementRegistry().addPreparedStatement(statementId, serverPreparedStatement);
    return createPackets(sqlStatementContext, statementId, serverPreparedStatement);
}
|
@Test
void assertPrepareNotAllowedStatement() {
    // "begin" is rejected by MySQLComStmtPrepareChecker, so preparing it must fail.
    when(packet.getSQL()).thenReturn("begin");
    ContextManager contextManager = mockContextManager();
    when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
    MySQLComStmtPrepareExecutor executor = new MySQLComStmtPrepareExecutor(packet, connectionSession);
    assertThrows(UnsupportedPreparedStatementException.class, executor::execute);
}
|
/**
 * Generates {@code count} OMEMO preKeys with ids beginning at {@code startId},
 * delegating the actual key generation to this store's key utility.
 *
 * @param startId id of the first generated preKey
 * @param count number of preKeys to generate
 * @return map from preKey id to generated preKey
 */
public TreeMap<Integer, T_PreKey> generateOmemoPreKeys(int startId, int count) {
    final TreeMap<Integer, T_PreKey> generated = keyUtil().generateOmemoPreKeys(startId, count);
    return generated;
}
|
@Test
public void generateOmemoPreKeys() {
    // Request 49 preKeys with ids 31..79 and verify ordering, serializability,
    // and pairwise distinctness by draining the map from its smallest key.
    TreeMap<Integer, T_PreKey> generated = store.generateOmemoPreKeys(31, 49);
    assertNotNull("Generated data structure must not be null.", generated);
    byte[] previousSerialized = null;
    for (int expectedId = 31; expectedId <= 79; expectedId++) {
        assertEquals("Key ids must be ascending order, starting at 31.",
                Integer.valueOf(expectedId), generated.firstKey());
        assertNotNull("Every id must match to a key.", generated.get(generated.firstKey()));
        byte[] serialized = store.keyUtil().preKeyToBytes(generated.get(generated.firstKey()));
        assertNotNull("Serialized preKey must not be null.", serialized);
        assertNotSame("Serialized preKey must not be of length 0.", 0, serialized.length);
        if (previousSerialized != null) {
            assertFalse("PreKeys MUST NOT be equal.", Arrays.equals(previousSerialized, serialized));
        }
        previousSerialized = serialized;
        generated.remove(generated.firstKey());
    }
    assertEquals("After deleting 49 keys, there must be no keys left.", 0, generated.size());
}
|
/**
 * Delegates to the managed-instance service when one is available.
 *
 * @param filterByManaged whether the produced SQL filter selects managed users
 * @return the SQL filter produced by the delegate service
 * @throws RuntimeException the shared not-managed-instance exception when no
 *     managed instance service is present
 */
@Override
public String getManagedUsersSqlFilter(boolean filterByManaged) {
  // Resolve the delegate first; the absence of a managed instance is an error here.
  return findManagedInstanceService()
    .orElseThrow(() -> NOT_MANAGED_INSTANCE_EXCEPTION)
    .getManagedUsersSqlFilter(filterByManaged);
}
|
@Test
public void getManagedUsersSqlFilter_delegatesToRightService_andPropagateAnswer() {
  // One delegate reports managed, one never does; the managed one must win.
  AlwaysManagedInstanceService managedService = new AlwaysManagedInstanceService();
  DelegatingManagedServices delegating =
      new DelegatingManagedServices(Set.of(new NeverManagedInstanceService(), managedService));
  String expected = managedService.getManagedUsersSqlFilter(true);
  assertThat(delegating.getManagedUsersSqlFilter(true)).isNotNull().isEqualTo(expected);
}
|
/**
 * Best-effort batch lock of message queues for an exclusive consumer.
 * <p>
 * The queues are resolved to addressable queues, grouped by broker name, and one
 * {@code LockBatchRequestBody} is sent per broker. Queues each broker reports as
 * locked are collected into the result. If a broker call fails, the failure is
 * only logged and that broker's queues are simply absent from the result, so
 * callers must treat the returned set as the authoritative list of acquired
 * locks (possibly a subset of {@code mqSet}).
 *
 * @param ctx proxy call context
 * @param mqSet queues to lock
 * @param consumerGroup consumer group requesting the locks
 * @param clientId client instance that will own the locks
 * @param timeoutMillis per-broker request timeout in milliseconds
 * @return future completed with the successfully locked queues; completed
 *         exceptionally only if request preparation itself throws
 */
public CompletableFuture<Set<MessageQueue>> lockBatchMQ(ProxyContext ctx, Set<MessageQueue> mqSet,
    String consumerGroup, String clientId, long timeoutMillis) {
    CompletableFuture<Set<MessageQueue>> future = new CompletableFuture<>();
    try {
        // Concurrent broker responses all add into this set, hence the thread-safe type.
        Set<MessageQueue> successSet = new CopyOnWriteArraySet<>();
        Set<AddressableMessageQueue> addressableMessageQueueSet = buildAddressableSet(ctx, mqSet);
        Map<String, List<AddressableMessageQueue>> messageQueueSetMap = buildAddressableMapByBrokerName(addressableMessageQueueSet);
        List<CompletableFuture<Void>> futureList = new ArrayList<>();
        messageQueueSetMap.forEach((k, v) -> {
            LockBatchRequestBody requestBody = new LockBatchRequestBody();
            requestBody.setConsumerGroup(consumerGroup);
            requestBody.setClientId(clientId);
            requestBody.setMqSet(v.stream().map(AddressableMessageQueue::getMessageQueue).collect(Collectors.toSet()));
            // All queues in v share one broker; v.get(0) supplies that broker's address.
            CompletableFuture<Void> future0 = serviceManager.getMessageService()
                .lockBatchMQ(ctx, v.get(0), requestBody, timeoutMillis)
                .thenAccept(successSet::addAll);
            futureList.add(FutureUtils.addExecutor(future0, this.executor));
        });
        // Complete with whatever succeeded even when some broker calls failed:
        // partial failure is logged, not propagated to the caller.
        CompletableFuture.allOf(futureList.toArray(new CompletableFuture[0])).whenComplete((v, t) -> {
            if (t != null) {
                log.error("LockBatchMQ failed, group={}", consumerGroup, t);
            }
            future.complete(successSet);
        });
    } catch (Throwable t) {
        log.error("LockBatchMQ exception, group={}", consumerGroup, t);
        future.completeExceptionally(t);
    }
    return FutureUtils.addExecutor(future, this.executor);
}
|
@Test
public void testLockBatchPartialSuccessWithException() throws Throwable {
    // Two queues on two brokers: broker1 grants its lock, broker2's call fails.
    MessageQueue brokerOneQueue = new MessageQueue(TOPIC, "broker1", 0);
    MessageQueue brokerTwoQueue = new MessageQueue(TOPIC, "broker2", 0);
    AddressableMessageQueue addressableBrokerOne = new AddressableMessageQueue(brokerOneQueue, "127.0.0.1");
    AddressableMessageQueue addressableBrokerTwo = new AddressableMessageQueue(brokerTwoQueue, "127.0.0.1");
    Set<MessageQueue> requested = new HashSet<>();
    requested.add(brokerOneQueue);
    requested.add(brokerTwoQueue);
    when(this.topicRouteService.buildAddressableMessageQueue(any(), any()))
        .thenAnswer(i -> new AddressableMessageQueue((MessageQueue) i.getArguments()[1], "127.0.0.1"));
    when(this.messageService.lockBatchMQ(any(), eq(addressableBrokerOne), any(), anyLong()))
        .thenReturn(CompletableFuture.completedFuture(Sets.newHashSet(brokerOneQueue)));
    CompletableFuture<Set<MessageQueue>> failedFuture = new CompletableFuture<>();
    failedFuture.completeExceptionally(new MQBrokerException(1, "err"));
    when(this.messageService.lockBatchMQ(any(), eq(addressableBrokerTwo), any(), anyLong()))
        .thenReturn(failedFuture);
    // Partial failure is swallowed: only broker1's queue comes back locked.
    Set<MessageQueue> locked = this.consumerProcessor.lockBatchMQ(ProxyContext.create(), requested, CONSUMER_GROUP, CLIENT_ID, 1000)
        .get();
    assertThat(locked).isEqualTo(Sets.newHashSet(brokerOneQueue));
}
|
/**
 * Records the download origin URL for the given file.
 *
 * @param file local file to annotate
 * @param dataUrl origin URL; a blank value is silently ignored
 * @throws LocalAccessDeniedException if the attribute cannot be written
 */
@Override
public void setWhereFrom(final Local file, final String dataUrl) throws LocalAccessDeniedException {
    synchronized(lock) {
        // A blank origin URL is a logged no-op rather than an error.
        if(StringUtils.isBlank(dataUrl)) {
            log.warn("No data url given");
        }
        else if(!this.setWhereFrom(file.getAbsolute(), dataUrl)) {
            throw new LocalAccessDeniedException(file.getAbsolute());
        }
    }
}
|
@Test
public void testSetWhereEmptyUrl() throws Exception {
    final QuarantineService service = new LaunchServicesQuarantineService();
    // Both null and the empty string must be tolerated without throwing.
    service.setWhereFrom(new NullLocal("/", "n"), null);
    service.setWhereFrom(new NullLocal("/", "n"), StringUtils.EMPTY);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.