| focal_method (string, lengths 13–60.9k) | test_case (string, lengths 25–109k) |
|---|---|
@ApiOperation(value = "Delete a user", tags = { "Users" }, code = 204)
@ApiResponses(value = {
    @ApiResponse(code = 204, message = "Indicates the user was found and has been deleted. Response-body is intentionally empty."),
    @ApiResponse(code = 404, message = "Indicates the requested user was not found.")
})
@DeleteMapping("/identity/users/{userId}")
@ResponseStatus(HttpStatus.NO_CONTENT)
public void deleteUser(@ApiParam(name = "userId") @PathVariable String userId) {
    // Resolve first so a missing user surfaces as 404 before any delete is attempted.
    User userToDelete = getUserFromRequest(userId);
    // Give the optional interceptor a chance to veto/observe the deletion.
    if (restApiInterceptor != null) {
        restApiInterceptor.deleteUser(userToDelete);
    }
    identityService.deleteUser(userToDelete.getId());
}
|
// Verifies that a PUT with an empty JSON body returns 200 with the full user
// representation and leaves every stored field untouched.
@Test
public void testUpdateUserNoFields() throws Exception {
User savedUser = null;
try {
User newUser = identityService.newUser("testuser");
newUser.setFirstName("Fred");
newUser.setLastName("McDonald");
newUser.setDisplayName("Fred McDonald");
newUser.setEmail("no-reply@flowable.org");
identityService.saveUser(newUser);
savedUser = newUser;
// Deliberately empty update payload: no fields should change.
ObjectNode taskUpdateRequest = objectMapper.createObjectNode();
HttpPut httpPut = new HttpPut(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_USER, newUser.getId()));
httpPut.setEntity(new StringEntity(taskUpdateRequest.toString()));
CloseableHttpResponse response = executeRequest(httpPut, HttpStatus.SC_OK);
JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
closeResponse(response);
assertThat(responseNode).isNotNull();
assertThatJson(responseNode)
.when(Option.IGNORING_EXTRA_FIELDS)
.isEqualTo("{"
+ "id: 'testuser',"
+ "firstName: 'Fred',"
+ "lastName: 'McDonald',"
+ "displayName: 'Fred McDonald',"
+ "email: 'no-reply@flowable.org',"
+ "url: '" + SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_USER, newUser.getId()) + "'"
+ "}");
// Check user is updated in Flowable
newUser = identityService.createUserQuery().userId(newUser.getId()).singleResult();
assertThat(newUser.getLastName()).isEqualTo("McDonald");
assertThat(newUser.getFirstName()).isEqualTo("Fred");
assertThat(newUser.getDisplayName()).isEqualTo("Fred McDonald");
assertThat(newUser.getEmail()).isEqualTo("no-reply@flowable.org");
assertThat(newUser.getPassword()).isNull();
} finally {
// Delete user after test fails
if (savedUser != null) {
identityService.deleteUser(savedUser.getId());
}
}
}
|
@Override
public DescriptiveUrlBag toUrl(final Path file) {
    // Buckets/volumes have no downloadable representation.
    if(file.isVolume()) {
        return DescriptiveUrlBag.empty();
    }
    final DescriptiveUrlBag urls = new DescriptiveUrlBag();
    if(file.isFile()) {
        // Build <downloadUrl>/file/<bucket>/<key> with both path segments URI-encoded.
        final String bucket = URIEncoder.encode(containerService.getContainer(file).getName());
        final String key = URIEncoder.encode(containerService.getKey(file));
        final String download = String.format("%s/file/%s/%s", session.getClient().getDownloadUrl(), bucket, key);
        urls.add(new DescriptiveUrl(URI.create(download), DescriptiveUrl.Type.http,
            MessageFormat.format(LocaleFactory.localizedString("{0} URL"), Scheme.https.name().toUpperCase(Locale.ROOT))));
    }
    return urls;
}
|
// A bucket yields no URLs; a freshly-touched file yields exactly one HTTP URL.
@Test
public void testToUrl() throws Exception {
final Path bucket = new Path("/test-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path test = new Path(bucket, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final B2VersionIdProvider fileid = new B2VersionIdProvider(session);
new B2TouchFeature(session, fileid).touch(test, new TransferStatus());
final B2UrlProvider provider = new B2UrlProvider(session);
assertEquals(0, provider.toUrl(bucket).size());
assertEquals(1, provider.toUrl(test).size());
assertNotNull(provider.toUrl(test).find(DescriptiveUrl.Type.http).getUrl());
// Clean up the remote fixture.
new B2DeleteFeature(session, fileid).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@Udf(description = "Returns the sine of an INT value")
public Double sin(
    @UdfParameter(
        value = "value",
        description = "The value in radians to get the sine of."
    ) final Integer value
) {
    // Widen to double and delegate to the Double overload; null propagates as null.
    final Double radians = value == null ? null : value.doubleValue();
    return sin(radians);
}
|
// Inputs larger than 2*pi must still be evaluated correctly across the
// Double, Integer and Long UDF overloads.
@Test
public void shouldHandleMoreThanPositive2Pi() {
assertThat(udf.sin(9.1), closeTo(0.3190983623493521, 0.000000000000001));
assertThat(udf.sin(6.3), closeTo(0.016813900484349713, 0.000000000000001));
assertThat(udf.sin(7), closeTo(0.6569865987187891, 0.000000000000001));
assertThat(udf.sin(7L), closeTo(0.6569865987187891, 0.000000000000001));
}
|
@Override
public Method getMethod() {
    // Plain accessor for the reflective handle held by this instance.
    return this.method;
}
|
// The wrapped reflective Method should expose the original method name.
@Test
void getMethod() {
Assertions.assertEquals("sayHello", method.getMethod().getName());
}
|
@Operation(summary = "queryRuleList", description = "QUERY_RULE_LIST_NOTES")
@GetMapping(value = "/ruleList")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_RULE_LIST_ERROR)
public Result<List<DqRule>> queryRuleList() {
    // Thin controller: fetch every data-quality rule and wrap it in a success result.
    return Result.success(dqRuleService.queryAllRuleList());
}
|
// Stubs the service and checks the controller wraps the list in a SUCCESS result.
@Test
public void testQueryRuleList() {
when(dqRuleService.queryAllRuleList()).thenReturn(getRuleList());
Result<List<DqRule>> listResult = dataQualityController.queryRuleList();
Assertions.assertEquals(Status.SUCCESS.getCode(), listResult.getCode().intValue());
}
|
@Override
public Integer call() throws Exception {
    // Delegate to the shared validation routine: templates are labelled
    // "<namespace> / <id>" and report no extra warnings.
    return this.call(
        Template.class,
        yamlFlowParser,
        modelValidator,
        (Object object) -> {
            final Template parsed = (Template) object;
            return String.format("%s / %s", parsed.getNamespace(), parsed.getId());
        },
        (Object object) -> Collections.emptyList()
    );
}
|
// Validates an invalid template against a running server and checks both the
// non-zero exit code and the validation messages written to stderr.
@Test
void runServer() {
    URL directory = TemplateValidateCommandTest.class.getClassLoader().getResource("invalidsTemplates/template.yml");
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    // FIX: remember the original stderr and restore it afterwards; the original
    // test left System.err permanently redirected, hiding output of later tests.
    PrintStream originalErr = System.err;
    System.setErr(new PrintStream(out));
    try {
        try (ApplicationContext ctx = ApplicationContext.run(Map.of("kestra.templates.enabled", "true"), Environment.CLI, Environment.TEST)) {
            EmbeddedServer embeddedServer = ctx.getBean(EmbeddedServer.class);
            embeddedServer.start();
            String[] args = {
                "--server",
                embeddedServer.getURL().toString(),
                "--user",
                "myuser:pass:word",
                directory.getPath()
            };
            Integer call = PicocliRunner.call(TemplateValidateCommand.class, ctx, args);
            // Exit code 1 signals validation failure.
            assertThat(call, is(1));
            assertThat(out.toString(), containsString("Unable to parse template"));
            assertThat(out.toString(), containsString("must not be empty"));
        }
    } finally {
        System.setErr(originalErr);
    }
}
|
/**
 * Executes this sub-command against the cluster.
 *
 * @param admin client used to talk to the cluster
 * @param ns    parsed command-line arguments for this sub-command
 * @param out   stream human-readable output is written to
 * @throws Exception if the command fails for any reason
 */
abstract void execute(Admin admin, Namespace ns, PrintStream out) throws Exception;
|
// Mocks the admin client's describeTransactions() and verifies the
// "describe" sub-command renders a header row plus one data row, including
// the transaction duration derived from the mocked clock.
@Test
public void testDescribeTransaction() throws Exception {
String transactionalId = "foo";
String[] args = new String[] {
"--bootstrap-server",
"localhost:9092",
"describe",
"--transactional-id",
transactionalId
};
DescribeTransactionsResult describeResult = Mockito.mock(DescribeTransactionsResult.class);
int coordinatorId = 5;
long transactionStartTime = time.milliseconds();
KafkaFuture<TransactionDescription> describeFuture = completedFuture(
new TransactionDescription(
coordinatorId,
TransactionState.ONGOING,
12345L,
15,
10000,
OptionalLong.of(transactionStartTime),
singleton(new TopicPartition("bar", 0))
));
Mockito.when(describeResult.description(transactionalId)).thenReturn(describeFuture);
Mockito.when(admin.describeTransactions(singleton(transactionalId))).thenReturn(describeResult);
// Add a little time so that we can see a positive transaction duration in the output
time.sleep(5000);
execute(args);
assertNormalExit();
List<List<String>> table = readOutputAsTable();
// One header row + one transaction row.
assertEquals(2, table.size());
List<String> expectedHeaders = TransactionsCommand.DescribeTransactionsCommand.HEADERS;
assertEquals(expectedHeaders, table.get(0));
List<String> expectedRow = asList(
String.valueOf(coordinatorId),
transactionalId,
"12345",
"15",
"Ongoing",
"10000",
String.valueOf(transactionStartTime),
"5000",
"bar-0"
);
assertEquals(expectedRow, table.get(1));
}
|
/**
 * Resolves key or value metadata for a map mapping.
 *
 * The top-level type is taken from the declared fields when present;
 * otherwise it is derived from the configured key/value class option.
 * Non-OBJECT (or custom) types are resolved as primitives, everything else
 * as a POJO object mapping.
 */
@Override
public KvMetadata resolveMetadata(
boolean isKey,
List<MappingField> resolvedFields,
Map<String, String> options,
InternalSerializationService serializationService
) {
Map<QueryPath, MappingField> fieldsByPath = extractFields(resolvedFields, isKey);
// Prefer an explicitly declared top-level type; fall back to the class named in the options.
Entry<QueryDataType, Class<?>> entry = getTopLevelType(fieldsByPath)
.<Entry<QueryDataType, Class<?>>>map(type -> entry(type, loadClass(type.getObjectTypeMetadata())))
.orElseGet(() -> {
Class<?> typeClass = loadClass(options, isKey);
return entry(QueryDataTypeUtils.resolveTypeForClass(typeClass), typeClass);
});
QueryDataType type = entry.getKey();
Class<?> typeClass = entry.getValue();
if (type.getTypeFamily() != QueryDataTypeFamily.OBJECT || type.isCustomType()) {
return resolvePrimitiveMetadata(isKey, resolvedFields, fieldsByPath, type);
} else {
return resolveObjectMetadata(isKey, resolvedFields, fieldsByPath, typeClass);
}
}
|
// Parameterized over key ("__key") and value ("this") sides: resolving Java-format
// metadata must yield the declared field plus the hidden top-level field, the
// generic query target, and a POJO upsert descriptor.
@Test
@Parameters({
"true, __key",
"false, this"
})
public void test_resolveMetadata(boolean key, String prefix) {
Map<String, String> options = Map.of(
(key ? OPTION_KEY_FORMAT : OPTION_VALUE_FORMAT), JAVA_FORMAT,
(key ? OPTION_KEY_CLASS : OPTION_VALUE_CLASS), Type.class.getName()
);
KvMetadata metadata = INSTANCE.resolveMetadata(
key,
singletonList(field("field", QueryDataType.INT, prefix + ".field")),
options,
null
);
assertThat(metadata.getFields()).containsExactly(
new MapTableField("field", QueryDataType.INT, false, QueryPath.create(prefix + ".field")),
new MapTableField(prefix, QueryDataType.OBJECT, true, QueryPath.create(prefix))
);
assertThat(metadata.getQueryTargetDescriptor()).isEqualTo(GenericQueryTargetDescriptor.DEFAULT);
assertThat(metadata.getUpsertTargetDescriptor())
.isEqualToComparingFieldByField(new PojoUpsertTargetDescriptor(
Type.class.getName(),
Map.of("field", int.class.getName())
));
}
|
/**
 * OOM-resolution loop: keeps killing containers until the memory cgroup no
 * longer reports the under_oom condition. Throws if the cgroup is still
 * under OOM but no container could be killed.
 */
@Override
public void run() {
try {
// We kill containers until the kernel reports the OOM situation resolved
// Note: If the kernel has a delay this may kill more than necessary
while (true) {
String status = cgroups.getCGroupParam(
CGroupsHandler.CGroupController.MEMORY,
"",
CGROUP_PARAM_MEMORY_OOM_CONTROL);
if (!status.contains(CGroupsHandler.UNDER_OOM)) {
break;
}
boolean containerKilled = killContainer();
if (!containerKilled) {
// This can happen, if SIGKILL did not clean up
// non-PGID or containers or containers launched by other users
// or if a process was put to the root YARN cgroup.
throw new YarnRuntimeException(
"Could not find any containers but CGroups " +
"reserved for containers ran out of memory. " +
"I am giving up");
}
}
} catch (ResourceHandlerException ex) {
LOG.warn("Could not fetch OOM status. " +
"This is expected at shutdown. Exiting.", ex);
}
}
|
// Two containers, only c2 is running (per createContainer's last flag — see
// helper). The handler must kill only the running container's process and
// issue exactly one kill signal overall.
@Test
public void testKillOnlyRunningContainersUponOOM() throws Exception {
ConcurrentHashMap<ContainerId, Container> containers =
new ConcurrentHashMap<>();
Container c1 = createContainer(1, false, 1L, false);
containers.put(c1.getContainerId(), c1);
Container c2 = createContainer(2, false, 2L, true);
containers.put(c2.getContainerId(), c2);
ContainerExecutor ex = createContainerExecutor(containers);
Context context = mock(Context.class);
when(context.getContainers()).thenReturn(containers);
when(context.getContainerExecutor()).thenReturn(ex);
CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class);
// First poll reports under_oom, second reports it resolved — so exactly one kill round.
when(cGroupsHandler.getCGroupParam(
CGroupsHandler.CGroupController.MEMORY,
"",
CGROUP_PARAM_MEMORY_OOM_CONTROL))
.thenReturn("under_oom 1").thenReturn("under_oom 0");
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c2.getContainerId().toString(), CGROUP_PROCS_FILE))
.thenReturn("1234").thenReturn("");
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
.thenReturn(getMB(9));
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
.thenReturn(getMB(9));
DefaultOOMHandler handler =
new DefaultOOMHandler(context, false) {
@Override
protected CGroupsHandler getCGroupsHandler() {
return cGroupsHandler;
}
};
handler.run();
verify(ex, times(1)).signalContainer(
new ContainerSignalContext.Builder()
.setPid("1235")
.setContainer(c2)
.setSignal(ContainerExecutor.Signal.KILL)
.build()
);
verify(ex, times(1)).signalContainer(any());
}
|
/**
 * Runs one scheduling pass over all lifespan schedule groups: fetches split
 * batches, computes placements, assigns splits to tasks, and advances the
 * per-group and overall state machines. Returns a blocked or non-blocked
 * {@link ScheduleResult} describing the progress made in this pass.
 */
@Override
public synchronized ScheduleResult schedule()
{
dropListenersFromWhenFinishedOrNewLifespansAdded();
int overallSplitAssignmentCount = 0;
ImmutableSet.Builder<RemoteTask> overallNewTasks = ImmutableSet.builder();
List<ListenableFuture<?>> overallBlockedFutures = new ArrayList<>();
boolean anyBlockedOnPlacements = false;
boolean anyBlockedOnNextSplitBatch = false;
boolean anyNotBlocked = false;
for (Entry<Lifespan, ScheduleGroup> entry : scheduleGroups.entrySet()) {
Lifespan lifespan = entry.getKey();
ScheduleGroup scheduleGroup = entry.getValue();
if (scheduleGroup.state == ScheduleGroupState.NO_MORE_SPLITS || scheduleGroup.state == ScheduleGroupState.DONE) {
verify(scheduleGroup.nextSplitBatchFuture == null);
}
else if (scheduleGroup.pendingSplits.isEmpty()) {
// try to get the next batch
if (scheduleGroup.nextSplitBatchFuture == null) {
scheduleGroup.nextSplitBatchFuture = splitSource.getNextBatch(scheduleGroup.partitionHandle, lifespan, splitBatchSize);
long start = System.nanoTime();
addSuccessCallback(scheduleGroup.nextSplitBatchFuture, () -> stage.recordGetSplitTime(start));
}
if (scheduleGroup.nextSplitBatchFuture.isDone()) {
SplitBatch nextSplits = getFutureValue(scheduleGroup.nextSplitBatchFuture);
scheduleGroup.nextSplitBatchFuture = null;
scheduleGroup.pendingSplits = new HashSet<>(nextSplits.getSplits());
if (nextSplits.isLastBatch()) {
if (scheduleGroup.state == ScheduleGroupState.INITIALIZED && scheduleGroup.pendingSplits.isEmpty()) {
// Add an empty split in case no splits have been produced for the source.
// For source operators, they never take input, but they may produce output.
// This is well handled by Presto execution engine.
// However, there are certain non-source operators that may produce output without any input,
// for example, 1) an AggregationOperator, 2) a HashAggregationOperator where one of the grouping sets is ().
// Scheduling an empty split kicks off necessary driver instantiation to make this work.
scheduleGroup.pendingSplits.add(new Split(
splitSource.getConnectorId(),
splitSource.getTransactionHandle(),
new EmptySplit(splitSource.getConnectorId()),
lifespan,
NON_CACHEABLE));
}
scheduleGroup.state = ScheduleGroupState.NO_MORE_SPLITS;
}
}
else {
overallBlockedFutures.add(scheduleGroup.nextSplitBatchFuture);
anyBlockedOnNextSplitBatch = true;
continue;
}
}
Multimap<InternalNode, Split> splitAssignment = ImmutableMultimap.of();
if (!scheduleGroup.pendingSplits.isEmpty()) {
if (!scheduleGroup.placementFuture.isDone()) {
anyBlockedOnPlacements = true;
continue;
}
if (scheduleGroup.state == ScheduleGroupState.INITIALIZED) {
scheduleGroup.state = ScheduleGroupState.SPLITS_ADDED;
}
if (state == State.INITIALIZED) {
state = State.SPLITS_ADDED;
}
// calculate placements for splits
SplitPlacementResult splitPlacementResult = splitPlacementPolicy.computeAssignments(scheduleGroup.pendingSplits);
splitAssignment = splitPlacementResult.getAssignments();
// remove splits with successful placements
splitAssignment.values().forEach(scheduleGroup.pendingSplits::remove); // AbstractSet.removeAll performs terribly here.
overallSplitAssignmentCount += splitAssignment.size();
// if not completely placed, mark scheduleGroup as blocked on placement
if (!scheduleGroup.pendingSplits.isEmpty()) {
scheduleGroup.placementFuture = splitPlacementResult.getBlocked();
overallBlockedFutures.add(scheduleGroup.placementFuture);
anyBlockedOnPlacements = true;
}
}
// if no new splits will be assigned, update state and attach completion event
Multimap<InternalNode, Lifespan> noMoreSplitsNotification = ImmutableMultimap.of();
if (scheduleGroup.pendingSplits.isEmpty() && scheduleGroup.state == ScheduleGroupState.NO_MORE_SPLITS) {
scheduleGroup.state = ScheduleGroupState.DONE;
if (!lifespan.isTaskWide()) {
InternalNode node = ((BucketedSplitPlacementPolicy) splitPlacementPolicy).getNodeForBucket(lifespan.getId());
noMoreSplitsNotification = ImmutableMultimap.of(node, lifespan);
}
}
// assign the splits with successful placements
overallNewTasks.addAll(assignSplits(splitAssignment, noMoreSplitsNotification));
// Assert that "placement future is not done" implies "pendingSplits is not empty".
// The other way around is not true. One obvious reason is (un)lucky timing, where the placement is unblocked between `computeAssignments` and this line.
// However, there are other reasons that could lead to this.
// Note that `computeAssignments` is quite broken:
// 1. It always returns a completed future when there are no tasks, regardless of whether all nodes are blocked.
// 2. The returned future will only be completed when a node with an assigned task becomes unblocked. Other nodes don't trigger future completion.
// As a result, to avoid busy loops caused by 1, we check pendingSplits.isEmpty() instead of placementFuture.isDone() here.
if (scheduleGroup.nextSplitBatchFuture == null && scheduleGroup.pendingSplits.isEmpty() && scheduleGroup.state != ScheduleGroupState.DONE) {
anyNotBlocked = true;
}
}
// * `splitSource.isFinished` invocation may fail after `splitSource.close` has been invoked.
// If state is NO_MORE_SPLITS/FINISHED, splitSource.isFinished has previously returned true, and splitSource is closed now.
// * Even if `splitSource.isFinished()` return true, it is not necessarily safe to tear down the split source.
// * If anyBlockedOnNextSplitBatch is true, it means we have not checked out the recently completed nextSplitBatch futures,
// which may contain recently published splits. We must not ignore those.
// * If any scheduleGroup is still in DISCOVERING_SPLITS state, it means it hasn't realized that there will be no more splits.
// Next time it invokes getNextBatch, it will realize that. However, the invocation will fail if we tear down splitSource now.
//
// Since grouped execution is going to support failure recovery, and scheduled splits might have to be rescheduled during retry,
// we can no longer claim schedule is complete after all splits are scheduled.
// Splits schedule can only be considered as finished when all lifespan executions are done
// (by calling `notifyAllLifespansFinishedExecution`)
if ((state == State.NO_MORE_SPLITS || state == State.FINISHED) || (!groupedExecution && lifespanAdded && scheduleGroups.isEmpty() && splitSource.isFinished())) {
switch (state) {
case INITIALIZED:
// We have not scheduled a single split so far.
// But this shouldn't be possible. See usage of EmptySplit in this method.
throw new IllegalStateException("At least 1 split should have been scheduled for this plan node");
case SPLITS_ADDED:
state = State.NO_MORE_SPLITS;
splitSource.close();
// fall through
case NO_MORE_SPLITS:
state = State.FINISHED;
whenFinishedOrNewLifespanAdded.set(null);
// fall through
case FINISHED:
return ScheduleResult.nonBlocked(
true,
overallNewTasks.build(),
overallSplitAssignmentCount);
default:
throw new IllegalStateException("Unknown state");
}
}
if (anyNotBlocked) {
return ScheduleResult.nonBlocked(false, overallNewTasks.build(), overallSplitAssignmentCount);
}
if (anyBlockedOnPlacements) {
// In a broadcast join, output buffers of the tasks in build source stage have to
// hold onto all data produced before probe side task scheduling finishes,
// even if the data is acknowledged by all known consumers. This is because
// new consumers may be added until the probe side task scheduling finishes.
//
// As a result, the following line is necessary to prevent deadlock
// due to neither build nor probe can make any progress.
// The build side blocks due to a full output buffer.
// In the meantime the probe side split cannot be consumed since
// builder side hash table construction has not finished.
//
// TODO: When SourcePartitionedScheduler is used as a SourceScheduler, it shouldn't need to worry about
// task scheduling and creation -- these are done by the StageScheduler.
overallNewTasks.addAll(finalizeTaskCreationIfNecessary());
}
ScheduleResult.BlockedReason blockedReason;
if (anyBlockedOnNextSplitBatch) {
blockedReason = anyBlockedOnPlacements ? MIXED_SPLIT_QUEUES_FULL_AND_WAITING_FOR_SOURCE : WAITING_FOR_SOURCE;
}
else {
blockedReason = anyBlockedOnPlacements ? SPLIT_QUEUES_FULL : NO_ACTIVE_DRIVER_GROUP;
}
overallBlockedFutures.add(whenFinishedOrNewLifespanAdded);
return ScheduleResult.blocked(
false,
overallNewTasks.build(),
nonCancellationPropagating(whenAnyComplete(overallBlockedFutures)),
blockedReason,
overallSplitAssignmentCount);
}
|
// Even with zero splits the scheduler must create one task (via the synthetic
// empty split) and finish in a single pass.
@Test
public void testScheduleNoSplits()
{
SubPlan plan = createPlan();
NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService);
SqlStageExecution stage = createSqlStageExecution(plan, nodeTaskMap);
StageScheduler scheduler = getSourcePartitionedScheduler(createFixedSplitSource(0, TestingSplit::createRemoteSplit), stage, nodeManager, nodeTaskMap, 1);
ScheduleResult scheduleResult = scheduler.schedule();
assertEquals(scheduleResult.getNewTasks().size(), 1);
assertEffectivelyFinished(scheduleResult, scheduler);
stage.abort();
}
|
@Override
public <VR> KTable<K, VR> aggregate(final Initializer<VR> initializer,
                                    final Aggregator<? super K, ? super V, VR> aggregator,
                                    final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
    // Unnamed variant: forward to the named overload with an empty name.
    return this.aggregate(initializer, aggregator, NamedInternal.empty(), materialized);
}
|
// Aggregating with an explicit Materialized must expose results through both
// the plain and the timestamped key-value store views.
@Test
public void shouldAggregateAndMaterializeResults() {
groupedStream.aggregate(
MockInitializer.STRING_INIT,
MockAggregator.TOSTRING_ADDER,
Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("aggregate")
.withKeySerde(Serdes.String())
.withValueSerde(Serdes.String()));
try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
processData(driver);
{
final KeyValueStore<String, String> aggregate = driver.getKeyValueStore("aggregate");
assertThat(aggregate.get("1"), equalTo("0+A+C+D"));
assertThat(aggregate.get("2"), equalTo("0+B"));
assertThat(aggregate.get("3"), equalTo("0+E+F"));
}
{
// Timestamped view: values carry the max record timestamp per key.
final KeyValueStore<String, ValueAndTimestamp<String>> aggregate = driver.getTimestampedKeyValueStore("aggregate");
assertThat(aggregate.get("1"), equalTo(ValueAndTimestamp.make("0+A+C+D", 10L)));
assertThat(aggregate.get("2"), equalTo(ValueAndTimestamp.make("0+B", 1L)));
assertThat(aggregate.get("3"), equalTo(ValueAndTimestamp.make("0+E+F", 9L)));
}
}
}
|
/**
 * Creates a request URL holder.
 *
 * @param url    the raw request URL (may include a query string)
 * @param params path/template parameters for this request
 */
public RequestUrl(String url, Map<String, String> params) {
this.url = url;
this.params = params;
// Query parameters are parsed out of the URL itself, not taken from params.
this.queryParams = initQueryParams(url);
}
|
// Matching a concrete URL against the ":state" template must keep the full
// original URL (including the query string) intact.
@Test
void testRequestUrl() {
RequestUrl requestUrl = new MatchUrl("/api/jobs/enqueued?offset=2&limit=2").toRequestUrl("/api/jobs/:state");
assertThat(requestUrl.getUrl()).isEqualTo("/api/jobs/enqueued?offset=2&limit=2");
}
|
/**
 * Merges centralized configuration into the given map in place.
 *
 * @param decrypt whether encrypted values should be decrypted while merging
 * @param config  the configuration map to merge into (modified in place)
 */
public static void mergeMap(boolean decrypt, Map<String, Object> config) {
merge(decrypt, config);
}
|
// A "${TEST.null: key}" placeholder key must be rejected with a RuntimeException
// rather than silently overwritten with null.
@Test(expected = RuntimeException.class)
public void testMap_key_notAllowNullOverwrite() {
Map<String, Object> testMap = new HashMap<>();
testMap.put("${TEST.null: key}", "value");
CentralizedManagement.mergeMap(true, testMap);
}
|
/**
 * Overlays the entries of {@code config} on top of {@code existingData} and
 * returns the merged result as an instance of {@code dataCls}.
 */
public static <T> T loadData(Map<String, Object> config,
                             T existingData,
                             Class<T> dataCls) {
    try {
        // Round-trip the existing object through JSON so it can be merged field-by-field.
        String existingJson = MAPPER.writeValueAsString(existingData);
        Map<String, Object> merged = new HashMap<>(MAPPER.readValue(existingJson, Map.class));
        // Entries from config win over the existing values.
        merged.putAll(config);
        return MAPPER.readValue(MAPPER.writeValueAsString(merged), dataCls);
    } catch (IOException e) {
        throw new RuntimeException("Failed to load config into existing configuration data", e);
    }
}
|
// Overridden keys take the new values; untouched fields keep their originals.
@Test
public void testLoadConsumerConfigurationData() {
ConsumerConfigurationData confData = new ConsumerConfigurationData();
confData.setSubscriptionName("unknown-subscription");
confData.setPriorityLevel(10000);
confData.setConsumerName("unknown-consumer");
confData.setAutoUpdatePartitionsIntervalSeconds(1, TimeUnit.MINUTES);
Map<String, Object> config = new HashMap<>();
config.put("subscriptionName", "test-subscription");
config.put("priorityLevel", 100);
confData = ConfigurationDataUtils.loadData(config, confData, ConsumerConfigurationData.class);
assertEquals("test-subscription", confData.getSubscriptionName());
assertEquals(100, confData.getPriorityLevel());
assertEquals("unknown-consumer", confData.getConsumerName());
// 1 minute set above is reported back in seconds.
assertEquals(60,confData.getAutoUpdatePartitionsIntervalSeconds());
}
|
@ApiOperation(value = "Create Or Update the Mobile application settings (saveMobileAppSettings)",
    notes = "The request payload contains configuration for android/iOS applications and platform qr code widget settings." + SYSTEM_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAnyAuthority('SYS_ADMIN')")
@PostMapping(value = "/api/mobile/app/settings")
public MobileAppSettings saveMobileAppSettings(@Parameter(description = "A JSON value representing the mobile apps configuration")
                                               @RequestBody MobileAppSettings mobileAppSettings) throws ThingsboardException {
    // Double-check WRITE permission explicitly, in addition to the @PreAuthorize guard.
    SecurityUser user = getCurrentUser();
    accessControlService.checkPermission(user, Resource.MOBILE_APP_SETTINGS, Operation.WRITE);
    // Stamp the settings with the caller's tenant before persisting.
    mobileAppSettings.setTenantId(getTenantId());
    return mobileAppSettingsService.saveMobileAppSettings(user.getTenantId(), mobileAppSettings);
}
|
// Round-trip: fetch current settings, flip useDefaultApp, save, and verify
// the change is visible on a subsequent GET.
@Test
public void testSaveMobileAppSettings() throws Exception {
loginSysAdmin();
MobileAppSettings mobileAppSettings = doGet("/api/mobile/app/settings", MobileAppSettings.class);
assertThat(mobileAppSettings.getQrCodeConfig().getQrCodeLabel()).isEqualTo(TEST_LABEL);
assertThat(mobileAppSettings.isUseDefaultApp()).isTrue();
mobileAppSettings.setUseDefaultApp(false);
doPost("/api/mobile/app/settings", mobileAppSettings)
.andExpect(status().isOk());
MobileAppSettings updatedMobileAppSettings = doGet("/api/mobile/app/settings", MobileAppSettings.class);
assertThat(updatedMobileAppSettings.isUseDefaultApp()).isFalse();
}
|
/**
 * Validates cross-field invariants of a CP subsystem configuration.
 *
 * @throws IllegalArgumentException if any invariant is violated
 *         (checkTrue presumably throws IllegalArgumentException — see its contract)
 */
public static void checkCPSubsystemConfig(CPSubsystemConfig config) {
checkTrue(config.getGroupSize() <= config.getCPMemberCount(),
"The group size parameter cannot be bigger than the number of the CP member count");
checkTrue(config.getSessionTimeToLiveSeconds() > config.getSessionHeartbeatIntervalSeconds(),
"Session TTL must be greater than session heartbeat interval!");
// 0 disables auto-removal, so it is exempt from the TTL upper bound.
checkTrue(config.getMissingCPMemberAutoRemovalSeconds() == 0
|| config.getSessionTimeToLiveSeconds() <= config.getMissingCPMemberAutoRemovalSeconds(),
"Session TTL must be smaller than or equal to missing CP member auto-removal seconds!");
checkTrue(!config.isPersistenceEnabled() || config.getCPMemberCount() > 0,
"CP member count must be greater than 0 to use CP persistence feature!");
}
|
// Group size larger than the (default) CP member count must be rejected.
@Test(expected = IllegalArgumentException.class)
public void testValidationFails_whenGroupSizeSetCPMemberCountNotSet() {
CPSubsystemConfig config = new CPSubsystemConfig();
config.setGroupSize(3);
checkCPSubsystemConfig(config);
}
|
/**
 * Refreshes cached cluster metadata from a metadata response: records the
 * leader node, the full node list, the store mode and the raft term.
 */
public void refreshMetadata(String clusterName, MetadataResponse metadataResponse) {
    List<Node> refreshed = new ArrayList<>();
    for (Node candidate : metadataResponse.getNodes()) {
        if (candidate.getRole() == ClusterRole.LEADER) {
            this.setLeaderNode(clusterName, candidate);
        }
        refreshed.add(candidate);
    }
    this.storeMode = StoreMode.get(metadataResponse.getStoreMode());
    // Nothing more to record when the response carried no nodes.
    if (refreshed.isEmpty()) {
        return;
    }
    String group = refreshed.get(0).getGroup();
    this.setNodes(clusterName, group, refreshed);
    this.clusterTerm.computeIfAbsent(clusterName, k -> new ConcurrentHashMap<>())
        .put(group, metadataResponse.getTerm());
}
|
// Refreshing with a leader+follower list and then with an empty list must
// both complete without throwing.
@Test
public void testRefreshMetadata() {
Node node = new Node();
node.setGroup("group");
node.setRole(ClusterRole.LEADER);
Node node1 = new Node();
node1.setGroup("group");
node1.setRole(ClusterRole.FOLLOWER);
List<Node> nodes = new ArrayList<>();
nodes.add(node);
nodes.add(node1);
MetadataResponse metadataResponse = new MetadataResponse();
metadataResponse.setNodes(nodes);
metadataResponse.setStoreMode(StoreMode.RAFT.getName());
Assertions.assertDoesNotThrow(() -> metadata.refreshMetadata("cluster", metadataResponse));
// Empty node list is a valid (no-op) refresh.
metadataResponse.setNodes(new ArrayList<>());
Assertions.assertDoesNotThrow(() -> metadata.refreshMetadata("cluster", metadataResponse));
}
|
public String getName() {
    // Plain accessor; may return null when no name was assigned.
    return this.name;
}
|
// getName() must return null for an unnamed group and the given name otherwise,
// across both constructor overloads.
@Test
public void getName() throws Exception {
ProviderGroup pg = new ProviderGroup(null, null);
Assert.assertNull(pg.getName());
pg = new ProviderGroup("xxx");
Assert.assertEquals(pg.getName(), "xxx");
pg = new ProviderGroup("xxx", null);
Assert.assertEquals(pg.getName(), "xxx");
}
|
@Override
public double readDouble() throws EOFException {
    // A double is transported as its raw IEEE-754 long bit pattern.
    final long bits = readLong();
    return Double.longBitsToDouble(bits);
}
|
// readDouble() must decode the same value Bits.readLong + longBitsToDouble
// produce from the raw test data, honoring the configured byte order.
@Test
public void testReadDouble() throws Exception {
double readDouble = in.readDouble();
long longB = Bits.readLong(INIT_DATA, 0, byteOrder == BIG_ENDIAN);
double aDouble = Double.longBitsToDouble(longB);
assertEquals(aDouble, readDouble, 0);
}
|
@Override
public List<String> getList(PropertyKey key) {
    // Only LIST-typed keys may be read through this accessor.
    checkArgument(key.getType() == PropertyKey.PropertyType.LIST);
    final String raw = (String) get(key);
    // Split the stored string on the key's own delimiter.
    return ConfigurationUtils.parseAsList(raw, key.getDelimiter());
}
|
// A list value set on a LIST-typed key must round-trip through getList().
@Test
public void getList() {
mConfiguration.set(PropertyKey.WORKER_PAGE_STORE_DIRS, Lists.newArrayList("/a", "/b", "/c"));
assertEquals(
Lists.newArrayList("/a", "/b", "/c"),
mConfiguration.getList(PropertyKey.WORKER_PAGE_STORE_DIRS));
}
|
public List<KuduPredicate> convert(ScalarOperator operator) {
    // A missing operator yields null (no predicates), not an empty list.
    return operator == null ? null : operator.accept(this, null);
}
|
// NOT compound predicates cannot be pushed down to Kudu, so conversion must
// produce an empty predicate list.
@Test
public void testNot() {
ConstantOperator intOp = ConstantOperator.createInt(5);
ConstantOperator varcharOp = ConstantOperator.createVarchar("abc");
ScalarOperator ge1 = new BinaryPredicateOperator(BinaryType.GT, F0, intOp);
ScalarOperator ge2 = new BinaryPredicateOperator(BinaryType.GT, F1, varcharOp);
ScalarOperator op = new CompoundPredicateOperator(CompoundPredicateOperator.CompoundType.NOT, ge1, ge2);
List<KuduPredicate> result = CONVERTER.convert(op);
Assert.assertEquals(result.size(), 0);
}
|
/**
 * Checks whether the given bytes begin with the little-endian GZIP magic
 * number (0x8b1f), i.e. whether they look like the start of a gzip stream.
 *
 * @param bytes candidate stream header; may be null
 * @return true only when at least two bytes are present and they match the magic
 */
public static boolean isGzipStream(byte[] bytes) {
    int minByteArraySize = 2;
    if (bytes == null || bytes.length < minByteArraySize) {
        return false;
    }
    // FIX: mask each byte before widening. Java bytes are signed, and the
    // original `bytes[1] << 8 | bytes[0]` let a negative bytes[0] sign-extend
    // over the high byte; it only produced correct answers by accident of the
    // trailing `& 0xFFFF`. Explicit masking states the unsigned intent.
    int header = ((bytes[1] & 0xFF) << 8) | (bytes[0] & 0xFF);
    return GZIPInputStream.GZIP_MAGIC == header;
}
|
// Covers the magic-number match, a wrong magic, a too-short array, null,
// and an all-zero header.
@Test
public void testIsGzipStream() {
// GZIP magic 0x8b1f stored little-endian: low byte first.
byte[] gzipBytes = new byte[2];
gzipBytes[0] = (byte) GZIPInputStream.GZIP_MAGIC;
gzipBytes[1] = (byte) (GZIPInputStream.GZIP_MAGIC >> 8);
byte[] invalidGzipBytes = new byte[2];
invalidGzipBytes[0] = (byte) (GZIPInputStream.GZIP_MAGIC + 1);
invalidGzipBytes[1] = (byte) ((GZIPInputStream.GZIP_MAGIC >> 8) + 1);
byte[] invalidGzipBytes2 = new byte[1];
byte[] normalBytes = new byte[2];
Assert.assertTrue(IoUtil.isGzipStream(gzipBytes));
Assert.assertFalse(IoUtil.isGzipStream(invalidGzipBytes));
Assert.assertFalse(IoUtil.isGzipStream(invalidGzipBytes2));
Assert.assertFalse(IoUtil.isGzipStream(null));
Assert.assertFalse(IoUtil.isGzipStream(normalBytes));
}
|
/**
 * Runs {@code action}, retrying with backoff until it succeeds or the retry
 * budget (per {@code hasRetry}) is exhausted.
 *
 * @param action the operation to attempt; any exception it throws triggers a retry
 * @return the first successful result of {@code action}
 * @throws RetryException if interrupted while backing off, or when all retries fail
 */
public <T> T retryable(CheckedSupplier<T> action) throws RetryException {
    long attempt = 0L;
    do {
        try {
            attempt++;
            return action.get();
        } catch (Exception ex) {
            logger.error("Backoff retry exception", ex);
        }
        if (hasRetry(attempt)) {
            try {
                int ms = backoffTime(attempt);
                logger.info("Retry({}) will execute in {} second", attempt, ms/1000.0);
                Thread.sleep(ms);
            } catch (InterruptedException e) {
                // FIX: restore the interrupt flag before bailing out so callers
                // up the stack can still observe the interruption; the original
                // swallowed the thread's interrupted status.
                Thread.currentThread().interrupt();
                throw new RetryException("Backoff retry aborted", e);
            }
        }
    } while (hasRetry(attempt));
    throw new RetryException("Reach max retry");
}
|
// A supplier that succeeds on the first call must neither throw nor retry,
// and must return its computed value.
@Test
public void testWithoutException() throws Exception {
ExponentialBackoff backoff = new ExponentialBackoff(1L);
CheckedSupplier<Integer> supplier = () -> 1 + 1;
Assertions.assertThatCode(() -> backoff.retryable(supplier)).doesNotThrowAnyException();
Assertions.assertThat(backoff.retryable(supplier)).isEqualTo(2);
}
|
/** Removes all entries from the pending-request collection. */
void clearPendingRequests() {
pendingRequests.clear();
}
|
// After clearPendingRequests() the bulk must report no pending requests.
@Test
void testClearPendingRequests() {
SharingPhysicalSlotRequestBulk bulk = createBulk();
bulk.clearPendingRequests();
assertThat(bulk.getPendingRequests()).isEmpty();
}
|
/**
 * Returns an iterator over the set's elements.
 * NOTE(review): presumably iterates in insertion order, per the LinkedHash*
 * naming — confirm against LinkedHashIterator.
 */
@Override
public @NotNull Iterator<E> iterator() {
return new LinkedHashIterator();
}
|
@Test
public void iterator() {
    // Verifies insertion-order iteration over a large set.
    final LinkedHashSet<Integer> tested = new LinkedHashSet<>();
    for (int i = 0; i < 10000; ++i) {
        tested.add(i);
    }
    int i = 0;
    for (Integer key : tested) {
        Assert.assertEquals(i++, key.intValue());
        // NOTE(review): removal during iteration would throw ConcurrentModificationException
        // on java.util.LinkedHashSet — this presumably relies on the custom implementation
        // tolerating concurrent removal; confirm against the iterator's contract.
        tested.remove(key);
    }
    Assert.assertEquals(0, tested.size());
}
|
/**
 * Builds the dependency graph of a grok pattern entity: the pattern itself plus an
 * edge to every grok pattern it references by name.
 *
 * @param entityDescriptor descriptor of the grok pattern to resolve
 * @return a directed graph containing the entity and its resolvable dependencies;
 *         contains only the entity itself if the pattern cannot be found
 */
@Override
public Graph<EntityDescriptor> resolveNativeEntity(EntityDescriptor entityDescriptor) {
    final MutableGraph<EntityDescriptor> mutableGraph = GraphBuilder.directed().build();
    mutableGraph.addNode(entityDescriptor);
    final ModelId modelId = entityDescriptor.id();
    try {
        final GrokPattern grokPattern = grokPatternService.load(modelId.id());
        final String namedPattern = grokPattern.pattern();
        final Set<String> patterns = GrokPatternService.extractPatternNames(namedPattern);
        // Plain forEach — no need to open a stream just for side effects.
        patterns.forEach(patternName -> {
            // Unresolvable referenced patterns are silently skipped.
            grokPatternService.loadByName(patternName).ifPresent(depPattern -> {
                final EntityDescriptor depEntityDescriptor = EntityDescriptor.create(
                        depPattern.id(), ModelTypes.GROK_PATTERN_V1);
                mutableGraph.putEdge(entityDescriptor, depEntityDescriptor);
            });
        });
    } catch (NotFoundException e) {
        LOG.debug("Couldn't find grok pattern {}", entityDescriptor, e);
    }
    return mutableGraph;
}
|
// NOTE(review): "Dependecy" in the method name is a typo for "Dependency";
// left as-is since test names may be referenced externally.
@Test
public void resolveMatchingDependecyForCreation() throws ValidationException {
    // An unrelated pattern that must NOT appear in the resolved graph.
    final GrokPattern noDepGrokPattern = grokPatternService.save(GrokPattern.create("HALFLIFE", "\\d\\d"));
    final EntityDescriptor noDepEntityDescriptor = EntityDescriptor.create(ModelId.of(noDepGrokPattern.id()),
            ModelTypes.GROK_PATTERN_V1);
    // The pattern referenced by name from the pattern under test.
    final GrokPattern depGrokPattern = grokPatternService.save(GrokPattern.create("PORTAL", "\\d\\d"));
    final EntityDescriptor depEntityDescriptor = EntityDescriptor.create(ModelId.of(depGrokPattern.id()),
            ModelTypes.GROK_PATTERN_V1);
    // "Test" references PORTAL, so resolving it must pull in the PORTAL descriptor.
    final GrokPattern grokPattern = grokPatternService.save(GrokPattern.create("Test", "%{PORTAL}"));
    final EntityDescriptor entityDescriptor = EntityDescriptor.create(ModelId.of(grokPattern.id()),
            ModelTypes.GROK_PATTERN_V1);
    Graph graph = facade.resolveNativeEntity(entityDescriptor);
    assertThat(graph.nodes().toArray()).contains(depEntityDescriptor);
    assertThat(graph.nodes().toArray()).doesNotContain(noDepEntityDescriptor);
}
|
/**
 * Commits the given offsets synchronously, retrying until success, a fatal
 * error, or timer expiry.
 *
 * @param offsets offsets to commit, keyed by partition; an empty map only flushes
 *                pending async commit callbacks
 * @param timer bounds how long to keep retrying
 * @return {@code true} if the commit completed successfully before the timer expired
 */
public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, Timer timer) {
    invokeCompletedOffsetCommitCallbacks();
    if (offsets.isEmpty()) {
        // We guarantee that the callbacks for all commitAsync() will be invoked when
        // commitSync() completes, even if the user tries to commit empty offsets.
        return invokePendingAsyncCommits(timer);
    }
    long attempts = 0L;
    do {
        if (coordinatorUnknownAndUnreadySync(timer)) {
            return false;
        }
        RequestFuture<Void> future = sendOffsetCommitRequest(offsets);
        client.poll(future, timer);
        // We may have had in-flight offset commits when the synchronous commit began. If so, ensure that
        // the corresponding callbacks are invoked prior to returning in order to preserve the order that
        // the offset commits were applied.
        invokeCompletedOffsetCommitCallbacks();
        if (future.succeeded()) {
            if (interceptors != null)
                interceptors.onCommit(offsets);
            return true;
        }
        // Non-retriable failures propagate immediately to the caller.
        if (future.failed() && !future.isRetriable())
            throw future.exception();
        // Retriable failure: back off (exponentially, via retryBackoff) before the next attempt.
        timer.sleep(retryBackoff.backoff(attempts++));
    } while (timer.notExpired());
    return false;
}
|
@Test
public void testCommitOffsetMetadataSync() {
    // A sync commit with offset metadata against a ready coordinator must succeed.
    subscriptions.assignFromUser(singleton(t1p));
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
    OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(100L, "hello");
    Map<TopicPartition, OffsetAndMetadata> offsets = singletonMap(t1p, offsetAndMetadata);
    boolean success = coordinator.commitOffsetsSync(offsets, time.timer(Long.MAX_VALUE));
    assertTrue(success);
}
|
/**
 * Returns the last element of this resource's path, or {@code null} when the
 * path has no file-name component (e.g. a root path).
 */
@Override
public @Nullable String getFilename() {
    final Path name = getPath().getFileName();
    if (name == null) {
        return null;
    }
    return name.toString();
}
|
@Test
public void testGetFilename() {
    // Root has no file-name component; trailing slashes do not affect the result.
    assertNull(toResourceIdentifier("/").getFilename());
    assertEquals("tmp", toResourceIdentifier("/root/tmp").getFilename());
    assertEquals("tmp", toResourceIdentifier("/root/tmp/").getFilename());
    assertEquals("xyz.txt", toResourceIdentifier("/root/tmp/xyz.txt").getFilename());
}
|
/**
 * Creates a {@code continue} statement template, optionally targeting a label.
 *
 * @param label the loop label to continue, or {@code null} for an unlabeled continue
 */
static UContinue create(@Nullable CharSequence label) {
    if (label == null) {
        return new AutoValue_UContinue(null);
    }
    return new AutoValue_UContinue(StringName.of(label));
}
|
@Test
public void equality() {
    // Unlabeled continue, and continues with distinct labels, must each form
    // their own equality group.
    new EqualsTester()
        .addEqualityGroup(UContinue.create(null))
        .addEqualityGroup(UContinue.create("foo"))
        .addEqualityGroup(UContinue.create("bar"))
        .testEquals();
}
|
/**
 * Registers a glue class for this factory. Validation happens before
 * registration: the class must not carry Spring component annotations, and at
 * most one registered class may carry {@code @CucumberContextConfiguration}.
 *
 * @param stepClass the glue class to register; re-adding a known class is a no-op
 * @return always {@code true}
 */
@Override
public boolean addClass(final Class<?> stepClass) {
    if (stepClasses.contains(stepClass)) {
        return true;
    }
    // Validate before mutating state so a rejected class is never partially registered.
    checkNoComponentAnnotations(stepClass);
    if (hasCucumberContextConfiguration(stepClass)) {
        checkOnlyOneClassHasCucumberContextConfiguration(stepClass);
        withCucumberContextConfiguration = stepClass;
    }
    stepClasses.add(stepClass);
    return true;
}
|
@Test
void shouldFailIfClassWithSpringComponentAnnotationsIsFound() {
    // Glue classes annotated with @Component must be rejected with a descriptive message,
    // since Spring auto-detection would otherwise duplicate Cucumber's own registration.
    final ObjectFactory factory = new SpringFactory();
    Executable testMethod = () -> factory.addClass(WithComponentAnnotation.class);
    CucumberBackendException actualThrown = assertThrows(CucumberBackendException.class, testMethod);
    assertThat(actualThrown.getMessage(), is(equalTo(
        "Glue class io.cucumber.spring.componentannotation.WithComponentAnnotation was (meta-)annotated with @Component; marking it as a candidate for auto-detection by Spring. Glue classes are detected and registered by Cucumber. Auto-detection of glue classes by spring may lead to duplicate bean definitions. Please remove the @Component (meta-)annotation")));
}
|
/**
 * Maps an Avro schema (as used by Hudi) to the catalog's type system.
 * <p>
 * Recursive for composite schemas (array/record/map/union). Any shape that
 * cannot be represented maps to {@code PrimitiveType.UNKNOWN_TYPE}.
 *
 * @param avroSchema the Avro schema to convert
 * @return the corresponding catalog {@link Type}, or a scalar of
 *         {@code UNKNOWN_TYPE} when conversion is not possible
 */
public static Type fromHudiType(Schema avroSchema) {
    Schema.Type columnType = avroSchema.getType();
    LogicalType logicalType = avroSchema.getLogicalType();
    PrimitiveType primitiveType = null;
    // Set when a schema shape cannot be represented; resolved to UNKNOWN_TYPE at the end.
    boolean isConvertedFailed = false;
    switch (columnType) {
        case BOOLEAN:
            primitiveType = PrimitiveType.BOOLEAN;
            break;
        case INT:
            // Logical annotations refine the physical int into date/time types.
            if (logicalType instanceof LogicalTypes.Date) {
                primitiveType = PrimitiveType.DATE;
            } else if (logicalType instanceof LogicalTypes.TimeMillis) {
                primitiveType = PrimitiveType.TIME;
            } else {
                primitiveType = PrimitiveType.INT;
            }
            break;
        case LONG:
            if (logicalType instanceof LogicalTypes.TimeMicros) {
                primitiveType = PrimitiveType.TIME;
            } else if (logicalType instanceof LogicalTypes.TimestampMillis
                    || logicalType instanceof LogicalTypes.TimestampMicros) {
                primitiveType = PrimitiveType.DATETIME;
            } else {
                primitiveType = PrimitiveType.BIGINT;
            }
            break;
        case FLOAT:
            primitiveType = PrimitiveType.FLOAT;
            break;
        case DOUBLE:
            primitiveType = PrimitiveType.DOUBLE;
            break;
        case STRING:
            return ScalarType.createDefaultCatalogString();
        case ARRAY:
            // Recurse into the element type.
            Type type = new ArrayType(fromHudiType(avroSchema.getElementType()));
            if (type.isArrayType()) {
                return type;
            } else {
                isConvertedFailed = true;
                break;
            }
        case FIXED:
        case BYTES:
            if (logicalType instanceof LogicalTypes.Decimal) {
                int precision = ((LogicalTypes.Decimal) logicalType).getPrecision();
                int scale = ((LogicalTypes.Decimal) logicalType).getScale();
                return ScalarType.createUnifiedDecimalType(precision, scale);
            } else {
                // Raw binary without a decimal annotation maps to VARCHAR.
                primitiveType = PrimitiveType.VARCHAR;
                break;
            }
        case RECORD:
            // Struct type
            List<Schema.Field> fields = avroSchema.getFields();
            Preconditions.checkArgument(fields.size() > 0);
            ArrayList<StructField> structFields = new ArrayList<>(fields.size());
            for (Schema.Field field : fields) {
                String fieldName = field.name();
                Type fieldType = fromHudiType(field.schema());
                if (fieldType.isUnknown()) {
                    // One unconvertible field makes the whole struct unconvertible.
                    isConvertedFailed = true;
                    break;
                }
                structFields.add(new StructField(fieldName, fieldType));
            }
            if (!isConvertedFailed) {
                return new StructType(structFields);
            }
            break;
        case MAP:
            Schema value = avroSchema.getValueType();
            Type valueType = fromHudiType(value);
            if (valueType.isUnknown()) {
                isConvertedFailed = true;
                break;
            }
            if (!isConvertedFailed) {
                // Hudi map's key must be string
                return new MapType(ScalarType.createDefaultCatalogString(), valueType);
            }
            break;
        case UNION:
            // Only nullable unions (NULL + exactly one concrete type) are supported;
            // they collapse to the single non-null member's type.
            List<Schema> nonNullMembers = avroSchema.getTypes().stream()
                    .filter(schema -> !Schema.Type.NULL.equals(schema.getType()))
                    .collect(Collectors.toList());
            if (nonNullMembers.size() == 1) {
                return fromHudiType(nonNullMembers.get(0));
            } else {
                isConvertedFailed = true;
                break;
            }
        case ENUM:
        default:
            isConvertedFailed = true;
            break;
    }
    if (isConvertedFailed) {
        primitiveType = PrimitiveType.UNKNOWN_TYPE;
    }
    return ScalarType.createType(primitiveType);
}
|
/**
 * Arrays of single-member (nullable-style) unions must convert to catalog
 * array types of the union's sole concrete member.
 */
@Test
public void testArrayHudiSchema() {
    Schema unionSchema;
    Schema arraySchema;
    // A single-member union collapses to its member's type.
    unionSchema = Schema.createUnion(Schema.create(Schema.Type.INT));
    Assert.assertEquals(ScalarType.createType(PrimitiveType.INT), fromHudiType(unionSchema));
    unionSchema = Schema.createUnion(Schema.create(Schema.Type.INT));
    arraySchema = Schema.createArray(unionSchema);
    Assert.assertEquals(new ArrayType(ScalarType.createType(PrimitiveType.INT)), fromHudiType(arraySchema));
    unionSchema = Schema.createUnion(Schema.create(Schema.Type.BOOLEAN));
    arraySchema = Schema.createArray(unionSchema);
    Assert.assertEquals(new ArrayType(ScalarType.createType(PrimitiveType.BOOLEAN)), fromHudiType(arraySchema));
    unionSchema = Schema.createUnion(Schema.create(Schema.Type.STRING));
    arraySchema = Schema.createArray(unionSchema);
    Assert.assertEquals(new ArrayType(ScalarType.createDefaultCatalogString()), fromHudiType(arraySchema));
    unionSchema = Schema.createUnion(Schema.create(Schema.Type.BYTES));
    arraySchema = Schema.createArray(unionSchema);
    Assert.assertEquals(new ArrayType(ScalarType.createType(PrimitiveType.VARCHAR)), fromHudiType(arraySchema));
}
|
/**
 * Zips the file or directory at {@code srcPath} using the default charset.
 *
 * @param srcPath path of the file or directory to compress
 * @return the resulting zip file
 * @throws UtilException on I/O failure
 */
public static File zip(String srcPath) throws UtilException {
    return zip(srcPath, DEFAULT_CHARSET);
}
|
// Manual smoke test for zipping multiple files into one archive;
// disabled because it depends on local files under d:\test.
@Test
@Disabled
public void zipMultiFileTest(){
    final File[] dd={FileUtil.file("d:\\test\\qr_a.jpg")
            ,FileUtil.file("d:\\test\\qr_b.jpg")};
    ZipUtil.zip(FileUtil.file("d:\\test\\qr.zip"),false,dd);
}
|
/**
 * Parses a semantic version string.
 *
 * @param version version string, e.g. {@code "7.10.2"}
 * @return the parsed version
 * @throws ElasticsearchException wrapping any parse failure
 */
protected static com.github.zafarkhaja.semver.Version parseVersion(final String version) {
    try {
        return Version.parse(version);
    } catch (Exception e) {
        throw new ElasticsearchException("Unable to parse Elasticsearch version: " + version, e);
    }
}
|
@Test
void testInvalidValues() {
    // Non-semver strings must be rejected with an ElasticsearchException.
    assertThatThrownBy(() -> SearchVersion.parseVersion("v1")).isInstanceOfAny(ElasticsearchException.class);
    assertThatThrownBy(() -> SearchVersion.parseVersion("1.2.x")).isInstanceOfAny(ElasticsearchException.class);
}
|
/**
 * Leniently parses a string into an int.
 * <p>
 * Blank input yields 0; a "0x" prefix is parsed as hexadecimal; scientific
 * notation is rejected; anything else falls back to a general number parse
 * truncated to int.
 *
 * @param number the string to parse; may be blank
 * @return the parsed int value
 * @throws NumberFormatException if the string uses scientific notation or cannot be parsed
 */
public static int parseInt(String number) throws NumberFormatException {
    if (StrUtil.isBlank(number)) {
        return 0;
    }
    if (StrUtil.startWithIgnoreCase(number, "0x")) {
        // A "0x" prefix (e.g. 0x04) denotes a hexadecimal number.
        return Integer.parseInt(number.substring(2), 16);
    }
    if (StrUtil.containsIgnoreCase(number, "E")) {
        // Scientific notation is deliberately unsupported: it typically denotes very
        // small or very large values that lose precision when narrowed to int.
        throw new NumberFormatException(StrUtil.format("Unsupported int format: [{}]", number));
    }
    try {
        return Integer.parseInt(number);
    } catch (NumberFormatException e) {
        // Fall back to a general numeric parse (handles decimals, suffixes, etc.).
        return parseNumber(number).intValue();
    }
}
|
@Test
public void parseIntTest() {
    // Hexadecimal with 0x prefix.
    int number = NumberUtil.parseInt("0xFE");
    assertEquals(254, number);
    // Leading zero is still parsed as decimal, not octal.
    number = NumberUtil.parseInt("010");
    assertEquals(10, number);
    number = NumberUtil.parseInt("10");
    assertEquals(10, number);
    // Blank input yields 0.
    number = NumberUtil.parseInt(" ");
    assertEquals(0, number);
    // Trailing type suffixes and fractional parts are truncated toward zero.
    number = NumberUtil.parseInt("10F");
    assertEquals(10, number);
    number = NumberUtil.parseInt("22.4D");
    assertEquals(22, number);
    number = NumberUtil.parseInt("22.6D");
    assertEquals(22, number);
    number = NumberUtil.parseInt("0");
    assertEquals(0, number);
    // A bare fraction truncates to 0.
    number = NumberUtil.parseInt(".123");
    assertEquals(0, number);
}
|
/**
 * Returns the bind addresses from the third CLI argument (comma separated),
 * keeping only valid IP address literals. Falls back to the default bind
 * address when fewer than three arguments were given.
 */
public List<String> getAddresses() {
    if (args.length < 3) {
        return Collections.singletonList(DEFAULT_BIND_ADDRESS);
    }
    return Arrays.stream(args[2].split(","))
            .filter(InetAddresses::isInetAddress)
            .collect(Collectors.toList());
}
|
@Test
void assertGetAddressesWithSingleArgument() {
    // With only a port argument, the default bind address 0.0.0.0 is used.
    assertThat(new BootstrapArguments(new String[]{"3306"}).getAddresses(), is(Collections.singletonList("0.0.0.0")));
}
|
/** Returns {@code true} when any change list (create, update, delete) is non-empty. */
public boolean hasContent() {
    // De Morgan of the original: there is content unless every list is empty.
    final boolean allEmpty =
            createItems.isEmpty() && updateItems.isEmpty() && deleteItems.isEmpty();
    return !allEmpty;
}
|
@Test
public void testHasContent() {
    // The builder reports content while at least one change list is non-empty.
    assertTrue(configChangeContentBuilder.hasContent());
    configChangeContentBuilder.getCreateItems().clear();
    assertTrue(configChangeContentBuilder.hasContent());
    // NOTE(review): deleteItems is never cleared here, so the all-empty
    // (hasContent == false) case is not covered by this test.
    configChangeContentBuilder.getUpdateItems().clear();
    assertTrue(configChangeContentBuilder.hasContent());
}
|
/**
 * Handles an RPC-reply message: validates required metadata, then either routes
 * the reply to the edge queue (when an edge id is present) or sends it directly
 * to the device via the RPC service.
 * <p>
 * Validation failures are reported via {@code ctx.tellFailure} in a fixed order:
 * originator type, request id, service id, session id, body.
 */
@Override
public void onMsg(TbContext ctx, TbMsg msg) {
    String serviceIdStr = msg.getMetaData().getValue(config.getServiceIdMetaDataAttribute());
    String sessionIdStr = msg.getMetaData().getValue(config.getSessionIdMetaDataAttribute());
    String requestIdStr = msg.getMetaData().getValue(config.getRequestIdMetaDataAttribute());
    if (msg.getOriginator().getEntityType() != EntityType.DEVICE) {
        ctx.tellFailure(msg, new RuntimeException("Message originator is not a device entity!"));
    } else if (StringUtils.isEmpty(requestIdStr)) {
        ctx.tellFailure(msg, new RuntimeException("Request id is not present in the metadata!"));
    } else if (StringUtils.isEmpty(serviceIdStr)) {
        ctx.tellFailure(msg, new RuntimeException("Service id is not present in the metadata!"));
    } else if (StringUtils.isEmpty(sessionIdStr)) {
        ctx.tellFailure(msg, new RuntimeException("Session id is not present in the metadata!"));
    } else if (StringUtils.isEmpty(msg.getData())) {
        ctx.tellFailure(msg, new RuntimeException("Request body is empty!"));
    } else {
        // An EDGE_ID in the metadata means the reply must go through the edge queue
        // instead of being pushed straight to the device.
        if (StringUtils.isNotBlank(msg.getMetaData().getValue(DataConstants.EDGE_ID))) {
            saveRpcResponseToEdgeQueue(ctx, msg, serviceIdStr, sessionIdStr, requestIdStr);
        } else {
            ctx.getRpcService().sendRpcReplyToDevice(serviceIdStr, UUID.fromString(sessionIdStr), Integer.parseInt(requestIdStr), msg.getData());
            ctx.tellSuccess(msg);
        }
    }
}
|
@Test
public void sendReplyToEdgeQueue() {
    // When EDGE_ID is present in the metadata, the reply must be persisted as an
    // edge event and must NOT be sent directly to the device.
    when(ctx.getTenantId()).thenReturn(tenantId);
    when(ctx.getEdgeEventService()).thenReturn(edgeEventService);
    when(edgeEventService.saveAsync(any())).thenReturn(SettableFuture.create());
    when(ctx.getDbCallbackExecutor()).thenReturn(listeningExecutor);
    TbMsgMetaData defaultMetadata = getDefaultMetadata();
    defaultMetadata.putValue(DataConstants.EDGE_ID, UUID.randomUUID().toString());
    defaultMetadata.putValue(DataConstants.DEVICE_ID, UUID.randomUUID().toString());
    TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, deviceId, defaultMetadata,
            TbMsgDataType.JSON, DUMMY_DATA, null, null);
    node.onMsg(ctx, msg);
    verify(edgeEventService).saveAsync(any());
    verify(rpcService, never()).sendRpcReplyToDevice(DUMMY_SERVICE_ID, DUMMY_SESSION_ID, DUMMY_REQUEST_ID, DUMMY_DATA);
}
|
/**
 * Builds the provider-side TLS material from a freshly generated certificate pair.
 *
 * @param localAddress the local provider address (unused by this implementation)
 * @return the provider certificate bundle with {@link AuthPolicy#NONE}, or
 *         {@code null} when no certificate could be generated
 */
@Override
public ProviderCert getProviderConnectionConfig(URL localAddress) {
    final CertPair certPair = dubboCertManager.generateCert();
    if (certPair == null) {
        return null;
    }
    final byte[] certificate = certPair.getCertificate().getBytes(StandardCharsets.UTF_8);
    final byte[] privateKey = certPair.getPrivateKey().getBytes(StandardCharsets.UTF_8);
    final byte[] trustCerts = certPair.getTrustCerts().getBytes(StandardCharsets.UTF_8);
    return new ProviderCert(certificate, privateKey, trustCerts, AuthPolicy.NONE);
}
|
@Test
void getProviderConnectionConfigTest() {
    // Capture the DubboCertManager constructed inside the provider so its
    // generateCert() can be stubbed.
    AtomicReference<DubboCertManager> reference = new AtomicReference<>();
    try (MockedConstruction<DubboCertManager> construction =
            Mockito.mockConstruction(DubboCertManager.class, (mock, context) -> {
                reference.set(mock);
            })) {
        FrameworkModel frameworkModel = new FrameworkModel();
        DubboCertProvider provider = new DubboCertProvider(frameworkModel);
        // Unstubbed mock returns null -> provider must return null.
        Assertions.assertNull(provider.getProviderConnectionConfig(null));
        CertPair certPair = new CertPair("privateKey", "publicKey", "trustCerts", 12345);
        Mockito.when(reference.get().generateCert()).thenReturn(certPair);
        ProviderCert providerConnectionConfig = provider.getProviderConnectionConfig(null);
        // Fields of the CertPair must map onto the corresponding ProviderCert bytes.
        Assertions.assertArrayEquals("privateKey".getBytes(), providerConnectionConfig.getPrivateKey());
        Assertions.assertArrayEquals("publicKey".getBytes(), providerConnectionConfig.getKeyCertChain());
        Assertions.assertArrayEquals("trustCerts".getBytes(), providerConnectionConfig.getTrustCert());
        Assertions.assertEquals(AuthPolicy.NONE, providerConnectionConfig.getAuthPolicy());
        frameworkModel.destroy();
    }
}
|
/**
 * Imports a photos container into Koofr: creates one folder per album, then
 * uploads each photo, accumulating the total number of imported bytes.
 *
 * @param jobId the transfer job id
 * @param idempotentImportExecutor executor that deduplicates/records per-item results
 * @param authData tokens for creating the Koofr client
 * @param resource albums and photos to import; transmogrified in place for Koofr
 * @return {@link ImportResult#OK} carrying the total imported byte count
 * @throws Exception on unrecoverable import failure
 */
@Override
public ImportResult importItem(
    UUID jobId,
    IdempotentImportExecutor idempotentImportExecutor,
    TokensAndUrlAuthData authData,
    PhotosContainerResource resource)
    throws Exception {
  KoofrClient koofrClient = koofrClientFactory.create(authData);
  monitor.debug(
      () ->
          String.format(
              "%s: Importing %s albums and %s photos before transmogrification",
              jobId, resource.getAlbums().size(), resource.getPhotos().size()));
  // Make the data Koofr compatible
  resource.transmogrify(transmogrificationConfig);
  monitor.debug(
      () ->
          String.format(
              "%s: Importing %s albums and %s photos after transmogrification",
              jobId, resource.getAlbums().size(), resource.getPhotos().size()));
  for (PhotoAlbum album : resource.getAlbums()) {
    // Create a Koofr folder and then save the id with the mapping data
    idempotentImportExecutor.executeAndSwallowIOExceptions(
        album.getId(), album.getName(), () -> createAlbumFolder(album, koofrClient));
  }
  // LongAdder: accumulated from the per-photo import callbacks below.
  final LongAdder totalImportedFilesSizes = new LongAdder();
  for (PhotoModel photoModel : resource.getPhotos()) {
    idempotentImportExecutor.importAndSwallowIOExceptions(
        photoModel,
        photo -> {
          ItemImportResult<String> fileImportResult =
              importSinglePhoto(photoModel, jobId, idempotentImportExecutor, koofrClient);
          // Only count photos whose import reported a byte size.
          if (fileImportResult != null && fileImportResult.hasBytes()) {
            totalImportedFilesSizes.add(fileImportResult.getBytes());
          }
          return fileImportResult;
        });
  }
  return ImportResult.OK.copyWithBytes(totalImportedFilesSizes.longValue());
}
|
@Test
public void testImportItemFromJobStoreUserTimeZoneCalledOnce() throws Exception {
  // Two consecutive imports for the same job: the job (and its user time zone)
  // must be looked up at most once, and photo titles must be rendered in the
  // user's time zone (Rome) rather than the capture zone (Kiev).
  ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[] {0, 1, 2, 3, 4});
  when(jobStore.getStream(any(), any())).thenReturn(new InputStreamWrapper(inputStream, 5L));
  UUID jobId = UUID.randomUUID();
  PortabilityJob job = mock(PortabilityJob.class);
  when(job.userTimeZone()).thenReturn(TimeZone.getTimeZone("Europe/Rome"));
  when(jobStore.findJob(jobId)).thenReturn(job);
  Collection<PhotoAlbum> albums =
      ImmutableList.of(new PhotoAlbum("id1", "Album 1", "This is a fake album"));
  // Capture timestamps are expressed in Kiev time; expected titles below are in Rome time.
  DateFormat format = new SimpleDateFormat("yyyy:MM:dd HH:mm:ss");
  format.setTimeZone(TimeZone.getTimeZone("Europe/Kiev"));
  Collection<PhotoModel> photos1 =
      ImmutableList.of(
          new PhotoModel(
              "pic1.jpg",
              "http://fake.com/1.jpg",
              "A pic",
              "image/jpeg",
              "p1",
              "id1",
              true,
              format.parse("2021:02:16 11:55:00")));
  Collection<PhotoModel> photos2 =
      ImmutableList.of(
          new PhotoModel(
              "pic2.jpg",
              "http://fake.com/2.jpg",
              "A pic",
              "image/jpeg",
              "p2",
              "id1",
              true,
              format.parse("2021:02:17 11:55:00")));
  PhotosContainerResource resource1 = spy(new PhotosContainerResource(albums, photos1));
  PhotosContainerResource resource2 = spy(new PhotosContainerResource(albums, photos2));
  importer.importItem(jobId, executor, authData, resource1);
  importer.importItem(jobId, executor, authData, resource2);
  InOrder clientInOrder = Mockito.inOrder(client);
  String[] titles = {"2021-02-16 10.55.00 pic1.jpg", "2021-02-17 10.55.00 pic2.jpg"};
  for (String title : titles) {
    clientInOrder.verify(client).uploadFile(any(), eq(title), any(), any(), any(), any());
  }
  verify(jobStore, atMostOnce()).findJob(jobId);
}
|
/**
 * Resolves a parameter value from a signal dependency, executing the lookup on
 * the internal executor with a bounded timeout.
 *
 * @param signalDependencyName the signal dependency to read from
 * @param paramName the parameter to resolve within that dependency
 * @return the resolved parameter value
 * @throws MaestroInternalError wrapping any lookup failure or timeout
 */
Object getFromSignalDependency(String signalDependencyName, String paramName) {
  try {
    return executor
        .submit(() -> fromSignalDependency(signalDependencyName, paramName))
        .get(TIMEOUT_IN_MILLIS, TimeUnit.MILLISECONDS);
  } catch (Exception e) {
    throw new MaestroInternalError(
        e,
        "getFromSignalDependency throws an exception for signalDependencyName=[%s], paramName=[%s]",
        signalDependencyName,
        paramName);
  }
}
|
@Test
public void testInvalidGetFromSignalDependency() {
  // Missing param name within an existing dependency.
  when(signalDependenciesParams.get("dev/foo/bar"))
      .thenReturn(
          Collections.singletonList(
              Collections.singletonMap(
                  "param1", StringParameter.builder().evaluatedResult("hello").build())));
  AssertHelper.assertThrows(
      "Referenced param in signal dependencies does not exist yet.",
      MaestroInternalError.class,
      "getFromSignalDependency throws an exception for ",
      () -> paramExtension.getFromSignalDependency("dev/foo/bar", "param2"));
  // Dependency exists but has no entries.
  when(signalDependenciesParams.get("dev/foo/bar")).thenReturn(Collections.emptyList());
  AssertHelper.assertThrows(
      "Referenced signal dependencies does not exist",
      MaestroInternalError.class,
      "getFromSignalDependency throws an exception for ",
      () -> paramExtension.getFromSignalDependency("dev/foo/bar", "param1"));
  // Dependency missing entirely (null).
  when(signalDependenciesParams.get("dev/foo/bar")).thenReturn(null);
  AssertHelper.assertThrows(
      "Referenced signal dependencies does not exist",
      MaestroInternalError.class,
      "getFromSignalDependency throws an exception for ",
      () -> paramExtension.getFromSignalDependency("dev/foo/bar", "param1"));
  // Ambiguous: more than one entry in the dependency list is also rejected.
  when(signalDependenciesParams.get("dev/foo/bar"))
      .thenReturn(
          Arrays.asList(
              Collections.singletonMap(
                  "param1", StringParameter.builder().evaluatedResult("hello").build()),
              Collections.singletonMap(
                  "param2", StringParameter.builder().evaluatedResult("world").build())));
  AssertHelper.assertThrows(
      "Referenced signal dependencies does not exist",
      MaestroInternalError.class,
      "getFromSignalDependency throws an exception for ",
      () -> paramExtension.getFromSignalDependency("dev/foo/bar", "param1"));
}
|
/**
 * Deletes the given object keys from the bucket in a single batch request.
 *
 * @param keys object keys to delete
 * @return the keys that were actually deleted
 * @throws IOException if the underlying service call fails
 */
@Override
protected List<String> deleteObjects(List<String> keys) throws IOException {
    try {
        final DeleteObjectsRequest batchRequest = new DeleteObjectsRequest(mBucketName);
        batchRequest.setKeys(keys);
        final DeleteObjectsResult deleteResult = mClient.deleteObjects(batchRequest);
        return deleteResult.getDeletedObjects();
    } catch (ServiceException e) {
        throw new IOException("Failed to delete objects", e);
    }
}
|
/**
 * deleteObjects must return the deleted keys on success and wrap a
 * ServiceException into an IOException on failure.
 */
@Test
public void testDeleteObjects() throws IOException {
    String[] stringKeys = new String[]{"key1", "key2", "key3"};
    List<String> keys = new ArrayList<>();
    Collections.addAll(keys, stringKeys);
    // test successful delete objects
    Mockito.when(mClient.deleteObjects(ArgumentMatchers.any(DeleteObjectsRequest.class)))
        .thenReturn(new DeleteObjectsResult(keys));
    List<String> result = mOSSUnderFileSystem.deleteObjects(keys);
    Assert.assertEquals(keys, result);
    // test delete objects exception
    Mockito.when(mClient.deleteObjects(ArgumentMatchers.any(DeleteObjectsRequest.class)))
        .thenThrow(ServiceException.class);
    try {
        mOSSUnderFileSystem.deleteObjects(keys);
        // Without this the test would pass silently if no exception were thrown.
        Assert.fail("deleteObjects should have thrown an IOException");
    } catch (Exception e) {
        Assert.assertTrue(e instanceof IOException);
    }
}
|
/** Returns the numeric error code carried by this exception. */
public int getErrCode() {
    return errCode;
}
|
@Test
void testConstructorWithFull() {
    // The (code, message, cause) constructor must keep the code, format the
    // message with the code prefix, and preserve the cause.
    NacosRuntimeException exception = new NacosRuntimeException(NacosException.INVALID_PARAM, "test",
            new RuntimeException("cause test"));
    assertEquals(NacosException.INVALID_PARAM, exception.getErrCode());
    assertEquals("errCode: 400, errMsg: test ", exception.getMessage());
    assertTrue(exception.getCause() instanceof RuntimeException);
}
|
/**
 * Submits an ADD operation that attaches the given cluster metadata to the
 * service's metadata through the consistency write path.
 *
 * @param service the service whose metadata is extended
 * @param clusterName name of the cluster the metadata belongs to
 * @param clusterMetadata the cluster metadata to add
 */
public void addClusterMetadata(Service service, String clusterName, ClusterMetadata clusterMetadata) {
    MetadataOperation<ServiceMetadata> operation = buildMetadataOperation(service);
    ServiceMetadata serviceMetadata = new ServiceMetadata();
    serviceMetadata.setEphemeral(service.isEphemeral());
    serviceMetadata.getClusters().put(clusterName, clusterMetadata);
    operation.setMetadata(serviceMetadata);
    WriteRequest operationLog = WriteRequest.newBuilder().setGroup(Constants.SERVICE_METADATA)
            .setOperation(DataOperation.ADD.name()).setData(ByteString.copyFrom(serializer.serialize(operation)))
            .build();
    submitMetadataOperation(operationLog);
}
|
/**
 * addClusterMetadata must fail with a NacosRuntimeException when the write
 * cannot be submitted, after reading the service identity.
 */
@Test
void testAddClusterMetadata() {
    String clusterName = "clusterName";
    ClusterMetadata clusterMetadata = new ClusterMetadata();
    // Only the throwing call belongs inside the lambda; in the original the
    // verify calls placed after it were unreachable and never executed.
    assertThrows(NacosRuntimeException.class,
            () -> namingMetadataOperateService.addClusterMetadata(service, clusterName, clusterMetadata));
    // The service identity is read while building the metadata operation,
    // before the write submission fails.
    Mockito.verify(service).getNamespace();
    Mockito.verify(service).getGroup();
    Mockito.verify(service).getName();
}
|
/**
 * Creates a JMX consumer for this endpoint. When a monitor type is configured,
 * the endpoint is validated (platform server required, observed attribute set,
 * and the string/gauge specific options present) and a monitoring consumer is
 * returned; otherwise a plain notification consumer is created.
 *
 * @param aProcessor processor the consumer feeds exchanges into
 * @return the configured consumer
 * @throws IllegalArgumentException if a monitor option is missing or inconsistent
 */
@Override
public Consumer createConsumer(Processor aProcessor) throws Exception {
    // validate that all of the endpoint is configured properly
    if (getMonitorType() != null) {
        if (!isPlatformServer()) {
            throw new IllegalArgumentException(ERR_PLATFORM_SERVER);
        }
        if (ObjectHelper.isEmpty(getObservedAttribute())) {
            throw new IllegalArgumentException(ERR_OBSERVED_ATTRIBUTE);
        }
        if (getMonitorType().equals("string")) {
            // String monitors need a comparison value and at least one notify flag.
            if (ObjectHelper.isEmpty(getStringToCompare())) {
                throw new IllegalArgumentException(ERR_STRING_TO_COMPARE);
            }
            if (!isNotifyDiffer() && !isNotifyMatch()) {
                throw new IllegalArgumentException(ERR_STRING_NOTIFY);
            }
        } else if (getMonitorType().equals("gauge")) {
            // Gauge monitors need a notify direction and both thresholds.
            if (!isNotifyHigh() && !isNotifyLow()) {
                throw new IllegalArgumentException(ERR_GAUGE_NOTIFY);
            }
            if (getThresholdHigh() == null) {
                throw new IllegalArgumentException(ERR_THRESHOLD_HIGH);
            }
            if (getThresholdLow() == null) {
                throw new IllegalArgumentException(ERR_THRESHOLD_LOW);
            }
        }
        JMXMonitorConsumer answer = new JMXMonitorConsumer(this, aProcessor);
        configureConsumer(answer);
        return answer;
    } else {
        // shouldn't need any other validation.
        JMXConsumer answer = new JMXConsumer(this, aProcessor);
        configureConsumer(answer);
        return answer;
    }
}
|
@Test
public void noThresholdHigh() throws Exception {
    // A gauge monitor endpoint without thresholdHigh must be rejected
    // with the dedicated error message.
    JMXEndpoint ep = context.getEndpoint(
            "jmx:platform?objectDomain=FooDomain&objectName=theObjectName&monitorType=gauge&observedAttribute=foo&thresholdLow=100&notifyHigh=true",
            JMXEndpoint.class);
    try {
        ep.createConsumer(null);
        fail("expected exception");
    } catch (IllegalArgumentException e) {
        assertEquals(JMXEndpoint.ERR_THRESHOLD_HIGH, e.getMessage());
    }
}
|
/**
 * Decodes {@code \}{@code uXXXX} escape sequences in the given string into
 * their characters. Malformed escapes are kept as literal text; a truncated
 * escape at the end of the input stops the scan.
 *
 * @param unicode the string possibly containing unicode escapes; may be blank
 * @return the decoded string (the input itself if blank)
 */
public static String toString(String unicode) {
    if (StrUtil.isBlank(unicode)) {
        return unicode;
    }
    final int len = unicode.length();
    StringBuilder sb = new StringBuilder(len);
    int i;
    int pos = 0;
    while ((i = StrUtil.indexOfIgnoreCase(unicode, "\\u", pos)) != -1) {
        sb.append(unicode, pos, i);// append the segment before the escape
        pos = i;
        if (i + 5 < len) {
            char c;
            try {
                c = (char) Integer.parseInt(unicode.substring(i + 2, i + 6), 16);
                sb.append(c);
                pos = i + 6;// skip past the whole \ uXXXX escape
            } catch (NumberFormatException e) {
                // not a valid hex quad: keep the literal "\\u" and continue scanning after it
                sb.append(unicode, pos, i + 2);// append the literal "\\u"
                pos = i + 2;
            }
        } else {
            // truncated escape at the end of the input; stop scanning
            break;
        }
    }
    if (pos < len) {
        sb.append(unicode, pos, len);
    }
    return sb.toString();
}
|
@Test
public void convertTest5() {
    // Unicode escapes embedded in a JSON string must decode to the original
    // (Chinese) characters while the rest of the payload is untouched.
    String str = "{\"code\":403,\"enmsg\":\"Product not found\",\"cnmsg\":\"\\u4ea7\\u54c1\\u4e0d\\u5b58\\u5728\\uff0c\\u6216\\u5df2\\u5220\\u9664\",\"data\":null}";
    String res = UnicodeUtil.toString(str);
    assertEquals("{\"code\":403,\"enmsg\":\"Product not found\",\"cnmsg\":\"产品不存在,或已删除\",\"data\":null}", res);
}
|
/**
 * Sets the topic-pattern auto-discovery period in minutes.
 *
 * @param periodInMinutes discovery period; must be non-negative
 * @return this builder
 * @throws IllegalArgumentException if {@code periodInMinutes} is negative
 */
@Override
public ConsumerBuilder<T> patternAutoDiscoveryPeriod(int periodInMinutes) {
    checkArgument(periodInMinutes >= 0, "periodInMinutes needs to be >= 0");
    patternAutoDiscoveryPeriod(periodInMinutes, TimeUnit.MINUTES);
    return this;
}
|
// A negative auto-discovery period must be rejected.
@Test(expectedExceptions = IllegalArgumentException.class)
public void testConsumerBuilderImplWhenPatternAutoDiscoveryPeriodPeriodIsNegative() {
    consumerBuilderImpl.patternAutoDiscoveryPeriod(-1, TimeUnit.MINUTES);
}
|
/**
 * Returns whether the given HTTP method carries a request body
 * (i.e. is one of POST, PUT or PATCH).
 *
 * @param method the HTTP method name; may be null or empty
 * @return true only for the entity-enclosing methods
 */
public static boolean isActiveBody( String method ) {
  if ( Utils.isEmpty( method ) ) {
    return false;
  }
  return HTTP_METHOD_POST.equals( method )
    || HTTP_METHOD_PUT.equals( method )
    || HTTP_METHOD_PATCH.equals( method );
}
|
@Test
public void testEntityEnclosingMethods() {
    // Only POST, PUT and PATCH may carry a request body.
    assertTrue( RestMeta.isActiveBody( RestMeta.HTTP_METHOD_POST ) );
    assertTrue( RestMeta.isActiveBody( RestMeta.HTTP_METHOD_PUT ) );
    assertTrue( RestMeta.isActiveBody( RestMeta.HTTP_METHOD_PATCH ) );
    assertFalse( RestMeta.isActiveBody( RestMeta.HTTP_METHOD_GET ) );
    assertFalse( RestMeta.isActiveBody( RestMeta.HTTP_METHOD_DELETE ) );
    assertFalse( RestMeta.isActiveBody( RestMeta.HTTP_METHOD_HEAD ) );
    assertFalse( RestMeta.isActiveBody( RestMeta.HTTP_METHOD_OPTIONS ) );
}
|
/**
 * Forcefully shuts down the executor within the given timeout, allowing the
 * calling thread to be interrupted (delegates with {@code interruptable=true}).
 *
 * @param executor the executor to shut down
 * @param timeout maximum time to wait for termination
 * @return whether the executor terminated within the timeout
 */
public static boolean shutdownExecutorForcefully(ExecutorService executor, Duration timeout) {
    return shutdownExecutorForcefully(executor, timeout, true);
}
|
@Test
void testShutdownExecutorForcefully() {
    // A forceful shutdown must invoke shutdownNow on the mock executor until
    // all of its 5 simulated tasks are forcefully stopped.
    MockExecutorService executor = new MockExecutorService(5);
    assertThat(
            ComponentClosingUtils.shutdownExecutorForcefully(
                    executor, Duration.ofDays(1), false))
        .isTrue();
    assertThat(executor.forcefullyShutdownCount).isEqualTo(5);
}
|
/**
 * Reconstructs an {@link Acl} from its serialized dictionary form: each key is
 * a canonical user, each value a list of serialized roles granted to that user.
 *
 * @param serialized the serialized ACL dictionary
 * @return the deserialized ACL
 */
public Acl deserialize(T serialized) {
    final Deserializer<?> dict = deserializer.create(serialized);
    final Acl acl = new Acl();
    final List<String> keys = dict.keys();
    for(String key : keys) {
        final Acl.CanonicalUser user = new Acl.CanonicalUser(key);
        // Register the user first so roles can be appended to its set below.
        acl.addAll(user);
        final List<Object> rolesObj = dict.listForKey(key);
        for(Object roleObj : rolesObj) {
            acl.get(user).add(new Acl.RoleDictionary(deserializer).deserialize(roleObj));
        }
    }
    return acl;
}
|
@Test
public void testSerialize() {
    // A serialize/deserialize round trip must preserve the roles granted
    // to the canonical user.
    Acl attributes = new Acl(new Acl.UserAndRole(new Acl.CanonicalUser(), new Acl.Role("w")));
    Acl clone = new AclDictionary<>().deserialize(attributes.serialize(SerializerFactory.get()));
    assertEquals(attributes.get(new Acl.CanonicalUser()), clone.get(new Acl.CanonicalUser()));
    // Different user or role must not compare equal.
    assertNotEquals(attributes, new Acl(new Acl.UserAndRole(new Acl.CanonicalUser("t"), new Acl.Role("w"))));
    assertEquals(attributes.get(new Acl.CanonicalUser()), clone.get(new Acl.CanonicalUser()));
    assertNotEquals(attributes.get(new Acl.CanonicalUser()), new Acl(new Acl.UserAndRole(new Acl.CanonicalUser(""), new Acl.Role("r"))).get(new Acl.CanonicalUser()));
}
|
/**
 * Merges two NDV (number-of-distinct-values) statistics, preferring bitvector
 * (estimator) merging when both sides carry a compatible estimator, and
 * falling back to {@code mergeNumDVs} on the raw counts otherwise.
 * <p>
 * The estimators list is [old, new]; when only the new side has an estimator
 * it is promoted into slot 0 so subsequent merges can use it.
 *
 * @param columnName column the statistics belong to (for logging)
 * @param estimators exactly two entries: the old and new estimator (either may be null)
 * @param oldNumDVs raw NDV count of the old statistics
 * @param newNumDVs raw NDV count of the new statistics
 * @return the merged NDV estimate
 * @throws IllegalArgumentException if the list is null or not of size 2
 */
protected long mergeNumDistinctValueEstimator(String columnName, List<NumDistinctValueEstimator> estimators,
    long oldNumDVs, long newNumDVs) {
    if (estimators == null || estimators.size() != 2) {
        throw new IllegalArgumentException("NDV estimators list must be set and contain exactly two elements, " +
            "found " + (estimators == null ? "null" :
            estimators.stream().map(NumDistinctValueEstimator::toString).collect(Collectors.joining(", "))));
    }
    NumDistinctValueEstimator oldEst = estimators.get(0);
    NumDistinctValueEstimator newEst = estimators.get(1);
    if (oldEst == null && newEst == null) {
        return mergeNumDVs(oldNumDVs, newNumDVs);
    }
    if (oldEst == null) {
        // Promote the new estimator so later merges have a bitvector to work with.
        estimators.set(0, newEst);
        return mergeNumDVs(oldNumDVs, newEst.estimateNumDistinctValues());
    }
    if (oldEst.canMerge(newEst)) {
        oldEst.mergeEstimators(newEst);
        final long ndv = oldEst.estimateNumDistinctValues();
        // Fixed: this log belongs on the bitvector-merge path; previously the early
        // return here skipped it and it fired on the fallback path instead.
        LOG.debug("Use bitvector to merge column {}'s ndvs of {} and {} to be {}", columnName,
            oldNumDVs, newNumDVs, ndv);
        return ndv;
    }
    // Incompatible estimators: fall back to merging the raw counts.
    return mergeNumDVs(oldNumDVs, newNumDVs);
}
|
@Test
public void testMergeNDVEstimatorsSecondNull() {
    // With only the old estimator present, merging must leave the estimator
    // list unchanged and fall back to merging the raw NDV counts (max of 2, 1).
    NumDistinctValueEstimator estimator1 =
        NumDistinctValueEstimatorFactory.getNumDistinctValueEstimator(HLL_1.serialize());
    for (ColumnStatsMerger<?> MERGER : MERGERS) {
        List<NumDistinctValueEstimator> estimatorList = Arrays.asList(estimator1, null);
        long computedNDV = MERGER.mergeNumDistinctValueEstimator("", estimatorList, 2, 1);
        assertEquals(Arrays.asList(estimator1, null), estimatorList);
        assertEquals(2, computedNDV);
    }
}
|
/**
 * Blocks until the group coordinator is ready or the timer expires
 * (delegates with disconnect-handling disabled).
 *
 * @param timer bounds the wait
 * @return whether the coordinator became ready in time
 */
protected synchronized boolean ensureCoordinatorReady(final Timer timer) {
    return ensureCoordinatorReady(timer, false);
}
|
@Test
public void testCoordinatorDiscoveryExponentialBackoff() {
    // With exponential backoff, we will get retries at 10, 20, 40, 80, 100 ms (with jitter)
    int shortRetryBackoffMs = 10;
    int shortRetryBackoffMaxMs = 100;
    setupCoordinator(shortRetryBackoffMs, shortRetryBackoffMaxMs);
    for (int i = 0; i < 5; i++) {
        mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    }
    // cut out the coordinator for 100 milliseconds to simulate a disconnect.
    // after backing off, we should be able to connect.
    mockClient.backoff(coordinatorNode, 100L);
    long initialTime = mockTime.milliseconds();
    coordinator.ensureCoordinatorReady(mockTime.timer(Long.MAX_VALUE));
    long endTime = mockTime.milliseconds();
    // Sum the jittered exponential backoff bounds for the first four retries to
    // bracket the total time the discovery loop may legitimately take.
    long lowerBoundBackoffMs = 0;
    long upperBoundBackoffMs = 0;
    for (int i = 0; i < 4; i++) {
        lowerBoundBackoffMs += (long) (shortRetryBackoffMs * Math.pow(CommonClientConfigs.RETRY_BACKOFF_EXP_BASE, i) * (1 - CommonClientConfigs.RETRY_BACKOFF_JITTER));
        upperBoundBackoffMs += (long) (shortRetryBackoffMs * Math.pow(CommonClientConfigs.RETRY_BACKOFF_EXP_BASE, i) * (1 + CommonClientConfigs.RETRY_BACKOFF_JITTER));
    }
    long timeElapsed = endTime - initialTime;
    assertTrue(timeElapsed >= lowerBoundBackoffMs);
    assertTrue(timeElapsed <= upperBoundBackoffMs + shortRetryBackoffMs);
}
|
/**
 * Builds a {@code KTableHolder} for the given stream-aggregate step.
 * Delegates to the five-argument overload, supplying a freshly created
 * {@link AggregateParamsFactory} (the overload presumably exists so tests
 * can inject a factory — confirm at the overload's call sites).
 */
public static KTableHolder<GenericKey> build(
    final KGroupedStreamHolder groupedStream,
    final StreamAggregate aggregate,
    final RuntimeBuildContext buildContext,
    final MaterializedFactory materializedFactory) {
  return build(
      groupedStream,
      aggregate,
      buildContext,
      materializedFactory,
      new AggregateParamsFactory()
  );
}
|
/**
 * For every precondition supplied by {@code given()}, building the windowed
 * aggregate must build its key serde with the expected key format, physical
 * aggregate schema, and materialization context.
 */
@Test
public void shouldBuildKeySerdeCorrectlyForWindowedAggregate() {
    for (final Runnable given : given()) {
        // Given:
        clearInvocations(groupedStream, timeWindowedStream, sessionWindowedStream, aggregated, buildContext);
        given.run();
        // When:
        windowedAggregate.build(planBuilder, planInfo);
        // Then:
        verify(buildContext)
            .buildKeySerde(KEY_FORMAT, PHYSICAL_AGGREGATE_SCHEMA, MATERIALIZE_CTX);
    }
}
|
/**
 * Swaps the active histogram with the inactive one and hands the previously
 * active histogram to {@code consumer}.
 *
 * <p>The swap ({@code _current.set(claim(_inactive))}) happens before the
 * consumer runs, so writers can keep recording into the new active
 * histogram while the harvested one is being read. Exceptions thrown by the
 * consumer are logged and swallowed; the finally block still resets the
 * harvested histogram and parks it as the next inactive buffer.
 *
 * @param consumer callback receiving the harvested histogram; the reference
 *                 is recycled (reset) after this call returns, so it must
 *                 not be retained
 */
public void harvest(Consumer<AbstractHistogram> consumer) {
    AbstractHistogram current = claim(_current);
    _current.set(claim(_inactive)); //unblock other writers
    try {
        consumer.accept(current);
    } catch (Throwable t) {
        LOG.error("failed to consume histogram for latencies metric", t);
    } finally {
        // Always recycle, even if the consumer threw.
        current.reset();
        _inactive.set(current);
    }
}
|
/**
 * Harvesting a metric with no recorded latencies must report a total count
 * of zero — on the first harvest and again after the internal buffer swap.
 */
@Test
public void testNoRecording()
{
    LatencyMetric metric = new LatencyMetric();
    final AtomicLong totalCount = new AtomicLong();
    metric.harvest(h -> totalCount.set(h.getTotalCount()));
    assertEquals(totalCount.get(), 0L);
    // Second harvest exercises the swapped (previously inactive) histogram.
    metric.harvest(h -> totalCount.set(h.getTotalCount()));
    assertEquals(totalCount.get(), 0L);
}
|
/**
 * Converts a persisted {@link MeasureDto} into a {@code Measure},
 * dispatching on the metric's value type.
 *
 * @param measureDto persisted measure; may be null
 * @param metric     metric describing the expected value type; never null
 * @return empty when {@code measureDto} is null, otherwise the converted measure
 * @throws IllegalArgumentException for an unsupported value type
 */
public Optional<Measure> toMeasure(@Nullable MeasureDto measureDto, Metric metric) {
    requireNonNull(metric);
    if (measureDto == null) {
        return Optional.empty();
    }
    Double value = measureDto.getValue();
    String data = measureDto.getData();
    // Each branch delegates to a type-specific converter; value/data may be
    // null depending on the type, the converters are expected to handle that.
    switch (metric.getType().getValueType()) {
        case INT:
            return toIntegerMeasure(measureDto, value, data);
        case LONG:
            return toLongMeasure(measureDto, value, data);
        case DOUBLE:
            return toDoubleMeasure(measureDto, value, data);
        case BOOLEAN:
            return toBooleanMeasure(measureDto, value, data);
        case STRING:
            return toStringMeasure(measureDto, data);
        case LEVEL:
            return toLevelMeasure(measureDto, data);
        case NO_VALUE:
            return toNoValueMeasure(measureDto);
        default:
            throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType());
    }
}
|
/**
 * For a STRING metric, conversion must carry over both the data payload and
 * the alert (quality gate) status/text from the DTO.
 */
@Test
public void toMeasure_maps_alert_properties_in_dto_for_String_Metric() {
    MeasureDto measureDto = new MeasureDto().setData(SOME_DATA).setAlertStatus(Level.OK.name()).setAlertText(SOME_ALERT_TEXT);
    Optional<Measure> measure = underTest.toMeasure(measureDto, SOME_STRING_METRIC);
    assertThat(measure).isPresent();
    assertThat(measure.get().getValueType()).isEqualTo(Measure.ValueType.STRING);
    assertThat(measure.get().getStringValue()).isEqualTo(SOME_DATA);
    assertThat(measure.get().getData()).isEqualTo(SOME_DATA);
    assertThat(measure.get().getQualityGateStatus().getStatus()).isEqualTo(Level.OK);
    assertThat(measure.get().getQualityGateStatus().getText()).isEqualTo(SOME_ALERT_TEXT);
}
|
/**
 * Map-based convenience form of {@code addField}: wraps the raw event map in
 * a Logstash {@code Event}, applies the field additions, and returns the
 * resulting event data map.
 */
static Map<String, Object> addField(Map<String, Object> event, Map<String, Object> fieldsToAdd) {
    final Event wrappedEvent = new org.logstash.Event(event);
    addField(wrappedEvent, fieldsToAdd);
    return wrappedEvent.getData();
}
|
/**
 * Exercises every addField flavor: new field, append-to-scalar (promoting it
 * to a list), append-to-list, non-string values in each of those cases, and
 * field names/values containing {@code %{...}} dynamic references.
 */
@Test
public void testAddField() {
    // add field to empty event
    Event e = new Event();
    String testField = "test_field";
    String testStringValue = "test_value";
    CommonActions.addField(e, Collections.singletonMap(testField, testStringValue));
    Assert.assertEquals(testStringValue, e.getField(testField));
    // add to existing field and convert to array value
    e = new Event(Collections.singletonMap(testField, testStringValue));
    CommonActions.addField(e, Collections.singletonMap(testField, testStringValue));
    Object value = e.getField(testField);
    Assert.assertTrue(value instanceof List);
    Assert.assertEquals(2, ((List) value).size());
    Assert.assertEquals(testStringValue, ((List) value).get(0));
    Assert.assertEquals(testStringValue, ((List) value).get(1));
    // add to existing array field
    String testStringValue2 = "test_value2";
    List<String> stringVals = Arrays.asList(testStringValue, testStringValue2);
    e = new Event(Collections.singletonMap(testField, stringVals));
    CommonActions.addField(e, Collections.singletonMap(testField, testStringValue));
    value = e.getField(testField);
    Assert.assertTrue(value instanceof List);
    Assert.assertEquals(3, ((List) value).size());
    Assert.assertEquals(testStringValue, ((List) value).get(0));
    Assert.assertEquals(testStringValue2, ((List) value).get(1));
    Assert.assertEquals(testStringValue, ((List) value).get(2));
    // add non-string value to empty event
    Long testLongValue = 42L;
    e = new Event();
    CommonActions.addField(e, Collections.singletonMap(testField, testLongValue));
    Assert.assertEquals(testLongValue, e.getField(testField));
    // add non-string value to existing field
    e = new Event(Collections.singletonMap(testField, testStringValue));
    CommonActions.addField(e, Collections.singletonMap(testField, testLongValue));
    value = e.getField(testField);
    Assert.assertTrue(value instanceof List);
    Assert.assertEquals(2, ((List) value).size());
    Assert.assertEquals(testStringValue, ((List) value).get(0));
    Assert.assertEquals(testLongValue, ((List) value).get(1));
    // add non-string value to existing array field
    e = new Event(Collections.singletonMap(testField, stringVals));
    CommonActions.addField(e, Collections.singletonMap(testField, testLongValue));
    value = e.getField(testField);
    Assert.assertTrue(value instanceof List);
    Assert.assertEquals(3, ((List) value).size());
    Assert.assertEquals(testStringValue, ((List) value).get(0));
    Assert.assertEquals(testStringValue2, ((List) value).get(1));
    Assert.assertEquals(testLongValue, ((List) value).get(2));
    // add field/value with dynamic values
    e = new Event(Collections.singletonMap(testField, testStringValue));
    String newField = "%{" + testField + "}_field";
    String newValue = "%{" + testField + "}_value";
    CommonActions.addField(e, Collections.singletonMap(newField, newValue));
    Assert.assertEquals(testStringValue + "_value", e.getField(testStringValue + "_field"));
}
|
/**
 * Retrieves the component root instance registered for the given class.
 *
 * @param providerId class token identifying the requested component root
 * @param <T>        concrete {@code ComponentRoot} subtype
 * @return the matching component root instance
 */
abstract public <T extends ComponentRoot> T get(Class<T> providerId);
|
/**
 * Navigates the reflective app-root chain (EfestoAppRoot → component root →
 * concrete component) for three component types and asserts each final
 * lookup yields a non-null identifier.
 */
@Test
public void testAppRoot_withEfestoAppRootAsComponentRoot() {
    LocalComponentIdA retrievedA = new ReflectiveAppRoot()
            .get(EfestoAppRoot.class)
            .get(EfestoComponentRootBar.class)
            .get(ComponentRootA.class)
            .get("fileName", "name");
    assertThat(retrievedA).isNotNull();
    LocalComponentIdB retrievedB = new ReflectiveAppRoot()
            .get(EfestoAppRoot.class)
            .get(EfestoComponentRootBar.class)
            .get(ComponentRootB.class)
            .get("fileName", "name", "secondName");
    assertThat(retrievedB).isNotNull();
    LocalComponentIdFoo retrievedFoo = new ReflectiveAppRoot()
            .get(EfestoAppRoot.class)
            .get(EfestoComponentRootBar.class)
            .get(ComponentFoo.class)
            .get("fileName", "name", "secondName");
    assertThat(retrievedFoo).isNotNull();
}
|
/** Returns the header held by this instance. */
public Header getHeader() {
    return header;
}
|
/**
 * Generates a predictable data file, then checks its header reports the
 * expected format name and current file version when read back.
 */
@Test
public void testHeader() {
    Assertions.assertDoesNotThrow(() -> generateDataFilePredictable());
    File reportFile = new File(testDir, "test.data");
    // Skip (rather than fail) if generation did not produce the file.
    Assumptions.assumeTrue(reportFile.exists());
    try (LogReader reader = new LogReader(reportFile)) {
        Header fileHeader = reader.getHeader();
        assertEquals(Header.FORMAT_NAME, fileHeader.getFormatName().trim());
        assertEquals(Header.CURRENT_FILE_VERSION, fileHeader.getFileVersion());
    } catch (IOException e) {
        Assertions.fail(e.getMessage());
    }
}
|
/**
 * Returns the configured subscription durability, normalizing empty/blank
 * values to {@code null} so callers see a single "unset" representation.
 */
public String getSubscriptionDurability() {
    return isEmpty(subscriptionDurability) ? null : subscriptionDurability;
}
|
/** The activation spec's default subscription durability must be NON_DURABLE. */
@Test(timeout = 60000)
public void testDefaultSubscriptionDurabilitySetCorrectly() {
    assertEquals("Incorrect default value", ActiveMQActivationSpec.NON_DURABLE_SUBSCRIPTION, activationSpec.getSubscriptionDurability());
}
|
/**
 * Returns a MongoDB collection for documents that (per the method name) do
 * not map to a registered entity type; thin wrapper over
 * {@code getCollection}.
 *
 * @param collectionName name of the MongoDB collection
 * @param valueType      type documents are deserialized into
 */
public <T> MongoCollection<T> nonEntityCollection(String collectionName, Class<T> valueType) {
    return getCollection(collectionName, valueType);
}
|
/**
 * Round-trips an encrypted value through a non-entity collection: the value
 * read back must still be set and must decrypt to the original plaintext.
 */
@Test
void testEncryptedValue() {
    final MongoCollection<Secret> collection = collections.nonEntityCollection("secrets", Secret.class);
    final EncryptedValue encryptedValue = encryptedValueService.encrypt("gary");
    collection.insertOne(new Secret(encryptedValue));
    assertThat(collection.find().first()).isNotNull().satisfies(secret -> {
        final EncryptedValue readValue = secret.encryptedValue();
        assertThat(readValue.isSet()).isTrue();
        assertThat(encryptedValueService.decrypt(readValue)).isEqualTo("gary");
    });
}
|
/**
 * Refreshes cached basic column statistics for an external (connector)
 * table: expires the current cached entries, then repopulates them either
 * asynchronously or synchronously.
 *
 * @param catalogName external catalog name
 * @param dbName      database name
 * @param tableName   table name
 * @param columns     columns whose statistics should be refreshed
 * @param async       true to repopulate in the background, false to block
 */
public void refreshConnectorTableBasicStatisticsCache(String catalogName, String dbName, String tableName,
                                                      List<String> columns, boolean async) {
    Table table;
    try {
        table = MetaUtils.getTable(catalogName, dbName, tableName);
    } catch (Exception e) {
        // Table lookup failure is deliberately swallowed: nothing to refresh.
        return;
    }
    GlobalStateMgr.getCurrentState().getStatisticStorage().expireConnectorTableColumnStatistics(table, columns);
    if (async) {
        // Async variant triggers a cache load without waiting for the result.
        GlobalStateMgr.getCurrentState().getStatisticStorage().getConnectorTableStatistics(table, columns);
    } else {
        GlobalStateMgr.getCurrentState().getStatisticStorage().getConnectorTableStatisticsSync(table, columns);
    }
}
|
/**
 * Verifies the refresh entry point hits the async statistics API when
 * {@code async=true}, the sync API when {@code async=false}, and silently
 * returns (no throw) when the table lookup fails.
 */
@Test
public void testRefreshConnectorTableBasicStatisticsCache(@Mocked CachedStatisticStorage cachedStatisticStorage) {
    Table table = connectContext.getGlobalStateMgr().getMetadataMgr().getTable("hive0", "partitioned_db", "t1");
    // Async path must call getConnectorTableStatistics at least once.
    new Expectations() {
        {
            cachedStatisticStorage.getConnectorTableStatistics(table, ImmutableList.of("c1", "c2"));
            result = ImmutableList.of(
                    new ConnectorTableColumnStats(new ColumnStatistic(0, 10, 0, 20, 5), 5),
                    new ConnectorTableColumnStats(new ColumnStatistic(0, 100, 0, 200, 50), 50)
            );
            minTimes = 1;
        }
    };
    AnalyzeMgr analyzeMgr = new AnalyzeMgr();
    analyzeMgr.refreshConnectorTableBasicStatisticsCache("hive0", "partitioned_db", "t1",
            ImmutableList.of("c1", "c2"), true);
    // Sync path must call getConnectorTableStatisticsSync at least once.
    new Expectations() {
        {
            cachedStatisticStorage.getConnectorTableStatisticsSync(table, ImmutableList.of("c1", "c2"));
            result = ImmutableList.of(
                    new ConnectorTableColumnStats(new ColumnStatistic(0, 10, 0, 20, 5), 5),
                    new ConnectorTableColumnStats(new ColumnStatistic(0, 100, 0, 200, 50), 50)
            );
            minTimes = 1;
        }
    };
    analyzeMgr.refreshConnectorTableBasicStatisticsCache("hive0", "partitioned_db", "t1",
            ImmutableList.of("c1", "c2"), false);
    // Table lookup failure must be swallowed: the call below must not throw.
    new MockUp<MetaUtils>() {
        @Mock
        public Table getTable(String catalogName, String dbName, String tableName) {
            throw new RuntimeException("mock get table exception");
        }
    };
    analyzeMgr.refreshConnectorTableBasicStatisticsCache("hive0", "partitioned_db", "t1",
            ImmutableList.of("c1", "c2"), false);
}
|
/**
 * Writes the buffered content to {@code dest}.
 *
 * <p>When no content has been set ({@code byteBuf == null}) an empty file is
 * created instead. The channel is forced (data only, not metadata) before
 * returning.
 *
 * @param dest target file; must not be null
 * @return {@code true} if all readable bytes were written (always true for
 *         the empty-content case)
 * @throws IOException if the empty destination already exists or writing fails
 */
@Override
public boolean renameTo(File dest) throws IOException {
    ObjectUtil.checkNotNull(dest, "dest");
    if (byteBuf == null) {
        // empty file
        if (!dest.createNewFile()) {
            throw new IOException("file exists already: " + dest);
        }
        return true;
    }
    int length = byteBuf.readableBytes();
    long written = 0;
    // try-with-resources replaces the original nested try/finally pairs; the
    // channel is closed before the RandomAccessFile (reverse declaration
    // order), matching the previous explicit close sequence.
    try (RandomAccessFile accessFile = new RandomAccessFile(dest, "rw");
         FileChannel fileChannel = accessFile.getChannel()) {
        if (byteBuf.nioBufferCount() == 1) {
            // Single backing NIO buffer: plain write loop.
            ByteBuffer byteBuffer = byteBuf.nioBuffer();
            while (written < length) {
                written += fileChannel.write(byteBuffer);
            }
        } else {
            // Composite buffer: gather-write all component buffers.
            ByteBuffer[] byteBuffers = byteBuf.nioBuffers();
            while (written < length) {
                written += fileChannel.write(byteBuffers);
            }
        }
        // Flush written data (not file metadata) to the storage device.
        fileChannel.force(false);
    }
    return written == length;
}
|
/**
 * Writes 4 KiB of random content via renameTo and verifies the destination
 * file contains exactly those bytes and nothing more.
 */
@Test
public void testRenameTo() throws Exception {
    TestHttpData test = new TestHttpData("test", UTF_8, 0);
    try {
        File tmpFile = PlatformDependent.createTempFile(UUID.randomUUID().toString(), ".tmp", null);
        tmpFile.deleteOnExit();
        final int totalByteCount = 4096;
        byte[] bytes = new byte[totalByteCount];
        PlatformDependent.threadLocalRandom().nextBytes(bytes);
        ByteBuf content = Unpooled.wrappedBuffer(bytes);
        test.setContent(content);
        boolean succ = test.renameTo(tmpFile);
        assertTrue(succ);
        FileInputStream fis = new FileInputStream(tmpFile);
        try {
            // Read back in a loop: a single read() is not guaranteed to fill the buffer.
            byte[] buf = new byte[totalByteCount];
            int count = 0;
            int offset = 0;
            int size = totalByteCount;
            while ((count = fis.read(buf, offset, size)) > 0) {
                offset += count;
                size -= count;
                if (offset >= totalByteCount || size <= 0) {
                    break;
                }
            }
            assertArrayEquals(bytes, buf);
            // No trailing bytes beyond the expected content.
            assertEquals(0, fis.available());
        } finally {
            fis.close();
        }
    } finally {
        //release the ByteBuf in AbstractMemoryHttpData
        test.delete();
    }
}
|
/**
 * Appends every element of {@code list} to the end of this list.
 *
 * @param list source list whose elements are copied (it is not modified)
 * @return true if this list changed, i.e. the source was non-empty
 */
public boolean addAll(final IntArrayList list)
{
    @DoNotSub final int elementCount = list.size;
    if (elementCount <= 0)
    {
        return false;
    }

    ensureCapacityPrivate(size + elementCount);
    System.arraycopy(list.elements, 0, elements, size, elementCount);
    size += elementCount;

    return true;
}
|
/**
 * After bulk-adding 0..19, toArray() must yield a boxed array equal to the
 * same sequence collected into a java.util.List.
 */
@Test
void shouldCreateObjectRefArray()
{
    final int count = 20;
    final List<Integer> expected = new ArrayList<>();
    IntStream.range(0, count).forEachOrdered(expected::add);
    list.addAll(expected);
    assertArrayEquals(expected.toArray(), list.toArray());
}
|
/**
 * Wraps the incoming block of row numbers in a lazily-materialized block of
 * VARBINARY row IDs.
 *
 * <p>Materialization only happens when the lazy block is first loaded: for
 * each position the row number is written at offset 0 of the shared slice
 * buffer and the slice is appended to the builder.
 */
@Override
public Block apply(Block rowNumberBlock)
{
    requireNonNull(rowNumberBlock, "rowNumberBlock is null");
    int positionCount = rowNumberBlock.getPositionCount();
    Block lazyBlock = new LazyBlock(positionCount, (block) -> {
        BlockBuilder blockBuilder = VARBINARY.createBlockBuilder(null, positionCount, rowIDSlice.length());
        for (int i = 0; i < positionCount; i++) {
            long rowNumber = BIGINT.getLong(rowNumberBlock, i);
            // NOTE(review): this assumes rowIDBytes aliases rowIDSlice's
            // backing storage and that writeSlice copies the bytes, making
            // reuse across iterations safe — confirm where both fields are
            // initialized.
            rowIDBytes.putLong(0, rowNumber);
            VARBINARY.writeSlice(blockBuilder, rowIDSlice);
        }
        block.setBlock(blockBuilder.build());
    });
    return lazyBlock;
}
|
/**
 * Coercing a block of row numbers (including boundary values) must produce a
 * row-ID block of the same length with each position carrying its number.
 */
@Test
public void testApply()
{
    // NOTE(review): the builder is created with an expectedEntries hint of 5
    // but receives 6 values — the hint is a capacity estimate, not a limit.
    Block rowNumbers = new LongArrayBlockBuilder(null, 5)
            .writeLong(7L)
            .writeLong(Long.MIN_VALUE)
            .writeLong(0L)
            .writeLong(1L)
            .writeLong(-1L)
            .writeLong(Long.MAX_VALUE)
            .build();
    Block rowIDs = coercer.apply(rowNumbers);
    assertEquals(rowIDs.getPositionCount(), rowNumbers.getPositionCount());
    assertRowId(rowIDs, 0, 7L);
    assertRowId(rowIDs, 1, Long.MIN_VALUE);
    assertRowId(rowIDs, 2, 0L);
    assertRowId(rowIDs, 3, 1L);
    assertRowId(rowIDs, 4, -1L);
    assertRowId(rowIDs, 5, Long.MAX_VALUE);
}
|
/**
 * Invokes {@code procedure} with each element of {@code iterable} and its
 * index. Delegates to the overload that takes an explicit pool, using the
 * shared {@code FJIterate.FORK_JOIN_POOL}.
 */
public static <T> void forEachWithIndex(Iterable<T> iterable, ObjectIntProcedure<? super T> procedure)
{
    FJIterate.forEachWithIndex(iterable, procedure, FJIterate.FORK_JOIN_POOL);
}
|
/**
 * forEachWithIndex over an immutable list (batched 10x10) must fill every
 * slot of the target array so it equals the source list's own toArray().
 */
@Test
public void testForEachWithIndexToArrayUsingImmutableList()
{
    Integer[] array = new Integer[200];
    ImmutableList<Integer> list = Interval.oneTo(200).toList().toImmutable();
    // Sanity: the target starts fully null.
    assertTrue(ArrayIterate.allSatisfy(array, Predicates.isNull()));
    FJIterate.forEachWithIndex(list, (each, index) -> array[index] = each, 10, 10);
    assertArrayEquals(array, list.toArray(new Integer[]{}));
}
|
/**
 * Scans the given directory, invoking {@code visitor} for files selected by
 * the concrete scanner implementation.
 *
 * @param dir     directory to scan
 * @param visitor callback receiving each selected file
 * @throws IOException if traversal fails
 */
public abstract void scan(File dir, FileVisitor visitor) throws IOException;
|
/**
 * A Glob scanner built without the useDefaultExcludes flag must apply the
 * default excludes (hiding .git contents), while one built with
 * useDefaultExcludes=true must still visit dot-files like .gitignore.
 */
@Test public void globShouldUseDefaultExcludes() throws Exception {
    FilePath tmp = new FilePath(tmpRule.getRoot());
    try {
        tmp.child(".gitignore").touch(0);
        FilePath git = tmp.child(".git");
        git.mkdirs();
        git.child("HEAD").touch(0);
        DirScanner glob1 = new DirScanner.Glob("**/*", null);
        DirScanner glob2 = new DirScanner.Glob("**/*", null, true);
        MatchingFileVisitor gitdir = new MatchingFileVisitor("HEAD");
        MatchingFileVisitor gitignore = new MatchingFileVisitor(".gitignore");
        glob1.scan(new File(tmp.getRemote()), gitdir);
        glob2.scan(new File(tmp.getRemote()), gitignore);
        assertFalse(gitdir.found);
        assertFalse(gitignore.found);
    } finally {
        tmp.deleteRecursive();
    }
}
|
/**
 * Looks up a Hive table through the metastore operations layer.
 *
 * <p>Connector-level failures are logged and rethrown; any other failure is
 * logged and degrades to a {@code null} ("not found") result.
 */
@Override
public Table getTable(String dbName, String tblName) {
    try {
        return hmsOps.getTable(dbName, tblName);
    } catch (StarRocksConnectorException e) {
        LOG.error("Failed to get hive table [{}.{}.{}]", catalogName, dbName, tblName, e);
        throw e;
    } catch (Exception e) {
        LOG.error("Failed to get hive table [{}.{}.{}]", catalogName, dbName, tblName, e);
        return null;
    }
}
|
/**
 * The generated external-catalog DDL for a Hive table must include columns,
 * partitioning, and the location property in the expected textual form.
 */
@Test
public void testShowCreateHiveTbl() {
    HiveTable hiveTable = (HiveTable) hiveMetadata.getTable("db1", "table1");
    Assert.assertEquals("CREATE TABLE `table1` (\n" +
                    "  `col2` int(11) DEFAULT NULL,\n" +
                    "  `col1` int(11) DEFAULT NULL\n" +
                    ")\n" +
                    "PARTITION BY (col1)\n" +
                    "PROPERTIES (\"location\" = \"hdfs://127.0.0.1:10000/hive\");",
            AstToStringBuilder.getExternalCatalogTableDdlStmt(hiveTable));
}
|
/**
 * Satisfied exactly when the underlying cross indicator is true at
 * {@code index}; the trading record is not consulted.
 */
@Override
public boolean isSatisfied(int index, TradingRecord tradingRecord) {
    final boolean satisfied = cross.getValue(index);
    traceIsSatisfied(index, satisfied); // trace hook for rule debugging
    return satisfied;
}
|
/**
 * With values 11, 10, 10, 9 against a threshold of 10, the cross-down only
 * occurs at the last bar (value drops strictly below the threshold there).
 */
@Test
public void onlyThresholdBetweenFirstBarAndLastBar() {
    Indicator<Num> evaluatedIndicator = new FixedDecimalIndicator(series, 11, 10, 10, 9);
    CrossedDownIndicatorRule rule = new CrossedDownIndicatorRule(evaluatedIndicator, 10);
    assertFalse(rule.isSatisfied(0));
    assertFalse(rule.isSatisfied(1));
    assertFalse(rule.isSatisfied(2));
    assertTrue(rule.isSatisfied(3));
}
|
/**
 * Decides whether an artifact with the given Maven scope is excluded.
 *
 * <p>Same decision table as before, folded into a single boolean expression
 * (evaluation order of the clauses is preserved): each skip flag excludes
 * its scope, and COMPILE_PLUS_RUNTIME is excluded only when both the
 * runtime and system flags are set.
 */
@Override
public boolean passes(final String scope) {
    return (skipTestScope && SCOPE_TEST.equals(scope))
            || (skipProvidedScope && SCOPE_PROVIDED.equals(scope))
            || (skipSystemScope && SCOPE_SYSTEM.equals(scope))
            || (skipRuntimeScope && SCOPE_RUNTIME.equals(scope))
            || (skipRuntimeScope && skipSystemScope && SCOPE_COMPILE_PLUS_RUNTIME.equals(scope));
}
|
/**
 * Parameterized check: a filter built from the test's skip-flag fields must
 * produce {@code expectedResult} for {@code testString}.
 */
@Test
public void shouldExcludeArtifact() {
    final Filter<String> artifactScopeExcluded = new ArtifactScopeExcluded(skipTestScope, skipProvidedScope, skipSystemScope, skipRuntimeScope);
    assertThat(expectedResult, is(equalTo(artifactScopeExcluded.passes(testString))));
}
|
/**
 * Streams the service instances registered for {@code serviceId}.
 *
 * <p>A null {@code serviceId} yields an empty Flux (via
 * {@code Mono.justOrEmpty}). The lookup is subscribed on the
 * bounded-elastic scheduler — presumably because
 * {@code loadInstancesFromPolaris()} performs blocking work; confirm there.
 */
@Override
public Flux<ServiceInstance> getInstances(String serviceId) {
    return Mono.justOrEmpty(serviceId).flatMapMany(loadInstancesFromPolaris())
            .subscribeOn(Schedulers.boundedElastic());
}
|
/**
 * A known service yields one instance; a service whose discovery throws a
 * PolarisException completes with an empty Flux rather than an error.
 */
@Test
public void testGetInstances() throws PolarisException {
    when(serviceDiscovery.getInstances(anyString())).thenAnswer(invocation -> {
        String serviceName = invocation.getArgument(0);
        if (SERVICE_PROVIDER.equalsIgnoreCase(serviceName)) {
            return singletonList(mock(ServiceInstance.class));
        }
        else {
            throw new PolarisException(ErrorCode.UNKNOWN_SERVER_ERROR);
        }
    });
    // Normal
    Flux<ServiceInstance> instances = this.client.getInstances(SERVICE_PROVIDER);
    StepVerifier.create(instances).expectNextCount(1).expectComplete().verify();
    // PolarisException
    instances = this.client.getInstances(SERVICE_PROVIDER + 1);
    StepVerifier.create(instances).expectNextCount(0).expectComplete().verify();
}
|
/**
 * Parses a decimal string, counting leading zeros toward the reported
 * precision; delegates to {@code parse(stringValue, true)}.
 */
public static DecimalParseResult parseIncludeLeadingZerosInPrecision(String stringValue)
{
    return parse(stringValue, true);
}
|
/**
 * Precision must count leading zeros (and the fractional digits) but not
 * the sign, across zero, signed, fractional, and long-form inputs.
 */
@Test
public void testParseIncludeLeadingZerosInPrecision()
{
    assertParseResultIncludeLeadingZerosInPrecision("0", 0L, 1, 0);
    assertParseResultIncludeLeadingZerosInPrecision("+0", 0L, 1, 0);
    assertParseResultIncludeLeadingZerosInPrecision("-0", 0L, 1, 0);
    assertParseResultIncludeLeadingZerosInPrecision("00000000000000000", 0L, 17, 0);
    assertParseResultIncludeLeadingZerosInPrecision("+00000000000000000", 0L, 17, 0);
    assertParseResultIncludeLeadingZerosInPrecision("-00000000000000000", 0L, 17, 0);
    assertParseResultIncludeLeadingZerosInPrecision("1.1", 11L, 2, 1);
    assertParseResultIncludeLeadingZerosInPrecision("+1.1", 11L, 2, 1);
    assertParseResultIncludeLeadingZerosInPrecision("-1.1", -11L, 2, 1);
    assertParseResultIncludeLeadingZerosInPrecision("0001.1", 11L, 5, 1);
    assertParseResultIncludeLeadingZerosInPrecision("+0001.1", 11L, 5, 1);
    assertParseResultIncludeLeadingZerosInPrecision("-0001.1", -11L, 5, 1);
    assertParseResultIncludeLeadingZerosInPrecision("000", 0L, 3, 0);
    assertParseResultIncludeLeadingZerosInPrecision("+000", 0L, 3, 0);
    assertParseResultIncludeLeadingZerosInPrecision("-000", -0L, 3, 0);
    assertParseResultIncludeLeadingZerosInPrecision("000.1", 1L, 4, 1);
    assertParseResultIncludeLeadingZerosInPrecision("+000.1", 1L, 4, 1);
    assertParseResultIncludeLeadingZerosInPrecision("-000.1", -1L, 4, 1);
    assertParseResultIncludeLeadingZerosInPrecision("000000000000000000", 0L, 18, 0);
    assertParseResultIncludeLeadingZerosInPrecision("+000000000000000000", 0L, 18, 0);
    assertParseResultIncludeLeadingZerosInPrecision("-000000000000000000", 0L, 18, 0);
    // 18+ significant positions switch to the wide (Slice-encoded) representation.
    assertParseResultIncludeLeadingZerosInPrecision("000000000000000000.123", encodeUnscaledValue("123"), 21, 3);
    assertParseResultIncludeLeadingZerosInPrecision("+000000000000000000.123", encodeUnscaledValue("123"), 21, 3);
    assertParseResultIncludeLeadingZerosInPrecision("-000000000000000000.123", encodeUnscaledValue("-123"), 21, 3);
}
|
/**
 * Gracefully takes this broker out of the load-manager cluster.
 *
 * <p>Order matters: ownerships are cleaned and the leader role plus broker
 * registration are given up before the internal topics are closed, so that
 * subsequent lookups are served by the next leader (see inline comment).
 *
 * @throws Exception if any shutdown step fails
 */
public void disableBroker() throws Exception {
    serviceUnitStateChannel.cleanOwnerships();
    leaderElectionService.close();
    brokerRegistry.unregister();
    // Close the internal topics (if owned any) after giving up the possible leader role,
    // so that the subsequent lookups could hit the next leader.
    closeInternalTopics();
}
|
/**
 * Spins up a third broker, ensures it owns a bundle, then disables it and
 * verifies ownership moves to exactly one of the remaining brokers.
 */
@Test
public void testDisableBroker() throws Exception {
    // Test rollback to modular load manager.
    ServiceConfiguration defaultConf = getDefaultConf();
    defaultConf.setAllowAutoTopicCreation(true);
    defaultConf.setForceDeleteNamespaceAllowed(true);
    defaultConf.setLoadManagerClassName(ExtensibleLoadManagerImpl.class.getName());
    defaultConf.setLoadBalancerLoadSheddingStrategy(TransferShedder.class.getName());
    defaultConf.setLoadBalancerSheddingEnabled(false);
    defaultConf.setLoadBalancerDebugModeEnabled(true);
    defaultConf.setTopicLevelPoliciesEnabled(false);
    try (var additionalPulsarTestContext = createAdditionalPulsarTestContext(defaultConf)) {
        var pulsar3 = additionalPulsarTestContext.getPulsarService();
        ExtensibleLoadManagerImpl ternaryLoadManager = spy((ExtensibleLoadManagerImpl)
                FieldUtils.readField(pulsar3.getLoadManager().get(), "loadManager", true));
        String topic = "persistent://" + defaultTestNamespace +"/test";
        String lookupResult1 = pulsar3.getAdminClient().lookups().lookupTopic(topic);
        TopicName topicName = TopicName.get(topic);
        NamespaceBundle bundle = getBundleAsync(pulsar1, topicName).get();
        // Force the bundle onto pulsar3 if the initial lookup landed elsewhere.
        if (!pulsar3.getBrokerServiceUrl().equals(lookupResult1)) {
            admin.namespaces().unloadNamespaceBundle(topicName.getNamespace(), bundle.getBundleRange(),
                    pulsar3.getBrokerId());
            lookupResult1 = pulsar2.getAdminClient().lookups().lookupTopic(topic);
        }
        String lookupResult2 = pulsar1.getAdminClient().lookups().lookupTopic(topic);
        String lookupResult3 = pulsar2.getAdminClient().lookups().lookupTopic(topic);
        assertEquals(lookupResult1, pulsar3.getBrokerServiceUrl());
        assertEquals(lookupResult1, lookupResult2);
        assertEquals(lookupResult1, lookupResult3);
        assertFalse(primaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get());
        assertFalse(secondaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get());
        assertTrue(ternaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get());
        ternaryLoadManager.disableBroker();
        // The disabled broker must no longer own the bundle; exactly one of
        // the two remaining brokers must have picked it up.
        assertFalse(ternaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get());
        if (primaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get()) {
            assertFalse(secondaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get());
        } else {
            assertTrue(secondaryLoadManager.checkOwnershipAsync(Optional.empty(), bundle).get());
        }
    }
}
|
/**
 * Returns the Spring wrapper for the named remote cache, or {@code null}
 * when the remote cache no longer exists.
 *
 * <p>When the native cache is gone (e.g. removed through the remote cache
 * manager) any stale Spring-side wrapper is evicted. Otherwise a wrapper is
 * created on demand and reused for subsequent lookups.
 */
@Override
public SpringCache getCache(final String name) {
    final RemoteCache<Object, Object> nativeCache = this.nativeCacheManager.getCache(name);
    if (nativeCache == null) {
        springCaches.remove(name);
        return null;
    }
    return springCaches.computeIfAbsent(name, n -> new SpringCache(nativeCache, reactive, readTimeout, writeTimeout));
}
|
@Test
public final void getCacheShouldReturnNullItWasChangedByRemoteCacheManager() {
// When
objectUnderTest.getCache(TEST_CACHE_NAME);
remoteCacheManager.administration().removeCache(TEST_CACHE_NAME);
// Then
assertNull(objectUnderTest.getCache(TEST_CACHE_NAME));
}
|
/**
 * Executes a repository-stored transformation synchronously and streams its
 * output to the HTTP response.
 *
 * <p>The reserved parameters {@code rep}, {@code user}, {@code pass},
 * {@code trans} and {@code level} configure the run; every other request
 * parameter is forwarded to the transformation, either as a declared named
 * parameter or otherwise as a variable.
 *
 * <p>Status codes: 400 if {@code trans} is missing, 401 on repository
 * authentication failure, 404 if the transformation cannot be found, 500 on
 * execution errors or other unexpected failures.
 */
public void doGet( HttpServletRequest request, HttpServletResponse response ) throws ServletException,
    IOException {
  if ( isJettyMode() && !request.getContextPath().startsWith( CONTEXT_PATH ) ) {
    return;
  }

  if ( log.isDebug() ) {
    logDebug( BaseMessages.getString( PKG, "ExecuteTransServlet.Log.ExecuteTransRequested" ) );
  }

  // Options taken from PAN
  //
  String[] knownOptions = new String[] { REP, USER, PASS, TRANS, LEVEL };

  String repOption = request.getParameter( REP );
  String userOption = request.getParameter( USER );
  String passOption = Encr.decryptPasswordOptionallyEncrypted( request.getParameter( PASS ) );
  String transOption = request.getParameter( TRANS );
  String levelOption = request.getParameter( LEVEL );

  response.setStatus( HttpServletResponse.SC_OK );

  String encoding = System.getProperty( "KETTLE_DEFAULT_SERVLET_ENCODING", null );
  if ( encoding != null && !Utils.isEmpty( encoding.trim() ) ) {
    response.setCharacterEncoding( encoding );
    response.setContentType( "text/html; charset=" + encoding );
  }

  PrintWriter out = response.getWriter();

  // The transformation name/path is the only mandatory parameter.
  if ( transOption == null ) {
    response.setStatus( HttpServletResponse.SC_BAD_REQUEST );
    out.println( new WebResult( WebResult.STRING_ERROR, BaseMessages.getString(
      PKG, "ExecuteTransServlet.Error.MissingMandatoryParameter", TRANS ) ) );
    return;
  }

  try {
    final Repository repository = openRepository( repOption, userOption, passOption );
    final TransMeta transMeta = loadTransformation( repository, transOption );

    // Set the servlet parameters as variables in the transformation
    //
    String[] parameters = transMeta.listParameters();
    Enumeration<?> parameterNames = request.getParameterNames();
    while ( parameterNames.hasMoreElements() ) {
      String parameter = (String) parameterNames.nextElement();
      String[] values = request.getParameterValues( parameter );

      // Ignore the known options. set the rest as variables
      //
      if ( Const.indexOfString( parameter, knownOptions ) < 0 ) {
        // If it's a trans parameter, set it, otherwise simply set the variable
        //
        if ( Const.indexOfString( parameter, parameters ) < 0 ) {
          transMeta.setVariable( parameter, values[0] );
        } else {
          transMeta.setParameterValue( parameter, values[0] );
        }
      }
    }

    TransExecutionConfiguration transExecutionConfiguration = new TransExecutionConfiguration();
    LogLevel logLevel = LogLevel.getLogLevelForCode( levelOption );
    transExecutionConfiguration.setLogLevel( logLevel );
    TransConfiguration transConfiguration = new TransConfiguration( transMeta, transExecutionConfiguration );

    String carteObjectId = UUID.randomUUID().toString();
    SimpleLoggingObject servletLoggingObject =
      new SimpleLoggingObject( CONTEXT_PATH, LoggingObjectType.CARTE, null );
    servletLoggingObject.setContainerObjectId( carteObjectId );
    servletLoggingObject.setLogLevel( logLevel );

    // Create the transformation and store in the list...
    //
    final Trans trans = new Trans( transMeta, servletLoggingObject );

    trans.setRepository( repository );
    trans.setSocketRepository( getSocketRepository() );

    getTransformationMap().addTransformation( transMeta.getName(), carteObjectId, trans, transConfiguration );
    trans.setContainerObjectId( carteObjectId );

    if ( repository != null ) {
      // The repository connection is open: make sure we disconnect from the repository once we
      // are done with this transformation.
      //
      trans.addTransListener( new TransAdapter() {
        @Override public void transFinished( Trans trans ) {
          repository.disconnect();
        }
      } );
    }

    // Pass the servlet print writer to the transformation...
    //
    trans.setServletPrintWriter( out );
    trans.setServletReponse( response );
    trans.setServletRequest( request );

    try {
      // Execute the transformation...
      //
      executeTrans( trans );
      String logging = KettleLogStore.getAppender().getBuffer( trans.getLogChannelId(), false ).toString();
      if ( trans.isFinishedOrStopped() && trans.getErrors() > 0 ) {
        response.setStatus( HttpServletResponse.SC_INTERNAL_SERVER_ERROR );
        out.println( new WebResult( WebResult.STRING_ERROR, BaseMessages.getString(
          PKG, "ExecuteTransServlet.Error.ErrorExecutingTrans", logging ) ) );
      }
      out.flush();
    } catch ( Exception executionException ) {
      // Attach the collected log lines to the error we rethrow below.
      String logging = KettleLogStore.getAppender().getBuffer( trans.getLogChannelId(), false ).toString();
      throw new KettleException( BaseMessages.getString( PKG, "ExecuteTransServlet.Error.ErrorExecutingTrans", logging ), executionException );
    }
  } catch ( Exception ex ) {
    // When we get to this point KettleAuthenticationException has already been wrapped in an Execution Exception
    // and that in a KettleException
    Throwable kettleExceptionCause = ex.getCause();
    if ( kettleExceptionCause != null && kettleExceptionCause instanceof ExecutionException ) {
      Throwable executionExceptionCause = kettleExceptionCause.getCause();
      if ( executionExceptionCause != null && executionExceptionCause instanceof KettleAuthenticationException ) {
        response.setStatus( HttpServletResponse.SC_UNAUTHORIZED );
        out.println( new WebResult( WebResult.STRING_ERROR, BaseMessages.getString(
          PKG, "ExecuteTransServlet.Error.Authentication", getContextPath() ) ) );
      }
      // NOTE(review): when the ExecutionException wraps something other than a
      // KettleAuthenticationException, no error body or status is written here
      // and the earlier SC_OK stands — confirm whether that is intentional.
    } else if ( ex.getMessage() != null && ex.getMessage().contains( UNABLE_TO_FIND_TRANS ) ) {
      // Null-guard added: exceptions without a message previously caused an
      // NPE here instead of reaching the generic 500 branch below.
      response.setStatus( HttpServletResponse.SC_NOT_FOUND );
      out.println( new WebResult( WebResult.STRING_ERROR, BaseMessages.getString(
        PKG, "ExecuteTransServlet.Error.UnableToFindTransformation", transOption ) ) );
    } else {
      response.setStatus( HttpServletResponse.SC_INTERNAL_SERVER_ERROR );
      out.println( new WebResult( WebResult.STRING_ERROR, BaseMessages.getString(
        PKG, "ExecuteTransServlet.Error.UnexpectedError", Const.CR + Const.getStackTracker( ex ) ) ) );
    }
  }
}
|
/**
 * Without the mandatory "trans" parameter the servlet first sets SC_OK (its
 * default) and then downgrades to SC_BAD_REQUEST; both setStatus calls are
 * verified in that order of occurrence.
 */
@Test
public void doGetMissingMandatoryParamRepoTest() throws Exception {
    HttpServletRequest mockHttpServletRequest = mock( HttpServletRequest.class );
    HttpServletResponse mockHttpServletResponse = mock( HttpServletResponse.class );
    KettleLogStore.init();
    StringWriter out = new StringWriter();
    PrintWriter printWriter = new PrintWriter( out );
    when( mockHttpServletRequest.getParameter( "rep" ) ).thenReturn( null );
    when( mockHttpServletRequest.getParameter( "trans" ) ).thenReturn( null );
    when( mockHttpServletResponse.getWriter() ).thenReturn( printWriter );
    executeTransServlet.doGet( mockHttpServletRequest, mockHttpServletResponse );
    verify( mockHttpServletResponse ).setStatus( HttpServletResponse.SC_OK );
    verify( mockHttpServletResponse ).setStatus( HttpServletResponse.SC_BAD_REQUEST );
}
|
/**
 * Reads the whole file as a UTF-8 string.
 *
 * @param fileName path of the file to read
 * @return the file's contents decoded as UTF-8
 * @throws RuntimeException wrapping the {@code IOException} when reading fails
 */
static String readFileContents(String fileName) {
    final File file = new File(fileName);
    try {
        return Files.readString(file.toPath(), StandardCharsets.UTF_8);
    } catch (IOException e) {
        throw new RuntimeException("Could not get " + fileName, e);
    }
}
|
/**
 * Round-trip: a file written with known (Unicode-bearing) contents must be
 * read back verbatim by readFileContents.
 */
@Test
public void readFileContents()
        throws IOException {
    // given
    String expectedContents = "Hello, world!\nThis is a test with Unicode ✓.";
    String testFile = createTestFile(expectedContents);
    // when
    String actualContents = AzureDiscoveryStrategyFactory.readFileContents(testFile);
    // then
    assertEquals(expectedContents, actualContents);
}
|
/**
 * Returns the number of queued events, as reported by the underlying event
 * handler.
 */
@Override
public int size() {
    return eventHandler.size();
}
|
/**
 * An exception thrown from an event's handleException callback must not
 * kill the queue: the following event still runs and all three counter
 * increments are observed.
 */
@Test
public void testHandleExceptionThrowingAnException() throws Exception {
    try (KafkaEventQueue queue = new KafkaEventQueue(Time.SYSTEM, logContext, "testHandleExceptionThrowingAnException")) {
        // Gate the queue on a future so all three events are enqueued before any run.
        CompletableFuture<Void> initialFuture = new CompletableFuture<>();
        queue.append(() -> initialFuture.get());
        AtomicInteger counter = new AtomicInteger(0);
        queue.append(new EventQueue.Event() {
            @Override
            public void run() {
                counter.incrementAndGet();
                throw new IllegalStateException("First exception");
            }
            @Override
            public void handleException(Throwable e) {
                if (e instanceof IllegalStateException) {
                    counter.incrementAndGet();
                    throw new RuntimeException("Second exception");
                }
            }
        });
        queue.append(() -> counter.incrementAndGet());
        assertEquals(3, queue.size());
        // Release the gate and wait for all events to execute.
        initialFuture.complete(null);
        TestUtils.waitForCondition(() -> counter.get() == 3,
            "Failed to see all events execute as planned.");
    }
}
|
/**
 * Null-safe factory: wraps the given socket address, or returns {@code null}
 * when passed {@code null}.
 */
public static ResolvableInetSocketAddress wrap(InetSocketAddress socketAddress) {
    return socketAddress == null ? null : new ResolvableInetSocketAddress(socketAddress);
}
|
/** Wrapping a null address must return null rather than throwing. */
@Test
public void testWrapWithNull() throws Exception {
    assertThat(ResolvableInetSocketAddress.wrap(null)).isNull();
}
|
/**
 * Membership test restricted by the selector: an object is contained only
 * if it equals a member that the selector accepts.
 */
@Override
public boolean contains(Object o) {
    for (M candidate : members) {
        // Preserve the original evaluation order: selector first, then equality.
        if (!selector.select(candidate)) {
            continue;
        }
        if (o.equals(candidate)) {
            return true;
        }
    }
    return false;
}
|
/**
 * A collection filtered to data members must not report a member that is
 * not part of the backing member set.
 */
@Test
public void testDoesNotContainOtherMemberWhenDataMembersSelected() {
    Collection<MemberImpl> collection = new MemberSelectingCollection<>(members, DATA_MEMBER_SELECTOR);
    assertFalse(collection.contains(nonExistingMember));
}
|
/**
 * Parses a (possibly remote-qualified) symbol table name.
 *
 * <p>Without the separator the whole name is treated as a local table; with
 * it, the prefix is the server node URI and the suffix is the table name. A
 * separator at the very start or end is malformed.
 *
 * @param fullName the full symbol table name
 * @return metadata describing the (local or remote) symbol table
 * @throws RuntimeException when the name is malformed
 */
public SymbolTableMetadata extractMetadata(String fullName)
{
    final int separatorPos = fullName.indexOf(SERVER_NODE_URI_PREFIX_TABLENAME_SEPARATOR);
    if (separatorPos < 0)
    {
        // No separator char found: assume it's a local table.
        return new SymbolTableMetadata(null, fullName, false);
    }
    if (separatorPos == 0 || separatorPos == fullName.length() - 1)
    {
        throw new RuntimeException("Unexpected name format for name: " + fullName);
    }
    return createMetadata(fullName.substring(0, separatorPos), fullName.substring(separatorPos + 1));
}
|
@Test
public void testExtractTableInfoLocalTable()
{
    // A name without a separator parses as a local table: no server URI, not remote.
    final String localName = "Prefix-1000";
    SymbolTableMetadata metadata = new SymbolTableMetadataExtractor().extractMetadata(localName);
    Assert.assertNull(metadata.getServerNodeUri());
    Assert.assertEquals(metadata.getSymbolTableName(), localName);
    Assert.assertFalse(metadata.isRemote());
}
|
/**
 * Selects bundles to unload from brokers whose resource usage exceeds the
 * historical average usage plus the configured threshold.
 *
 * <p>For each overloaded broker, the throughput to offload is estimated from how
 * far the broker sits above the threshold; brokers below the minimum-throughput
 * cutoff or with a single bundle are skipped. If nothing was selected and lower
 * boundary shedding is enabled, that fallback is attempted instead.
 *
 * @param loadData cluster load snapshot (brokers, bundles, recent unloads)
 * @param conf broker service configuration supplying the shedding thresholds
 * @return multimap of broker name to bundle names chosen for unloading
 */
@Override
public synchronized Multimap<String, String> findBundlesForUnloading(final LoadData loadData,
                                                                     final ServiceConfiguration conf) {
    selectedBundlesCache.clear();
    final double threshold = conf.getLoadBalancerBrokerThresholdShedderPercentage() / 100.0;
    final Map<String, Long> recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles();
    final double minThroughputThreshold = conf.getLoadBalancerBundleUnloadMinThroughputThreshold() * MB;
    final double avgUsage = getBrokerAvgUsage(loadData, conf.getLoadBalancerHistoryResourcePercentage(), conf);
    if (avgUsage == 0) {
        // No usable usage data yet; nothing sensible to shed.
        log.warn("average max resource usage is 0");
        return selectedBundlesCache;
    }
    loadData.getBrokerData().forEach((broker, brokerData) -> {
        final LocalBrokerData localData = brokerData.getLocalData();
        final double currentUsage = brokerAvgResourceUsage.getOrDefault(broker, 0.0);
        if (currentUsage < avgUsage + threshold) {
            // Broker is within the allowed band above the average; skip it.
            if (log.isDebugEnabled()) {
                log.debug("[{}] broker is not overloaded, ignoring at this point ({})", broker,
                        localData.printResourceUsage());
            }
            return;
        }
        // Fraction of traffic to move away, padded by a small safety margin.
        double percentOfTrafficToOffload =
                currentUsage - avgUsage - threshold + ADDITIONAL_THRESHOLD_PERCENT_MARGIN;
        double brokerCurrentThroughput = localData.getMsgThroughputIn() + localData.getMsgThroughputOut();
        double minimumThroughputToOffload = brokerCurrentThroughput * percentOfTrafficToOffload;
        if (minimumThroughputToOffload < minThroughputThreshold) {
            // Too little would be moved to be worth the unload churn.
            if (log.isDebugEnabled()) {
                log.debug("[{}] broker is planning to shed throughput {} MByte/s less than "
                                + "minimumThroughputThreshold {} MByte/s, skipping bundle unload ({})",
                        broker, minimumThroughputToOffload / MB, minThroughputThreshold / MB,
                        localData.printResourceUsage());
            }
            return;
        }
        log.info(
                "Attempting to shed load on {}, which has max resource usage above avgUsage and threshold {}%"
                        + " > {}% + {}% -- Offloading at least {} MByte/s of traffic,"
                        + " left throughput {} MByte/s ({})",
                broker, 100 * currentUsage, 100 * avgUsage, 100 * threshold, minimumThroughputToOffload / MB,
                (brokerCurrentThroughput - minimumThroughputToOffload) / MB, localData.printResourceUsage());
        if (localData.getBundles().size() > 1) {
            filterAndSelectBundle(loadData, recentlyUnloadedBundles, broker, localData, minimumThroughputToOffload);
        } else if (localData.getBundles().size() == 1) {
            // A sole bundle cannot be split off this broker; only warn.
            log.warn(
                    "HIGH USAGE WARNING : Sole namespace bundle {} is overloading broker {}. "
                            + "No Load Shedding will be done on this broker",
                    localData.getBundles().iterator().next(), broker);
        } else {
            log.warn("Broker {} is overloaded despite having no bundles", broker);
        }
    });
    if (selectedBundlesCache.isEmpty() && conf.isLowerBoundarySheddingEnabled()) {
        // Fallback strategy when threshold-based shedding chose nothing.
        tryLowerBoundaryShedding(loadData, conf);
    }
    return selectedBundlesCache;
}
|
@Test
public void testLowerBoundarySheddingBrokerWithOneBundle() {
    // Scenario: 11 brokers, 10 loaded at 80% and one idle; only broker-3 has
    // more than one bundle, so only it can shed when lower-boundary shedding kicks in.
    int brokerNum = 11;
    int lowLoadNode = 5;
    int brokerWithManyBundles = 3;
    LoadData loadData = new LoadData();
    double throughput = 100 * 1024 * 1024;
    //There are 11 Brokers, of which 10 are loaded at 80% and 1 is loaded at 0%.
    //Only broker3 has 10 bundles.
    for (int i = 0; i < brokerNum; i++) {
        LocalBrokerData broker = new LocalBrokerData();
        //Broker3 has 10 bundles
        int numBundles = i == brokerWithManyBundles ? 10 : 1;
        for (int j = 0; j < numBundles; j++) {
            BundleData bundle = new BundleData();
            TimeAverageMessageData timeAverageMessageData = new TimeAverageMessageData();
            timeAverageMessageData.setMsgThroughputIn(i == lowLoadNode ? 0 : throughput);
            timeAverageMessageData.setMsgThroughputOut(i == lowLoadNode ? 0 : throughput);
            bundle.setShortTermData(timeAverageMessageData);
            String broker2BundleName = "broker-" + i + "-bundle-" + j;
            loadData.getBundleData().put(broker2BundleName, bundle);
            broker.getBundles().add(broker2BundleName);
        }
        broker.setBandwidthIn(new ResourceUsage(i == lowLoadNode ? 0 : 80, 100));
        broker.setBandwidthOut(new ResourceUsage(i == lowLoadNode ? 0 : 80, 100));
        broker.setMsgThroughputIn(i == lowLoadNode ? 0 : throughput);
        broker.setMsgThroughputOut(i == lowLoadNode ? 0 : throughput);
        loadData.getBrokerData().put("broker-" + i, new BrokerData(broker));
    }
    // Use a single shedder instance for both invocations; the original mixed a
    // fresh local instance with the shared 'thresholdShedder' field, which made
    // the second assertion depend on unrelated fixture state.
    ThresholdShedder shedder = new ThresholdShedder();
    Multimap<String, String> bundlesToUnload = shedder.findBundlesForUnloading(loadData, conf);
    // Without lower-boundary shedding nothing qualifies for unloading.
    assertTrue(bundlesToUnload.isEmpty());
    conf.setLowerBoundarySheddingEnabled(true);
    bundlesToUnload = shedder.findBundlesForUnloading(loadData, conf);
    // Lower-boundary shedding must pick exactly one bundle, from the only
    // broker with more than one bundle.
    assertFalse(bundlesToUnload.isEmpty());
    assertEquals(bundlesToUnload.size(), 1);
    assertTrue(bundlesToUnload.containsKey("broker-3"));
}
|
/**
 * Creates a {@code Read} transform that parses Pub/Sub payloads into
 * {@link DynamicMessage}s described by the given proto domain.
 *
 * @param domain proto descriptor domain containing the message definition
 * @param fullMessageName fully-qualified proto message name to look up in the domain
 * @return a configured {@code Read} whose coder is derived from the message schema
 */
public static Read<DynamicMessage> readProtoDynamicMessages(
    ProtoDomain domain, String fullMessageName) {
  SerializableFunction<PubsubMessage, DynamicMessage> parser =
      message -> {
        try {
          return DynamicMessage.parseFrom(
              domain.getDescriptor(fullMessageName), message.getPayload());
        } catch (InvalidProtocolBufferException e) {
          // Malformed payloads are fatal to the pipeline element; surface the cause.
          throw new RuntimeException("Could not parse Pub/Sub message", e);
        }
      };
  // Derive a Beam schema (and row conversions) from the proto descriptor so the
  // resulting PCollection carries schema-aware coding.
  ProtoDynamicMessageSchema<DynamicMessage> schema =
      ProtoDynamicMessageSchema.forDescriptor(domain, domain.getDescriptor(fullMessageName));
  return Read.newBuilder(parser)
      .setCoder(
          SchemaCoder.of(
              schema.getSchema(),
              TypeDescriptor.of(DynamicMessage.class),
              schema.getToRowFunction(),
              schema.getFromRowFunction()))
      .build();
}
|
@Test
public void testProtoDynamicMessagesFromDescriptor() {
  // Round-trip: publish Primitive protos, read them back as DynamicMessage via
  // the descriptor-based reader, convert back to Primitive and compare.
  ProtoCoder<Primitive> coder = ProtoCoder.of(Primitive.class);
  ImmutableList<Primitive> inputs =
      ImmutableList.of(
          Primitive.newBuilder().setPrimitiveInt32(42).build(),
          Primitive.newBuilder().setPrimitiveBool(true).build(),
          Primitive.newBuilder().setPrimitiveString("Hello, World!").build());
  setupTestClient(inputs, coder);
  PCollection<Primitive> read =
      pipeline
          .apply(
              PubsubIO.readProtoDynamicMessages(Primitive.getDescriptor())
                  .fromSubscription(SUBSCRIPTION.getPath())
                  .withClock(CLOCK)
                  .withClientFactory(clientFactory))
          .apply(
              "Return To Primitive",
              MapElements.into(TypeDescriptor.of(Primitive.class))
                  .via(
                      (DynamicMessage message) -> {
                        try {
                          // Re-parse the dynamic bytes as the concrete type for comparison.
                          return Primitive.parseFrom(message.toByteArray());
                        } catch (InvalidProtocolBufferException e) {
                          throw new RuntimeException("Could not return to Primitive", e);
                        }
                      }));
  PAssert.that(read).containsInAnyOrder(inputs);
  pipeline.run();
}
|
/**
 * Asserts that the given argument is not {@code null}.
 *
 * @param argument the value to check
 * @param name the argument name used in the exception message
 * @param <T> the argument type
 * @return the argument itself, for fluent use
 * @throws NullPointerException if {@code argument} is {@code null}
 */
public static <T> T checkNotNull(T argument, String name) {
    if (argument != null) {
        return argument;
    }
    throw new NullPointerException(name + " can't be null");
}
|
@Test
public void test_checkNotNull2_whenNotNull() {
    // A non-null argument must pass the check without throwing.
    final LinkedList<Object> nonNullArgument = new LinkedList<>();
    checkNotNull(nonNullArgument, "foo");
}
|
/**
 * Logs into the admin endpoint with the given credentials and extracts the
 * authentication token from the JSON response.
 *
 * @param username the login name
 * @param password the password
 * @param url the admin login endpoint URL
 * @return the token from the response data, or {@link Optional#empty()} when the
 *         response code is not the success code
 * @throws IOException if the HTTP call fails
 */
public static Optional<Object> doLogin(final String username, final String password, final String url) throws IOException {
    Map<String, Object> loginMap = new HashMap<>(2);
    loginMap.put(Constants.LOGIN_NAME, username);
    loginMap.put(Constants.PASS_WORD, password);
    String result = OkHttpTools.getInstance().get(url, loginMap);
    Map<String, Object> resultMap = GsonUtils.getInstance().convertToMap(result);
    // Compare as strings so numeric vs. string code representations both match.
    if (!String.valueOf(CommonErrorCode.SUCCESSFUL).equals(String.valueOf(resultMap.get(Constants.ADMIN_RESULT_CODE)))) {
        return Optional.empty();
    }
    // The "data" field is itself an object; re-serialize then re-parse it as a map.
    String tokenJson = GsonUtils.getInstance().toJson(resultMap.get(Constants.ADMIN_RESULT_DATA));
    LOGGER.info("login success: {} ", tokenJson);
    Map<String, Object> tokenMap = GsonUtils.getInstance().convertToMap(tokenJson);
    return Optional.ofNullable(tokenMap.get(Constants.ADMIN_RESULT_TOKEN));
}
|
@Test
public void testDoLogin() throws IOException {
    // Stubs the HTTP layer with a successful login payload and checks the token
    // is extracted from the nested "data" object.
    final String userName = "userName";
    final String password = "password";
    final String token = "token";
    Map<String, Object> loginMap = new HashMap<>(2);
    loginMap.put(Constants.LOGIN_NAME, userName);
    loginMap.put(Constants.PASS_WORD, password);
    when(okHttpTools.get(url, loginMap)).thenReturn("{\"code\":200,\"data\":{\"token\":\"" + token + "\"}}");
    try (MockedStatic<OkHttpTools> okHttpToolsMockedStatic = mockStatic(OkHttpTools.class)) {
        // Route the static singleton accessor to the mocked client.
        okHttpToolsMockedStatic.when(OkHttpTools::getInstance).thenReturn(okHttpTools);
        Optional<Object> objectOptional = RegisterUtils.doLogin(userName, password, url);
        Assertions.assertEquals(token, objectOptional.get());
    }
}
|
/**
 * Checks whether a char sequence is blank: {@code null}, empty, or made up
 * entirely of whitespace characters.
 *
 * @param cs the sequence to check, may be {@code null}
 * @return {@code true} if the sequence is null, empty, or whitespace-only
 */
public static boolean isBlank(final CharSequence cs) {
    if (cs == null || cs.length() == 0) {
        return true;
    }
    // Blank iff every character is whitespace.
    return cs.chars().allMatch(Character::isWhitespace);
}
|
@Test
public void testIsBlank() {
    // Non-blank input: rejected by isBlank, accepted by isNotBlank.
    assertTrue(StringUtils.isNotBlank("abc"));
    assertFalse(StringUtils.isBlank("abc"));
    // Whitespace-only and null inputs both count as blank.
    assertTrue(StringUtils.isBlank(" "));
    assertTrue(StringUtils.isBlank(null));
}
|
/**
 * Computes the exclusive-or of all given booleans: true iff an odd number of
 * the operands are true.
 *
 * @param array the operands, must be non-empty
 * @return the XOR of all operands
 * @throws IllegalArgumentException if the array is empty
 */
public static boolean xor(boolean... array) {
    if (ArrayUtil.isEmpty(array)) {
        throw new IllegalArgumentException("The Array must not be empty");
    }
    // XOR over booleans is equivalent to the parity of the true-count.
    int trueCount = 0;
    for (final boolean element : array) {
        if (element) {
            trueCount++;
        }
    }
    return (trueCount & 1) == 1;
}
|
@Test
public void xorTest(){
    // Exactly one true operand makes the XOR true, for both primitive and wrapper forms.
    final boolean primitiveXor = BooleanUtil.xor(true, false);
    assertTrue(primitiveXor);
    final boolean wrapperXor = BooleanUtil.xorOfWrap(true, false);
    assertTrue(wrapperXor);
}
|
/**
 * Reads one complete logical MySQL packet from the channel, reassembling it
 * from multiple physical packets when the payload spans the maximum physical
 * packet length.
 *
 * @return a flipped buffer holding the full packet payload, or {@code null} if
 *         the remote side closed the channel or sent a truncated packet
 * @throws IOException if the packet sequence id is out of order
 */
public ByteBuffer fetchOnePacket() throws IOException {
    int readLen;
    ByteBuffer result = defaultBuffer;
    result.clear();
    while (true) {
        // Each physical packet starts with a fixed-size header (length + sequence id).
        headerByteBuffer.clear();
        readLen = readAll(headerByteBuffer);
        if (readLen != PACKET_HEADER_LEN) {
            // remote has close this channel
            LOG.info("Receive packet header failed, " +
                    "remote {} may close the channel.", remoteHostPortString);
            return null;
        }
        if (packetId() != sequenceId) {
            LOG.warn("receive packet sequence id[" + packetId() + "] want to get[" + sequenceId + "]");
            throw new IOException("Bad packet sequence.");
        }
        int packetLen = packetLen();
        if ((result.capacity() - result.position()) < packetLen) {
            // byte buffer is not enough, new one packet
            ByteBuffer tmp;
            if (packetLen < MAX_PHYSICAL_PACKET_LENGTH) {
                // last packet, enough to this packet is OK.
                tmp = ByteBuffer.allocate(packetLen + result.position());
            } else {
                // already have packet, to allocate two packet.
                tmp = ByteBuffer.allocate(2 * packetLen + result.position());
            }
            // Carry over the bytes already accumulated from earlier physical packets.
            tmp.put(result.array(), 0, result.position());
            result = tmp;
        }
        // read one physical packet
        // before read, set limit to make read only one packet
        result.limit(result.position() + packetLen);
        readLen = readAll(result);
        if (readLen != packetLen) {
            LOG.warn("Length of received packet content(" + readLen
                    + ") is not equal with length in head.(" + packetLen + ")");
            return null;
        }
        accSequenceId();
        // A physical packet shorter than the maximum marks the end of the
        // logical packet; a max-length packet means more fragments follow.
        if (packetLen != MAX_PHYSICAL_PACKET_LENGTH) {
            result.flip();
            break;
        }
    }
    return result;
}
|
@Test
public void testReceive() throws IOException {
    // mock
    // JMockit delegate fakes two channel reads: first the 4-byte MySQL packet
    // header declaring a 10-byte payload, then the payload itself ('a'..'j').
    new Expectations() {
        {
            channel.read((ByteBuffer) any);
            minTimes = 0;
            result = new Delegate() {
                int fakeRead(ByteBuffer buffer) {
                    MysqlSerializer serializer = MysqlSerializer.newInstance();
                    if (readIdx == 0) {
                        // First call: write the packet header (length=10, next packet id).
                        readIdx++;
                        serializer.writeInt3(10);
                        serializer.writeInt1(packetId++);
                        buffer.put(serializer.toArray());
                        return 4;
                    } else if (readIdx == 1) {
                        // Second call: fill the body with consecutive letters.
                        readIdx++;
                        byte[] buf = new byte[buffer.remaining()];
                        for (int i = 0; i < buffer.remaining(); ++i) {
                            buf[i] = (byte) ('a' + i);
                        }
                        buffer.put(buf);
                        return 10;
                    }
                    // Any further read signals end-of-stream.
                    return -1;
                }
            };
        }
    };
    MysqlChannel channel1 = new MysqlChannel(channel);
    ByteBuffer buf = channel1.fetchOnePacket();
    // The fetched packet must contain exactly the 10 fabricated payload bytes.
    Assert.assertEquals(10, buf.remaining());
    for (int i = 0; i < 10; ++i) {
        Assert.assertEquals('a' + i, buf.get());
    }
}
|
/**
 * Returns the full alias group the given registry belongs to, with the
 * requested registry moved to the front. A registry outside every known group
 * is returned as its own singleton list.
 *
 * @param registry the registry host to look up
 * @return the registry followed by its aliases, or a singleton of the registry
 */
public static List<String> getAliasesGroup(String registry) {
    for (ImmutableSet<String> aliasGroup : REGISTRY_ALIAS_GROUPS) {
        if (!aliasGroup.contains(registry)) {
            continue;
        }
        // Found the group: emit the requested registry first, then the remaining aliases.
        return Stream.concat(
                Stream.of(registry),
                aliasGroup.stream().filter(member -> !registry.equals(member)))
            .collect(Collectors.toList());
    }
    // Not part of any known group; the registry stands alone.
    return Collections.singletonList(registry);
}
|
@Test
public void testGetAliasesGroup_noKnownAliases() {
    // A registry outside every alias group maps to a list containing only itself.
    final String unknownRegistry = "something.gcr.io";
    List<String> aliases = RegistryAliasGroup.getAliasesGroup(unknownRegistry);
    Assert.assertEquals(1, aliases.size());
    Assert.assertEquals(unknownRegistry, aliases.get(0));
}
|
/**
 * Adds a message listener configuration to this reliable topic config.
 *
 * @param listenerConfig the listener configuration to add, must not be null
 * @return this config, for method chaining
 * @throws NullPointerException if listenerConfig is null
 */
public ReliableTopicConfig addMessageListenerConfig(ListenerConfig listenerConfig) {
    checkNotNull(listenerConfig, "listenerConfig can't be null");
    listenerConfigs.add(listenerConfig);
    return this;
}
|
@Test
public void addMessageListenerConfig() {
    // An added listener config must show up in the config's listener list.
    ReliableTopicConfig topicConfig = new ReliableTopicConfig("foo");
    ListenerConfig listener = new ListenerConfig("foobar");
    topicConfig.addMessageListenerConfig(listener);
    assertEquals(List.of(listener), topicConfig.getMessageListenerConfigs());
}
|
@Override
public K getKey() {
    // Lazily deserialize the key and cache it so repeated calls pay the cost once.
    // NOTE(review): the unsynchronized check-then-act is benign if deserialization
    // is idempotent, but it is not strictly thread-safe — confirm usage context.
    if (keyObject == null) {
        keyObject = serializationService.toObject(keyData);
    }
    return keyObject;
}
|
@Test
public void testGetKey() {
    // getKey() must deserialize the stored key data back to the original object.
    String originalKey = "key";
    Data serializedKey = serializationService.toData(originalKey);
    QueryableEntry entry = createEntry(serializedKey, new Object(), newExtractor());
    assertEquals(originalKey, entry.getKey());
}
|
/**
 * Runs the system mount command and parses each output line into mount info.
 * Only supported on Linux and macOS.
 *
 * @return one {@link UnixMountInfo} per line of mount output
 * @throws IOException if the mount command fails
 */
public static List<UnixMountInfo> getUnixMountInfo() throws IOException {
    Preconditions.checkState(OSUtils.isLinux() || OSUtils.isMacOS());
    String[] mountLines = execCommand(MOUNT_COMMAND).split("\n");
    List<UnixMountInfo> mountInfo = new ArrayList<>(mountLines.length);
    for (String mountLine : mountLines) {
        mountInfo.add(parseMountInfo(mountLine));
    }
    return mountInfo;
}
|
@Test
public void getMountInfo() throws Exception {
    // Mount parsing is only meaningful on Unix-like systems; skip elsewhere.
    assumeTrue(OSUtils.isMacOS() || OSUtils.isLinux());
    List<UnixMountInfo> mountInfos = ShellUtils.getUnixMountInfo();
    assertTrue(mountInfos.size() > 0);
}
|
/**
 * Starts a batch of workflow instances inside a single retryable transaction,
 * applying the given run strategy to decide which instances actually start.
 *
 * @param workflowId the workflow to start instances for
 * @param runStrategy strategy (e.g. SEQUENTIAL, FIRST_ONLY, LAST_ONLY) governing starts
 * @param instances instances to start; may be null or empty (no-op)
 * @return per-instance status codes; all zeros when every uuid was a duplicate
 */
public int[] startBatchWithRunStrategy(
    @NotNull String workflowId,
    @NotNull RunStrategy runStrategy,
    List<WorkflowInstance> instances) {
  if (instances == null || instances.isEmpty()) {
    return new int[0];
  }
  return withMetricLogError(
      () -> {
        Set<String> uuids =
            instances.stream().map(WorkflowInstance::getWorkflowUuid).collect(Collectors.toSet());
        return withRetryableTransaction(
            conn -> {
              // Instance ids are assigned starting after the current latest id.
              final long nextInstanceId = getLatestInstanceId(conn, workflowId) + 1;
              if (dedupAndCheckIfAllDuplicated(conn, workflowId, uuids)) {
                // Every uuid already exists; nothing to start.
                return new int[instances.size()];
              }
              long lastAssignedInstanceId =
                  completeInstancesInit(conn, nextInstanceId, uuids, instances);
              int[] res;
              switch (runStrategy.getRule()) {
                case SEQUENTIAL:
                case PARALLEL:
                case STRICT_SEQUENTIAL:
                  res = enqueueInstances(conn, workflowId, instances);
                  break;
                case FIRST_ONLY:
                  res = startFirstOnlyInstances(conn, workflowId, instances);
                  break;
                case LAST_ONLY:
                  res = startLastOnlyInstances(conn, workflowId, instances);
                  break;
                default:
                  throw new MaestroInternalError(
                      "When startBatch, run strategy [%s] is not supported.", runStrategy);
              }
              // Persist the new high-water mark only if ids were actually assigned.
              if (lastAssignedInstanceId >= nextInstanceId) {
                updateLatestInstanceId(conn, workflowId, lastAssignedInstanceId);
              }
              return res;
            });
      },
      "startBatchWithRunStrategy",
      "Failed to start [{}] workflow instances for [{}] with run strategy [{}]",
      instances.size(),
      workflowId,
      runStrategy);
}
|
@Test
public void testStartBatchRunStrategyWithFirstOnly() throws Exception {
  // FIRST_ONLY: the first instance starts (1), duplicates return 0, and later
  // instances are created but stopped (-1).
  List<WorkflowInstance> batch = prepareBatch();
  int[] res =
      runStrategyDao.startBatchWithRunStrategy(
          TEST_WORKFLOW_ID, RunStrategy.create("FIRST_ONLY"), batch);
  assertArrayEquals(new int[] {1, 0, -1}, res);
  assertEquals(1, batch.get(0).getWorkflowInstanceId());
  assertEquals(0, batch.get(1).getWorkflowInstanceId());
  assertEquals(2, batch.get(2).getWorkflowInstanceId());
  WorkflowInstance previous = dao.getWorkflowInstanceRun(TEST_WORKFLOW_ID, 1, 1);
  WorkflowInstance latestRun = dao.getLatestWorkflowInstanceRun(TEST_WORKFLOW_ID, 2);
  // First instance remains in CREATED; the later one was stopped by the strategy.
  assertEquals(1, previous.getWorkflowInstanceId());
  assertEquals("wfi1-uuid", previous.getWorkflowUuid());
  assertEquals(WorkflowInstance.Status.CREATED, previous.getStatus());
  assertEquals(2, latestRun.getWorkflowInstanceId());
  assertEquals("wfi3-uuid", latestRun.getWorkflowUuid());
  assertEquals(WorkflowInstance.Status.STOPPED, latestRun.getStatus());
  verifyPublish(0, 0, 0, 1, 1);
  // Clean up the stopped instance so later tests see a fresh state.
  MaestroTestHelper.removeWorkflowInstance(dataSource, TEST_WORKFLOW_ID, 2);
}
|
/**
 * Decodes a binary-encoded predicate.
 *
 * @param buf the encoded predicate bytes, must not be null
 * @return the decoded predicate
 * @throws NullPointerException if buf is null
 */
public static Predicate decode(byte[] buf) {
    Objects.requireNonNull(buf, "buf");
    return decode(com.yahoo.slime.BinaryFormat.decode(buf).get());
}
|
@Test
void requireThatDecodeNullThrows() {
    // Decoding a null buffer must be rejected up front with the argument name.
    boolean thrown = false;
    try {
        BinaryFormat.decode(null);
    } catch (NullPointerException e) {
        thrown = true;
        assertEquals("buf", e.getMessage());
    }
    if (!thrown) {
        fail();
    }
}
|
/**
 * Sends this request through the configured service and decodes the reply
 * into the expected response type.
 *
 * @return the decoded response
 * @throws IOException if the underlying transport fails
 */
public T send() throws IOException {
    return web3jService.send(this, responseType);
}
|
@Test
public void testNetVersion() throws Exception {
    web3j.netVersion().send();
    // The request must serialize to the canonical JSON-RPC net_version payload.
    verifyResult("{\"jsonrpc\":\"2.0\",\"method\":\"net_version\",\"params\":[],\"id\":1}");
}
|
/**
 * Expands the snippet for the given cell value, dispatching on the snippet type.
 * Unrecognized types fall back to single-value substitution.
 *
 * @param cellValue the raw cell value to substitute into the snippet
 * @return the expanded snippet text
 */
public String build( final String cellValue ) {
    switch ( type ) {
        case INDEXED:
            // Positional placeholders: each value maps to its numbered slot.
            return buildMulti( cellValue );
        case FORALL:
            // forall(...) sections expand once per supplied value.
            return buildForAll( cellValue );
        default:
            return buildSingle( cellValue );
    }
}
|
@Test
public void testForAllOrMultiple() {
    // Two forall sections each expand over both values, joined by the outer &&.
    final SnippetBuilder builder =
            new SnippetBuilder("forall(||){something == $} && forall(||){something < $}");
    final String expanded = builder.build("x, y");
    assertThat(expanded)
            .isEqualTo("something == x || something == y && something < x || something < y");
}
|
@Override
public List<String> readMultiParam(String key) {
    // A missing parameter yields an empty list; present values are copied
    // into an immutable list so callers cannot mutate the source.
    String[] rawValues = source.getParameterValues(key);
    if (rawValues == null) {
        return emptyList();
    }
    return ImmutableList.copyOf(rawValues);
}
|
@Test
public void read_multi_param_from_source_with_values() {
    // All values supplied by the source must come back, in order.
    String[] values = {"firstValue", "secondValue", "thirdValue"};
    when(source.getParameterValues("param")).thenReturn(values);
    List<String> result = underTest.readMultiParam("param");
    assertThat(result).containsExactly("firstValue", "secondValue", "thirdValue");
}
|
/**
 * Records a write stat under its partition path, creating the partition's
 * stat list on first use.
 *
 * @param partitionPath the partition the stat belongs to
 * @param stat the write stat to record
 */
public void addWriteStat(String partitionPath, HoodieWriteStat stat) {
  // computeIfAbsent replaces the racy/verbose contains-then-put double lookup.
  partitionToWriteStats.computeIfAbsent(partitionPath, ignored -> new ArrayList<>()).add(stat);
}
|
@Test
public void verifyFieldNamesInCommitMetadata() throws IOException {
    // Accumulate a batch of fake write stats and check the serialized field names stay stable.
    HoodieCommitMetadata commitMetadata = new HoodieCommitMetadata();
    for (HoodieWriteStat stat : HoodieTestUtils.generateFakeHoodieWriteStat(10)) {
        commitMetadata.addWriteStat(stat.getPartitionPath(), stat);
    }
    verifyMetadataFieldNames(commitMetadata, EXPECTED_FIELD_NAMES);
}
|
@Override
public CompletableFuture<SchemaAndMetadata> getSchema(String schemaId) {
    // Pure pass-through to the wrapped service.
    return this.service.getSchema(schemaId);
}
|
@Test
public void testGetSchemaByVersion() {
    // The versioned getSchema overload must delegate to the underlying service,
    // returning the exact same future instance.
    String schemaId = "test-schema-id";
    CompletableFuture<SchemaAndMetadata> getFuture = new CompletableFuture<>();
    when(underlyingService.getSchema(eq(schemaId), any(SchemaVersion.class)))
            .thenReturn(getFuture);
    assertSame(getFuture, service.getSchema(schemaId, SchemaVersion.Latest));
    verify(underlyingService, times(1))
            .getSchema(eq(schemaId), same(SchemaVersion.Latest));
}
|
/**
 * Renders a throwable as a single compact line: the exception summary followed
 * by at most {@code stackLevel} stack frames, tab-separated.
 *
 * @param e the throwable to render
 * @param stackLevel maximum number of stack frames to include (≤ 0 means none)
 * @return the compact representation
 */
public static String toShortString(Throwable e, int stackLevel) {
    StackTraceElement[] traces = e.getStackTrace();
    StringBuilder sb = new StringBuilder(1024);
    sb.append(e.toString()).append("\t");
    if (traces != null) {
        // Fold the old mid-loop break into the loop bound; same frames emitted.
        int limit = Math.min(stackLevel, traces.length);
        for (int i = 0; i < limit; i++) {
            sb.append("\tat ").append(traces[i]).append("\t");
        }
    }
    return sb.toString();
}
|
@Test
public void toShortString() throws Exception {
    // With stackLevel 1 exactly one "at" frame marker must appear in the output.
    SofaRpcException exception = new SofaRpcException(RpcErrorType.SERVER_BUSY, "111");
    String shortString = ExceptionUtils.toShortString(exception, 1);
    Assert.assertNotNull(shortString);
    Matcher matcher = Pattern.compile("at").matcher(shortString);
    int occurrences = 0;
    while (matcher.find()) {
        occurrences++;
    }
    Assert.assertTrue(occurrences == 1);
}
|
/**
 * Restores the currently selected trash items, refreshes the affected directory
 * views, and re-registers data-service extension hooks for any restored
 * transformations (including those inside restored directories).
 */
public void undelete() {
  // make a copy because the selected trash items changes as soon as trashService.undelete is called
  List<UIDeletedObject> selectedTrashFileItemsSnapshot = new ArrayList<UIDeletedObject>( selectedTrashFileItems );
  if ( selectedTrashFileItemsSnapshot != null && selectedTrashFileItemsSnapshot.size() > 0 ) {
    List<ObjectId> ids = new ArrayList<ObjectId>();
    for ( UIDeletedObject uiObj : selectedTrashFileItemsSnapshot ) {
      ids.add( uiObj.getId() );
    }
    try {
      trashService.undelete( ids );
      setTrash( trashService.getTrash() );
      for ( UIDeletedObject uiObj : selectedTrashFileItemsSnapshot ) {
        // find the closest UIRepositoryDirectory that is in the dirMap
        RepositoryDirectoryInterface dir = repository.findDirectory( uiObj.getOriginalParentPath() );
        while ( dir != null && dirMap.get( dir.getObjectId() ) == null ) {
          dir = dir.getParent();
        }
        // now refresh that UIRepositoryDirectory so that the file/folders deck instantly refreshes on undelete
        if ( dir != null ) {
          dirMap.get( dir.getObjectId() ).refresh();
        }
        // if transformation or directory with transformations call extension to restore data services references.
        if ( RepositoryObjectType.TRANSFORMATION.name().equals( uiObj.getType() ) ) {
          TransMeta transMeta = repository.loadTransformation( uiObj.getId(), null );
          ExtensionPointHandler
            .callExtensionPoint( LogChannel.GENERAL, KettleExtensionPoint.TransAfterOpen.id, transMeta );
          transMeta.clearChanged();
        } else if ( !RepositoryObjectType.JOB.name().equals( uiObj.getType() ) ) {
          // if not a transformation and not a job then is a Directory
          RepositoryDirectoryInterface
            actualDir =
            repository.findDirectory(
              uiObj.getOriginalParentPath() + RepositoryDirectory.DIRECTORY_SEPARATOR + uiObj.getName() );
          if ( actualDir != null ) {
            // Fire the after-open extension point for every transformation under the restored directory.
            List<RepositoryElementMetaInterface> transformations = new ArrayList<>();
            getAllTransformations( actualDir, transformations );
            for ( RepositoryElementMetaInterface repositoryElementMetaInterface : transformations ) {
              TransMeta transMeta = repository.loadTransformation( repositoryElementMetaInterface.getObjectId(), null );
              ExtensionPointHandler
                .callExtensionPoint( LogChannel.GENERAL, KettleExtensionPoint.TransAfterOpen.id, transMeta );
              transMeta.clearChanged();
            }
          } else {
            // Directory restore reported success but the directory cannot be found; tell the user.
            displayExceptionMessage( BaseMessages.getString( PKG, "TrashBrowseController.UnableToRestoreDirectory",
              uiObj.getOriginalParentPath() + RepositoryDirectory.DIRECTORY_SEPARATOR + uiObj.getName() ) );
          }
        }
      }
      deck.setSelectedIndex( 1 );
    } catch ( Throwable th ) {
      // A lost repository connection is handled by the main controller; anything else is shown to the user.
      if ( mainController == null || !mainController.handleLostRepository( th ) ) {
        displayExceptionMessage( BaseMessages.getString( PKG,
          "TrashBrowseController.UnableToRestoreFile", th.getLocalizedMessage() ) ); //$NON-NLS-1$
      }
    }
  } else {
    // ui probably allowed the button to be enabled when it shouldn't have been enabled
    throw new RuntimeException();
  }
}
|
@Test
public void testUnDeleteTransformation() throws Exception {
    // Restoring a transformation must reload it, clear its changed flag, and switch the deck.
    final String transformationType = RepositoryObjectType.TRANSFORMATION.name();
    testUnDelete( transformationType, true );
    verify( trashServiceMock, times( 1 ) ).undelete( anyList() );
    verify( repositoryMock, times( 1 ) ).loadTransformation( objectIdMock, null );
    verify( transMetaMock, times( 1 ) ).clearChanged();
    verify( deckMock, times( 1 ) ).setSelectedIndex( 1 );
}
|
/**
 * Reads the named column and converts its value to the requested Java type via
 * the supplied converter. Dispatches first on the column's value-meta type
 * (string/boolean/integer/number), then on the destination class.
 *
 * @param valueName the column name to read
 * @param destinationType the Java type to convert to (primitive, wrapper, String, or enum)
 * @param converter performs the individual value conversions
 * @return the converted value; may be null for null source values with object destinations
 * @throws KettleValueException if the column is unknown or the source type is unsupported
 * @throws RuntimeException if the destination type is unsupported for the source type
 */
public Object getAsJavaType( String valueName, Class<?> destinationType, InjectionTypeConverter converter )
  throws KettleValueException {
  int idx = rowMeta.indexOfValue( valueName );
  if ( idx < 0 ) {
    throw new KettleValueException( "Unknown column '" + valueName + "'" );
  }
  ValueMetaInterface metaType = rowMeta.getValueMeta( idx );
  // find by source value type
  switch ( metaType.getType() ) {
    case ValueMetaInterface.TYPE_STRING:
      // String sources delegate to a shared string-conversion helper.
      String vs = rowMeta.getString( data, idx );
      return getStringAsJavaType( vs, destinationType, converter );
    case ValueMetaInterface.TYPE_BOOLEAN:
      Boolean vb = rowMeta.getBoolean( data, idx );
      if ( String.class.isAssignableFrom( destinationType ) ) {
        return converter.boolean2string( vb );
      } else if ( int.class.isAssignableFrom( destinationType ) ) {
        return converter.boolean2intPrimitive( vb );
      } else if ( Integer.class.isAssignableFrom( destinationType ) ) {
        return converter.boolean2integer( vb );
      } else if ( long.class.isAssignableFrom( destinationType ) ) {
        return converter.boolean2longPrimitive( vb );
      } else if ( Long.class.isAssignableFrom( destinationType ) ) {
        return converter.boolean2long( vb );
      } else if ( boolean.class.isAssignableFrom( destinationType ) ) {
        return converter.boolean2booleanPrimitive( vb );
      } else if ( Boolean.class.isAssignableFrom( destinationType ) ) {
        return converter.boolean2boolean( vb );
      } else if ( destinationType.isEnum() ) {
        return converter.boolean2enum( destinationType, vb );
      } else {
        throw new RuntimeException( "Wrong value conversion to " + destinationType );
      }
    case ValueMetaInterface.TYPE_INTEGER:
      Long vi = rowMeta.getInteger( data, idx );
      if ( String.class.isAssignableFrom( destinationType ) ) {
        return converter.integer2string( vi );
      } else if ( int.class.isAssignableFrom( destinationType ) ) {
        return converter.integer2intPrimitive( vi );
      } else if ( Integer.class.isAssignableFrom( destinationType ) ) {
        return converter.integer2integer( vi );
      } else if ( long.class.isAssignableFrom( destinationType ) ) {
        return converter.integer2longPrimitive( vi );
      } else if ( Long.class.isAssignableFrom( destinationType ) ) {
        return converter.integer2long( vi );
      } else if ( boolean.class.isAssignableFrom( destinationType ) ) {
        return converter.integer2booleanPrimitive( vi );
      } else if ( Boolean.class.isAssignableFrom( destinationType ) ) {
        return converter.integer2boolean( vi );
      } else if ( destinationType.isEnum() ) {
        return converter.integer2enum( destinationType, vi );
      } else {
        throw new RuntimeException( "Wrong value conversion to " + destinationType );
      }
    case ValueMetaInterface.TYPE_NUMBER:
      Double vn = rowMeta.getNumber( data, idx );
      if ( String.class.isAssignableFrom( destinationType ) ) {
        return converter.number2string( vn );
      } else if ( int.class.isAssignableFrom( destinationType ) ) {
        return converter.number2intPrimitive( vn );
      } else if ( Integer.class.isAssignableFrom( destinationType ) ) {
        return converter.number2integer( vn );
      } else if ( long.class.isAssignableFrom( destinationType ) ) {
        return converter.number2longPrimitive( vn );
      } else if ( Long.class.isAssignableFrom( destinationType ) ) {
        return converter.number2long( vn );
      } else if ( boolean.class.isAssignableFrom( destinationType ) ) {
        return converter.number2booleanPrimitive( vn );
      } else if ( Boolean.class.isAssignableFrom( destinationType ) ) {
        return converter.number2boolean( vn );
      } else if ( destinationType.isEnum() ) {
        return converter.number2enum( destinationType, vn );
      } else {
        throw new RuntimeException( "Wrong value conversion to " + destinationType );
      }
  }
  // Source types without a case above (e.g. date, binary) are not convertible here.
  throw new KettleValueException( "Unknown conversion from " + metaType.getTypeDesc() + " into " + destinationType );
}
|
@Test
public void testIntegerConversion() throws Exception {
row = new RowMetaAndData( rowsMeta, null, null, 7L );
assertEquals( true, row.getAsJavaType( "int", boolean.class, converter ) );
assertEquals( true, row.getAsJavaType( "int", Boolean.class, converter ) );
assertEquals( 7, row.getAsJavaType( "int", int.class, converter ) );
assertEquals( 7, row.getAsJavaType( "int", Integer.class, converter ) );
assertEquals( 7L, row.getAsJavaType( "int", long.class, converter ) );
assertEquals( 7L, row.getAsJavaType( "int", Long.class, converter ) );
assertEquals( "7", row.getAsJavaType( "int", String.class, converter ) );
row = new RowMetaAndData( rowsMeta, null, null, 0L );
assertEquals( false, row.getAsJavaType( "int", boolean.class, converter ) );
assertEquals( false, row.getAsJavaType( "int", Boolean.class, converter ) );
row = new RowMetaAndData( rowsMeta, null, null, null );
assertEquals( null, row.getAsJavaType( "int", String.class, converter ) );
assertEquals( null, row.getAsJavaType( "int", Integer.class, converter ) );
assertEquals( null, row.getAsJavaType( "int", Long.class, converter ) );
assertEquals( null, row.getAsJavaType( "int", Boolean.class, converter ) );
}
|
public static <T> List<LocalProperty<T>> grouped(Collection<T> columns)
{
return ImmutableList.of(new GroupingProperty<>(columns));
}
|
@Test
public void testConstantWithMultiGroup()
{
List<LocalProperty<String>> actual = builder()
.constant("a")
.grouped("a", "b")
.grouped("a", "c")
.build();
assertMatch(
actual,
builder().grouped("a", "b", "c", "d").build(),
Optional.of(grouped("d")));
assertMatch(
actual,
builder().grouped("a", "b", "c").build(),
Optional.empty());
assertMatch(
actual,
builder().grouped("a", "b").build(),
Optional.empty());
assertMatch(
actual,
builder().grouped("a", "c").build(),
Optional.of(grouped("c")));
assertMatch(
actual,
builder().grouped("b").build(),
Optional.empty());
assertMatch(
actual,
builder().grouped("b", "c").build(),
Optional.empty());
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.