focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
/**
 * Orders two datanodes by DFS used percentage.
 *
 * Two nodes compare as equal (0) when any of the following holds:
 * the descriptors are equal, both nodes are under the tolerance limit and
 * their usage difference is within the configured tolerance, or local
 * balancing is requested and the first node is under 50% used.
 * Otherwise the less-used node orders first (-1).
 */
protected int compareDataNode(final DatanodeDescriptor a,
final DatanodeDescriptor b, boolean isBalanceLocal) {
final boolean underToleranceLimit = Math.max(a.getDfsUsedPercent(), b.getDfsUsedPercent())
< balancedSpaceToleranceLimit;
final boolean withinTolerance = underToleranceLimit
&& Math.abs(a.getDfsUsedPercent() - b.getDfsUsedPercent()) < balancedSpaceTolerance;
final boolean preferLocal = isBalanceLocal && a.getDfsUsedPercent() < 50;
if (a.equals(b) || withinTolerance || preferLocal) {
return 0;
}
return a.getDfsUsedPercent() < b.getDfsUsedPercent() ? -1 : 1;
}
|
@Test
public void testCompareDataNode() {
// Builds five datanodes on distinct racks, gives each a different DFS
// usage via heartbeats, then checks the comparator's ordering and its
// tolerance band (nodes within tolerance compare as equal).
DatanodeDescriptor[] tolerateDataNodes;
DatanodeStorageInfo[] tolerateStorages;
int capacity = 5;
Collection<Node> allTolerateNodes = new ArrayList<>(capacity);
String[] ownerRackOfTolerateNodes = new String[capacity];
for (int i = 0; i < capacity; i++) {
ownerRackOfTolerateNodes[i] = "rack"+i;
}
tolerateStorages = DFSTestUtil.createDatanodeStorageInfos(ownerRackOfTolerateNodes);
tolerateDataNodes = DFSTestUtil.toDatanodeDescriptor(tolerateStorages);
Collections.addAll(allTolerateNodes, tolerateDataNodes);
final BlockManager bm = namenode.getNamesystem().getBlockManager();
AvailableSpaceBlockPlacementPolicy toleratePlacementPolicy =
(AvailableSpaceBlockPlacementPolicy)bm.getBlockPlacementPolicy();
// Each node has the same capacity (30 units); used space steps down from
// 29/30 to 25/30 units, i.e. ~96.6% down to ~83.3% used.
//96.6%
updateHeartbeatWithUsage(tolerateDataNodes[0],
30 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
29 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE
* blockSize, 0L, 0L, 0L, 0, 0);
//93.3%
updateHeartbeatWithUsage(tolerateDataNodes[1],
30 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
28 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE
* blockSize, 0L, 0L, 0L, 0, 0);
//90.0%
updateHeartbeatWithUsage(tolerateDataNodes[2],
30 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
27 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE
* blockSize, 0L, 0L, 0L, 0, 0);
//86.6%
updateHeartbeatWithUsage(tolerateDataNodes[3],
30 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
26 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE
* blockSize, 0L, 0L, 0L, 0, 0);
//83.3%
updateHeartbeatWithUsage(tolerateDataNodes[4],
30 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
25 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
HdfsServerConstants.MIN_BLOCKS_FOR_WRITE
* blockSize, 0L, 0L, 0L, 0, 0);
// Nodes far apart in usage order strictly (more used sorts last)...
assertTrue(toleratePlacementPolicy.compareDataNode(tolerateDataNodes[0],
tolerateDataNodes[1], false) == 1);
assertTrue(toleratePlacementPolicy.compareDataNode(tolerateDataNodes[1],
tolerateDataNodes[0], false) == -1);
assertTrue(toleratePlacementPolicy.compareDataNode(tolerateDataNodes[1],
tolerateDataNodes[2], false) == 1);
assertTrue(toleratePlacementPolicy.compareDataNode(tolerateDataNodes[2],
tolerateDataNodes[1], false) == -1);
// ...while nodes 2 and 3 fall inside the tolerance band and tie.
assertTrue(toleratePlacementPolicy.compareDataNode(tolerateDataNodes[2],
tolerateDataNodes[3], false) == 0);
assertTrue(toleratePlacementPolicy.compareDataNode(tolerateDataNodes[3],
tolerateDataNodes[2], false) == 0);
assertTrue(toleratePlacementPolicy.compareDataNode(tolerateDataNodes[2],
tolerateDataNodes[4], false) == 1);
assertTrue(toleratePlacementPolicy.compareDataNode(tolerateDataNodes[4],
tolerateDataNodes[2], false) == -1);
}
|
// Runs one scheduling pass over all lifespan schedule groups: fetches split
// batches, computes placements, assigns splits to tasks, and returns either a
// non-blocked or blocked ScheduleResult describing overall progress.
// Synchronized: schedule state (scheduleGroups, state) is mutated throughout.
@Override
public synchronized ScheduleResult schedule()
{
dropListenersFromWhenFinishedOrNewLifespansAdded();
int overallSplitAssignmentCount = 0;
ImmutableSet.Builder<RemoteTask> overallNewTasks = ImmutableSet.builder();
List<ListenableFuture<?>> overallBlockedFutures = new ArrayList<>();
boolean anyBlockedOnPlacements = false;
boolean anyBlockedOnNextSplitBatch = false;
boolean anyNotBlocked = false;
// Phase 1 (per group): pull the next split batch if needed, place pending
// splits, and transition the group's state machine.
for (Entry<Lifespan, ScheduleGroup> entry : scheduleGroups.entrySet()) {
Lifespan lifespan = entry.getKey();
ScheduleGroup scheduleGroup = entry.getValue();
if (scheduleGroup.state == ScheduleGroupState.NO_MORE_SPLITS || scheduleGroup.state == ScheduleGroupState.DONE) {
verify(scheduleGroup.nextSplitBatchFuture == null);
}
else if (scheduleGroup.pendingSplits.isEmpty()) {
// try to get the next batch
if (scheduleGroup.nextSplitBatchFuture == null) {
scheduleGroup.nextSplitBatchFuture = splitSource.getNextBatch(scheduleGroup.partitionHandle, lifespan, splitBatchSize);
long start = System.nanoTime();
addSuccessCallback(scheduleGroup.nextSplitBatchFuture, () -> stage.recordGetSplitTime(start));
}
if (scheduleGroup.nextSplitBatchFuture.isDone()) {
SplitBatch nextSplits = getFutureValue(scheduleGroup.nextSplitBatchFuture);
scheduleGroup.nextSplitBatchFuture = null;
scheduleGroup.pendingSplits = new HashSet<>(nextSplits.getSplits());
if (nextSplits.isLastBatch()) {
if (scheduleGroup.state == ScheduleGroupState.INITIALIZED && scheduleGroup.pendingSplits.isEmpty()) {
// Add an empty split in case no splits have been produced for the source.
// For source operators, they never take input, but they may produce output.
// This is well handled by Presto execution engine.
// However, there are certain non-source operators that may produce output without any input,
// for example, 1) an AggregationOperator, 2) a HashAggregationOperator where one of the grouping sets is ().
// Scheduling an empty split kicks off necessary driver instantiation to make this work.
scheduleGroup.pendingSplits.add(new Split(
splitSource.getConnectorId(),
splitSource.getTransactionHandle(),
new EmptySplit(splitSource.getConnectorId()),
lifespan,
NON_CACHEABLE));
}
scheduleGroup.state = ScheduleGroupState.NO_MORE_SPLITS;
}
}
else {
// Batch is still being fetched: record it as a blocking future and
// move on to the next group.
overallBlockedFutures.add(scheduleGroup.nextSplitBatchFuture);
anyBlockedOnNextSplitBatch = true;
continue;
}
}
Multimap<InternalNode, Split> splitAssignment = ImmutableMultimap.of();
if (!scheduleGroup.pendingSplits.isEmpty()) {
if (!scheduleGroup.placementFuture.isDone()) {
anyBlockedOnPlacements = true;
continue;
}
if (scheduleGroup.state == ScheduleGroupState.INITIALIZED) {
scheduleGroup.state = ScheduleGroupState.SPLITS_ADDED;
}
if (state == State.INITIALIZED) {
state = State.SPLITS_ADDED;
}
// calculate placements for splits
SplitPlacementResult splitPlacementResult = splitPlacementPolicy.computeAssignments(scheduleGroup.pendingSplits);
splitAssignment = splitPlacementResult.getAssignments();
// remove splits with successful placements
splitAssignment.values().forEach(scheduleGroup.pendingSplits::remove); // AbstractSet.removeAll performs terribly here.
overallSplitAssignmentCount += splitAssignment.size();
// if not completed placed, mark scheduleGroup as blocked on placement
if (!scheduleGroup.pendingSplits.isEmpty()) {
scheduleGroup.placementFuture = splitPlacementResult.getBlocked();
overallBlockedFutures.add(scheduleGroup.placementFuture);
anyBlockedOnPlacements = true;
}
}
// if no new splits will be assigned, update state and attach completion event
Multimap<InternalNode, Lifespan> noMoreSplitsNotification = ImmutableMultimap.of();
if (scheduleGroup.pendingSplits.isEmpty() && scheduleGroup.state == ScheduleGroupState.NO_MORE_SPLITS) {
scheduleGroup.state = ScheduleGroupState.DONE;
if (!lifespan.isTaskWide()) {
InternalNode node = ((BucketedSplitPlacementPolicy) splitPlacementPolicy).getNodeForBucket(lifespan.getId());
noMoreSplitsNotification = ImmutableMultimap.of(node, lifespan);
}
}
// assign the splits with successful placements
overallNewTasks.addAll(assignSplits(splitAssignment, noMoreSplitsNotification));
// Assert that "placement future is not done" implies "pendingSplits is not empty".
// The other way around is not true. One obvious reason is (un)lucky timing, where the placement is unblocked between `computeAssignments` and this line.
// However, there are other reasons that could lead to this.
// Note that `computeAssignments` is quite broken:
// 1. It always returns a completed future when there are no tasks, regardless of whether all nodes are blocked.
// 2. The returned future will only be completed when a node with an assigned task becomes unblocked. Other nodes don't trigger future completion.
// As a result, to avoid busy loops caused by 1, we check pendingSplits.isEmpty() instead of placementFuture.isDone() here.
if (scheduleGroup.nextSplitBatchFuture == null && scheduleGroup.pendingSplits.isEmpty() && scheduleGroup.state != ScheduleGroupState.DONE) {
anyNotBlocked = true;
}
}
// Phase 2: decide the overall result.
// * `splitSource.isFinished` invocation may fail after `splitSource.close` has been invoked.
// If state is NO_MORE_SPLITS/FINISHED, splitSource.isFinished has previously returned true, and splitSource is closed now.
// * Even if `splitSource.isFinished()` return true, it is not necessarily safe to tear down the split source.
// * If anyBlockedOnNextSplitBatch is true, it means we have not checked out the recently completed nextSplitBatch futures,
// which may contain recently published splits. We must not ignore those.
// * If any scheduleGroup is still in DISCOVERING_SPLITS state, it means it hasn't realized that there will be no more splits.
// Next time it invokes getNextBatch, it will realize that. However, the invocation will fail we tear down splitSource now.
//
// Since grouped execution is going to support failure recovery, and scheduled splits might have to be rescheduled during retry,
// we can no longer claim schedule is complete after all splits are scheduled.
// Splits schedule can only be considered as finished when all lifespan executions are done
// (by calling `notifyAllLifespansFinishedExecution`)
if ((state == State.NO_MORE_SPLITS || state == State.FINISHED) || (!groupedExecution && lifespanAdded && scheduleGroups.isEmpty() && splitSource.isFinished())) {
switch (state) {
case INITIALIZED:
// We have not scheduled a single split so far.
// But this shouldn't be possible. See usage of EmptySplit in this method.
throw new IllegalStateException("At least 1 split should have been scheduled for this plan node");
case SPLITS_ADDED:
state = State.NO_MORE_SPLITS;
splitSource.close();
// fall through
case NO_MORE_SPLITS:
state = State.FINISHED;
whenFinishedOrNewLifespanAdded.set(null);
// fall through
case FINISHED:
return ScheduleResult.nonBlocked(
true,
overallNewTasks.build(),
overallSplitAssignmentCount);
default:
throw new IllegalStateException("Unknown state");
}
}
if (anyNotBlocked) {
return ScheduleResult.nonBlocked(false, overallNewTasks.build(), overallSplitAssignmentCount);
}
if (anyBlockedOnPlacements) {
// In a broadcast join, output buffers of the tasks in build source stage have to
// hold onto all data produced before probe side task scheduling finishes,
// even if the data is acknowledged by all known consumers. This is because
// new consumers may be added until the probe side task scheduling finishes.
//
// As a result, the following line is necessary to prevent deadlock
// due to neither build nor probe can make any progress.
// The build side blocks due to a full output buffer.
// In the meantime the probe side split cannot be consumed since
// builder side hash table construction has not finished.
//
// TODO: When SourcePartitionedScheduler is used as a SourceScheduler, it shouldn't need to worry about
// task scheduling and creation -- these are done by the StageScheduler.
overallNewTasks.addAll(finalizeTaskCreationIfNecessary());
}
// Blocked: report the most specific reason available.
ScheduleResult.BlockedReason blockedReason;
if (anyBlockedOnNextSplitBatch) {
blockedReason = anyBlockedOnPlacements ? MIXED_SPLIT_QUEUES_FULL_AND_WAITING_FOR_SOURCE : WAITING_FOR_SOURCE;
}
else {
blockedReason = anyBlockedOnPlacements ? SPLIT_QUEUES_FULL : NO_ACTIVE_DRIVER_GROUP;
}
overallBlockedFutures.add(whenFinishedOrNewLifespanAdded);
return ScheduleResult.blocked(
false,
overallNewTasks.build(),
nonCancellationPropagating(whenAnyComplete(overallBlockedFutures)),
blockedReason,
overallSplitAssignmentCount);
}
|
@Test
public void testScheduleSplitsOneAtATime()
{
// Drives a scheduler over a 60-split source with a batch size of 1 and
// verifies per-iteration invariants: never blocked, tasks created only for
// the first three schedule calls, and splits evenly distributed (20 each).
SubPlan plan = createPlan();
NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService);
SqlStageExecution stage = createSqlStageExecution(plan, nodeTaskMap);
StageScheduler scheduler = getSourcePartitionedScheduler(createFixedSplitSource(60, TestingSplit::createRemoteSplit), stage, nodeManager, nodeTaskMap, 1);
for (int i = 0; i < 60; i++) {
ScheduleResult scheduleResult = scheduler.schedule();
// only finishes when last split is fetched
if (i == 59) {
assertEffectivelyFinished(scheduleResult, scheduler);
}
else {
assertFalse(scheduleResult.isFinished());
}
// never blocks
assertTrue(scheduleResult.getBlocked().isDone());
// first three splits create new tasks
assertEquals(scheduleResult.getNewTasks().size(), i < 3 ? 1 : 0);
assertEquals(stage.getAllTasks().size(), i < 3 ? i + 1 : 3);
assertPartitionedSplitCount(stage, min(i + 1, 60));
}
// 60 splits over 3 tasks => 20 splits per task.
for (RemoteTask remoteTask : stage.getAllTasks()) {
PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo();
assertEquals(splitsInfo.getCount(), 20);
}
stage.abort();
}
|
// Creates a changelog writer for one operator/key-group range. Each writer
// gets a fresh log id drawn from the monotonically increasing generator
// (the UUID's high bits are always 0, so ids are sequential and loggable).
@Override
public FsStateChangelogWriter createWriter(
String operatorID, KeyGroupRange keyGroupRange, MailboxExecutor mailboxExecutor) {
UUID logId = new UUID(0, logIdGenerator.getAndIncrement());
LOG.info("createWriter for operator {}/{}: {}", operatorID, keyGroupRange, logId);
return new FsStateChangelogWriter(
logId,
keyGroupRange,
uploader,
preEmptivePersistThresholdInBytes,
mailboxExecutor,
changelogRegistry,
localRecoveryConfig,
localChangelogRegistry);
}
|
@Test
public void testDeadlockOnUploadCompletion() throws Throwable {
// Regression test: the writer thread blocks waiting for capacity while a
// background thread unblocks the uploader only after three uploads have
// started. If upload completion callbacks deadlock against the waiting
// writer, unblockFuture.join() below never returns.
int capacity = 10; // in bytes, allow the first two uploads without waiting (see below)
CountDownLatch remainingUploads = new CountDownLatch(3);
BlockingUploader blockingUploader = new BlockingUploader();
CompletableFuture<Void> unblockFuture = new CompletableFuture<>();
new Thread(
() -> {
try {
remainingUploads.await();
blockingUploader.unblock();
unblockFuture.complete(null);
} catch (Throwable e) {
unblockFuture.completeExceptionally(e);
}
})
.start();
MailboxExecutorImpl mailboxExecutor =
new MailboxExecutorImpl(
new TaskMailboxImpl(), 0, StreamTaskActionExecutor.IMMEDIATE);
try (BatchingStateChangeUploadScheduler scheduler =
new BatchingStateChangeUploadScheduler(
0, // schedule immediately
0, // schedule immediately
RetryPolicy.NONE,
blockingUploader,
1,
capacity,
createUnregisteredChangelogStorageMetricGroup()) {
// Counts upload attempts so the unblocking thread knows when all
// three appends below have reached the uploader.
@Override
public void upload(UploadTask uploadTask) throws IOException {
remainingUploads.countDown();
super.upload(uploadTask);
}
};
StateChangelogWriter<?> writer =
new FsStateChangelogStorage(
scheduler,
0,
TaskChangelogRegistry.NO_OP, /* persist immediately */
TestLocalRecoveryConfig.disabled())
.createWriter(
new OperatorID().toString(),
KeyGroupRange.of(0, 0),
mailboxExecutor); ) {
// 1. start with 1-byte request - releasing only it will NOT allow proceeding in 3, but
// still involves completion callback, which can deadlock
writer.append(0, new byte[1]);
// 2. exceed capacity
writer.append(0, new byte[capacity]);
// 3. current thread will block until both previous requests are completed
// verify that completion can proceed while this thread is waiting
writer.append(0, new byte[1]);
}
// check unblocking thread exit status
unblockFuture.join();
}
|
// Returns sorter options with default settings: temp files under /tmp,
// a 100 MB memory budget, and the Hadoop-based sorter implementation.
public static Options options() {
return new Options("/tmp", 100, SorterType.HADOOP);
}
|
@Test
public void testNegativeMemory() {
// withMemoryMB must reject non-positive values; the ExpectedException rule
// verifies both the exception type and its message.
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage("memoryMB must be greater than zero");
BufferedExternalSorter.Options options =
BufferedExternalSorter.options().withTempLocation(getTmpLocation().toString());
options.withMemoryMB(-1);
}
|
// Places the given order by kicking off its shipping request.
// NOTE(review): downstream handling (payment, messaging, etc.) is presumably
// chained from sendShippingRequest — confirm against that method.
void placeOrder(Order order) {
sendShippingRequest(order);
}
|
@Test
void testPlaceOrderUnknownException() throws Exception {
// Scales the configured time limits by factors 0.1..1.9 and verifies that
// placing an order under an unknown-exception commander still assigns an id.
// NOTE(review): the scaled local time limits below are never passed back into
// the Commander — they accumulate across iterations but appear unused; verify
// whether buildCommanderObjectUnknownException() should receive them.
long paymentTime = timeLimits.paymentTime();
long queueTaskTime = timeLimits.queueTaskTime();
long messageTime = timeLimits.messageTime();
long employeeTime = timeLimits.employeeTime();
long queueTime = timeLimits.queueTime();
for (double d = 0.1; d < 2; d = d + 0.1) {
paymentTime *= d;
queueTaskTime *= d;
messageTime *= d;
employeeTime *= d;
queueTime *= d;
Commander c = buildCommanderObjectUnknownException();
var order = new Order(new User("K", "J"), "pen", 1f);
// NOTE(review): loop variable ms is unused — the order is simply placed
// once per MessageSent constant.
for (Order.MessageSent ms : Order.MessageSent.values()) {
c.placeOrder(order);
assertFalse(StringUtils.isBlank(order.id));
}
}
}
|
// Returns whether the given IP address is covered by this machine list.
// When the list is a wildcard ("all"), every caller gets true — note this
// short-circuits BEFORE the null check, so a null address is accepted in
// that case. Unresolvable hostnames are treated as not included rather
// than raising.
public boolean includes(String ipAddress) {
if (all) {
return true;
}
if (ipAddress == null) {
throw new IllegalArgumentException("ipAddress is null.");
}
try {
// Delegate to the InetAddress-based overload after resolving.
return includes(addressFactory.getByName(ipAddress));
} catch (UnknownHostException e) {
return false;
}
}
|
@Test
public void testCIDRWith8BitMask() {
//create MachineList with a list of ip ranges specified in CIDR format
MachineList ml = new MachineList(CIDR_LIST2, new TestAddressFactory());
//test for inclusion/exclusion
// The accepted range is exactly 10.241.23.0 - 10.241.23.255, i.e. an
// 8-bit host portion (a /24 prefix); both neighbors are excluded.
assertFalse(ml.includes("10.241.22.255"));
assertTrue(ml.includes("10.241.23.0"));
assertTrue(ml.includes("10.241.23.1"));
assertTrue(ml.includes("10.241.23.254"));
assertTrue(ml.includes("10.241.23.255"));
assertFalse(ml.includes("10.241.24.0"));
//test for exclusion with an unknown IP
assertFalse(ml.includes("10.119.103.111"));
}
|
/**
 * Reads four bytes starting at {@code off} in big-endian order and
 * reinterprets the resulting bit pattern as an IEEE-754 float.
 */
public static float bytesToFloatBE(byte[] bytes, int off) {
    final int bits = bytesToIntBE(bytes, off);
    return Float.intBitsToFloat(bits);
}
|
@Test
public void testBytesToFloatBE() {
// FLOAT_PI_BE holds pi's big-endian byte encoding; decoding it must be
// bit-exact, hence the 0 delta.
assertEquals((float) Math.PI,
ByteUtils.bytesToFloatBE(FLOAT_PI_BE, 0), 0);
}
|
/**
 * Writes all entries to the wrapped store, recording the latency on the
 * putAll sensor. Every key is validated first so a null key fails fast
 * before any entry is written.
 *
 * @throws NullPointerException if any entry has a null key
 */
@Override
public void putAll(final List<KeyValue<K, V>> entries) {
    for (final KeyValue<K, V> entry : entries) {
        Objects.requireNonNull(entry.key, "key cannot be null");
    }
    maybeMeasureLatency(() -> wrapped().putAll(innerEntries(entries)), time, putAllSensor);
}
|
@Test
public void shouldThrowNullPointerOnPutAllIfAnyKeyIsNull() {
// A single entry with a null key must fail the up-front validation in putAll.
setUpWithoutContext();
assertThrows(NullPointerException.class, () -> metered.putAll(Collections.singletonList(KeyValue.pair(null, VALUE))));
}
|
/**
 * Extracts the role from the configured claim of a decoded JWT.
 *
 * <p>The claim may be either a single string or a JSON array of strings;
 * for an array the first element wins (multiple roles are logged at debug).
 * Returns {@code null} when the claim is absent, empty, or cannot be decoded.
 *
 * <p>Fix: the original fetched the claim a second time via
 * {@code jwt.getClaim(this.roleClaim)} for the list case instead of reusing
 * the already-fetched claim; the local is also renamed so it no longer
 * shadows the {@code roleClaim} field.
 */
String getRole(DecodedJWT jwt) {
    try {
        Claim claim = jwt.getClaim(this.roleClaim);
        if (claim.isNull()) {
            // The claim was not present in the JWT
            return null;
        }
        String role = claim.asString();
        if (role != null) {
            // The role is non null only if the JSON node is a text field
            return role;
        }
        // Not a text node: try to interpret the claim as a string array.
        List<String> roles = claim.asList(String.class);
        if (roles == null || roles.isEmpty()) {
            return null;
        }
        if (roles.size() > 1) {
            log.debug("JWT for subject [{}] has multiple roles; using the first one.", jwt.getSubject());
        }
        return roles.get(0);
    } catch (JWTDecodeException e) {
        log.error("Exception while retrieving role from JWT", e);
        return null;
    }
}
|
@Test void ensureMissingRoleClaimReturnsNull() throws Exception {
// Build an empty JWT
DefaultJwtBuilder defaultJwtBuilder = new DefaultJwtBuilder();
defaultJwtBuilder.setAudience(basicProviderAudience);
DecodedJWT jwtWithoutSub = JWT.decode(defaultJwtBuilder.compact());
// A JWT with an empty role claim must result in a null role
assertNull(basicProvider.getRole(jwtWithoutSub));
}
|
// Coerces the left/right typed expressions to mutually comparable types.
// The cascade of checks below is order-sensitive: identical/unification
// cases short-circuit first, an incompatible pair fails fast, and the
// first matching coercion rule wins. rightAsStaticField flags coercions
// (date/time parsing) whose result should be hoisted into a static field.
public CoercedExpressionResult coerce() {
final Class<?> leftClass = left.getRawClass();
final Class<?> nonPrimitiveLeftClass = toNonPrimitiveType(leftClass);
final Class<?> rightClass = right.getRawClass();
final Class<?> nonPrimitiveRightClass = toNonPrimitiveType(rightClass);
boolean sameClass = leftClass == rightClass;
boolean isUnificationExpression = left instanceof UnificationTypedExpression || right instanceof UnificationTypedExpression;
if (sameClass || isUnificationExpression) {
// Nothing to coerce: types already agree (or unification handles it).
return new CoercedExpressionResult(left, right);
}
if (!canCoerce()) {
throw new CoercedExpressionException(new InvalidExpressionErrorResult("Comparison operation requires compatible types. Found " + leftClass + " and " + rightClass));
}
// int/long vs double: widen the LEFT side to double via an explicit cast.
if ((nonPrimitiveLeftClass == Integer.class || nonPrimitiveLeftClass == Long.class) && nonPrimitiveRightClass == Double.class) {
CastExpr castExpression = new CastExpr(PrimitiveType.doubleType(), this.left.getExpression());
return new CoercedExpressionResult(
new TypedExpression(castExpression, double.class, left.getType()),
right,
false);
}
final boolean leftIsPrimitive = leftClass.isPrimitive() || Number.class.isAssignableFrom( leftClass );
final boolean canCoerceLiteralNumberExpr = canCoerceLiteralNumberExpr(leftClass);
boolean rightAsStaticField = false;
final Expression rightExpression = right.getExpression();
final TypedExpression coercedRight;
if (leftIsPrimitive && canCoerceLiteralNumberExpr && rightExpression instanceof LiteralStringValueExpr) {
// Numeric literal on the right: rewrite the literal to the left's type.
final Expression coercedLiteralNumberExprToType = coerceLiteralNumberExprToType((LiteralStringValueExpr) right.getExpression(), leftClass);
coercedRight = right.cloneWithNewExpression(coercedLiteralNumberExprToType);
coercedRight.setType( leftClass );
} else if (shouldCoerceBToString(left, right)) {
coercedRight = coerceToString(right);
} else if (isNotBinaryExpression(right) && canBeNarrowed(leftClass, rightClass) && right.isNumberLiteral()) {
coercedRight = castToClass(leftClass);
} else if (leftClass == long.class && rightClass == int.class) {
coercedRight = right.cloneWithNewExpression(new CastExpr(PrimitiveType.longType(), right.getExpression()));
} else if (leftClass == Date.class && rightClass == String.class) {
// Date-from-string coercions produce a parsed constant; mark it so the
// generated code stores it in a static field.
coercedRight = coerceToDate(right);
rightAsStaticField = true;
} else if (leftClass == LocalDate.class && rightClass == String.class) {
coercedRight = coerceToLocalDate(right);
rightAsStaticField = true;
} else if (leftClass == LocalDateTime.class && rightClass == String.class) {
coercedRight = coerceToLocalDateTime(right);
rightAsStaticField = true;
} else if (shouldCoerceBToMap()) {
coercedRight = castToClass(toNonPrimitiveType(leftClass));
} else if (isBoolean(leftClass) && !isBoolean(rightClass)) {
coercedRight = coerceBoolean(right);
} else {
coercedRight = right;
}
// The left side is only ever coerced in the Character-vs-String case.
final TypedExpression coercedLeft;
if (nonPrimitiveLeftClass == Character.class && shouldCoerceBToString(right, left)) {
coercedLeft = coerceToString(left);
} else {
coercedLeft = left;
}
return new CoercedExpressionResult(coercedLeft, coercedRight, rightAsStaticField);
}
|
@Test
public void doNotCastNameExprLiterals2() {
// A name expression (not a literal) on the right must NOT be cast even
// though Double vs int would otherwise be narrowable.
final TypedExpression left = expr("exprDouble", java.lang.Double.class);
final TypedExpression right = expr("$age", int.class);
final CoercedExpression.CoercedExpressionResult coerce = new CoercedExpression(left, right, false).coerce();
assertThat(coerce.getCoercedRight()).isEqualTo(expr("$age", int.class));
}
|
/**
 * Demo entry point: seeds a simulated person database, then looks up
 * persons through a {@code PersonFinder}, showing that a repeated lookup
 * is served from the finder's cache rather than the database.
 */
public static void main(String[] args) {
    // Seed the simulated database with sample persons.
    PersonDbSimulatorImplementation db = new PersonDbSimulatorImplementation();
    db.insert(new Person(1, "John", 27304159));
    db.insert(new Person(2, "Thomas", 42273631));
    db.insert(new Person(3, "Arthur", 27489171));
    db.insert(new Person(4, "Finn", 20499078));
    db.insert(new Person(5, "Michael", 40599078));
    // Wire a finder to the database.
    PersonFinder finder = new PersonFinder();
    finder.setDb(db);
    // Find persons in DataBase not the map.
    LOGGER.info(finder.getPerson(2).toString());
    LOGGER.info(finder.getPerson(4).toString());
    LOGGER.info(finder.getPerson(5).toString());
    // Find the person in the map.
    LOGGER.info(finder.getPerson(2).toString());
}
|
@Test
void shouldExecuteApplicationWithoutException() {
// Smoke test: the demo main must run end to end without throwing.
assertDoesNotThrow(() -> App.main(new String[]{}));
}
|
// Handles an uncaught exception from any thread. StreamThread failures are
// tolerated: they only trip the optional latch (used by tests) and return.
// Any other thread's failure is logged, echoed to stderr, flushed, and
// terminates the process via the injected SystemExit with status -1.
void uncaughtExceptionInternal(
final Thread t,
final Throwable e,
final SystemExit systemExit) {
if (t instanceof StreamThread) {
countDownLatch.ifPresent(CountDownLatch::countDown);
return;
}
log.error("Unhandled exception caught in thread {}.", t.getName(), e);
System.err.println(
"Unhandled exception caught in thread: " + t.getName() + ". Exception:" + e.getMessage());
// Flush logs before exiting so the error above is not lost.
flusher.run();
systemExit.exit(-1);
}
|
@Test
public void shouldNotSystemExitWhenStreamThreadThrowsAnError() throws InterruptedException {
// When: a StreamThread reports an uncaught exception
final CountDownLatch latch = new CountDownLatch(1);
KsqlUncaughtExceptionHandler handler = new KsqlUncaughtExceptionHandler(LogManager::shutdown, Optional.of(latch));
handler.uncaughtExceptionInternal(streamThread, new Exception(), new MockSystemExit());
// Then: the handler only counts down the latch (no exit); the generous
// timeout guards against a hung handler, not real waiting.
assertThat(latch.await(60000, TimeUnit.MILLISECONDS), is(true));
}
|
/**
 * Reads a 2-byte value from the buffer and returns it reinterpreted as a
 * UTF-16 code unit (narrowing cast from the short read).
 */
@Override
public char readChar() {
    final short value = readShort();
    return (char) value;
}
|
/**
 * Reading a char from an already-released buffer must fail with
 * {@link IllegalReferenceCountException}.
 *
 * <p>Replaces the anonymous {@code Executable} with an equivalent lambda —
 * {@code Executable} is a functional interface, so behavior is unchanged.
 */
@Test
public void testReadCharAfterRelease() {
    assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readChar());
}
|
/**
 * Writes a single unsigned byte.
 *
 * @param value the value to write; must fit in an unsigned byte [0, 255]
 * @throws ExceptionWithContext if the value is outside the unsigned-byte range
 * @throws IOException if the underlying write fails
 */
public void writeUbyte(int value) throws IOException {
    // Any bit outside the low 8 means the value is negative or > 0xFF.
    if ((value & ~0xFF) != 0) {
        throw new ExceptionWithContext("Unsigned byte value out of range: %d", value);
    }
    write(value);
}
|
// 256 is one past the unsigned-byte maximum (0xFF) and must be rejected.
@Test(expected=ExceptionWithContext.class)
public void testWriteUbyteOutOfBounds2() throws IOException {
writer.writeUbyte(256);
}
|
/**
 * Returns true when the given os.name value identifies a Windows system.
 *
 * <p>Fix: lowercases with {@code Locale.ROOT} instead of the default locale.
 * With a Turkish default locale, {@code "WINDOWS".toLowerCase()} maps 'I' to
 * dotless 'ı' and no longer contains "windows", so the check silently fails.
 */
static boolean isWindows0(String osName) {
    return osName.toLowerCase(java.util.Locale.ROOT).contains("windows");
}
|
@Test
public void test_isWindows0() {
// The match must be case-insensitive and substring-based ("Windows 10"),
// and anything non-Windows must be rejected regardless of casing.
assertTrue(OS.isWindows0("Windows"));
assertTrue(OS.isWindows0("wInDoWs"));
assertTrue(OS.isWindows0("Windows 10"));
assertTrue(OS.isWindows0("Windows 11"));
assertFalse(OS.isWindows0("LINUX"));
assertFalse(OS.isWindows0("LiNuX"));
assertFalse(OS.isWindows0("linux"));
assertFalse(OS.isWindows0("Mac OS X"));
}
|
/**
 * Parses the given hexadecimal string as an unsigned 64-bit value.
 *
 * <p>Uses the JDK's {@link Long#parseUnsignedLong(String, int)} (Java 8+)
 * instead of Guava's {@code UnsignedLongs.parseUnsignedLong}, removing the
 * third-party call; values up to 0xFFFFFFFFFFFFFFFF map onto the full
 * two's-complement long range exactly as before. (Minor difference: the JDK
 * variant also tolerates a leading '+' sign.)
 *
 * @param string hex digits, e.g. "ffff"
 * @return the parsed value, with bit 63 set for values above Long.MAX_VALUE
 * @throws NumberFormatException if the string is empty or not valid hex
 */
public static long fromHex(String string) {
    return Long.parseUnsignedLong(string, 16);
}
|
@Test
public void fromHex() throws Exception {
// Covers small values, round powers of two, and the maximum unsigned
// 64-bit value (which maps to -1 as a signed long).
assertEquals(15, Tools.fromHex("0f"));
assertEquals(16, Tools.fromHex("10"));
assertEquals(65535, Tools.fromHex("ffff"));
assertEquals(4096, Tools.fromHex("1000"));
assertEquals(0xffffffffffffffffL, Tools.fromHex("ffffffffffffffff"));
}
|
// Exchanges a refresh token for a new access token inside a transaction.
// Validates the token exists and is unexpired, that the requesting client
// owns it and allows refreshing, enforces that requested scopes are a
// subset of the refresh token's scopes, then builds, enhances, and persists
// the new access token (optionally rotating the refresh token).
@Override
@Transactional(value="defaultTransactionManager")
public OAuth2AccessTokenEntity refreshAccessToken(String refreshTokenValue, TokenRequest authRequest) throws AuthenticationException {
if (Strings.isNullOrEmpty(refreshTokenValue)) {
// throw an invalid token exception if there's no refresh token value at all
throw new InvalidTokenException("Invalid refresh token: " + refreshTokenValue);
}
// Look up the token, first purging it if it has already expired.
OAuth2RefreshTokenEntity refreshToken = clearExpiredRefreshToken(tokenRepository.getRefreshTokenByValue(refreshTokenValue));
if (refreshToken == null) {
// throw an invalid token exception if we couldn't find the token
throw new InvalidTokenException("Invalid refresh token: " + refreshTokenValue);
}
ClientDetailsEntity client = refreshToken.getClient();
AuthenticationHolderEntity authHolder = refreshToken.getAuthenticationHolder();
// make sure that the client requesting the token is the one who owns the refresh token
ClientDetailsEntity requestingClient = clientDetailsService.loadClientByClientId(authRequest.getClientId());
if (!client.getClientId().equals(requestingClient.getClientId())) {
// Ownership mismatch is treated as compromise: revoke the token.
tokenRepository.removeRefreshToken(refreshToken);
throw new InvalidClientException("Client does not own the presented refresh token");
}
//Make sure this client allows access token refreshing
if (!client.isAllowRefresh()) {
throw new InvalidClientException("Client does not allow refreshing access token!");
}
// clear out any access tokens
if (client.isClearAccessTokensOnRefresh()) {
tokenRepository.clearAccessTokensForRefreshToken(refreshToken);
}
if (refreshToken.isExpired()) {
tokenRepository.removeRefreshToken(refreshToken);
throw new InvalidTokenException("Expired refresh token: " + refreshTokenValue);
}
OAuth2AccessTokenEntity token = new OAuth2AccessTokenEntity();
// get the stored scopes from the authentication holder's authorization request; these are the scopes associated with the refresh token
Set<String> refreshScopesRequested = new HashSet<>(refreshToken.getAuthenticationHolder().getAuthentication().getOAuth2Request().getScope());
Set<SystemScope> refreshScopes = scopeService.fromStrings(refreshScopesRequested);
// remove any of the special system scopes
refreshScopes = scopeService.removeReservedScopes(refreshScopes);
Set<String> scopeRequested = authRequest.getScope() == null ? new HashSet<String>() : new HashSet<>(authRequest.getScope());
Set<SystemScope> scope = scopeService.fromStrings(scopeRequested);
// remove any of the special system scopes
scope = scopeService.removeReservedScopes(scope);
if (scope != null && !scope.isEmpty()) {
// ensure a proper subset of scopes
if (refreshScopes != null && refreshScopes.containsAll(scope)) {
// set the scope of the new access token if requested
token.setScope(scopeService.toStrings(scope));
} else {
String errorMsg = "Up-scoping is not allowed.";
logger.error(errorMsg);
throw new InvalidScopeException(errorMsg);
}
} else {
// otherwise inherit the scope of the refresh token (if it's there -- this can return a null scope set)
token.setScope(scopeService.toStrings(refreshScopes));
}
token.setClient(client);
if (client.getAccessTokenValiditySeconds() != null) {
Date expiration = new Date(System.currentTimeMillis() + (client.getAccessTokenValiditySeconds() * 1000L));
token.setExpiration(expiration);
}
if (client.isReuseRefreshToken()) {
// if the client re-uses refresh tokens, do that
token.setRefreshToken(refreshToken);
} else {
// otherwise, make a new refresh token
OAuth2RefreshTokenEntity newRefresh = createRefreshToken(client, authHolder);
token.setRefreshToken(newRefresh);
// clean up the old refresh token
tokenRepository.removeRefreshToken(refreshToken);
}
token.setAuthenticationHolder(authHolder);
// Give enhancers (e.g. JWT signing) a chance before persisting.
tokenEnhancer.enhance(token, authHolder.getAuthentication());
tokenRepository.saveAccessToken(token);
return token;
}
|
@Test
public void refreshAccessToken_requestingSameScope() {
// Requesting exactly the stored scope must succeed and yield a token with
// that scope; reserved system scopes must have been filtered along the way.
OAuth2AccessTokenEntity token = service.refreshAccessToken(refreshTokenValue, tokenRequest);
verify(scopeService, atLeastOnce()).removeReservedScopes(anySet());
assertThat(token.getScope(), equalTo(storedScope));
}
|
// Replaces this collection's material configs from a UI attribute map.
// The map carries the selected material type under MATERIAL_TYPE and the
// per-type attributes under that type's own key; exactly one material
// (matching the selected type) is added. Unknown types leave the
// collection empty after the initial clear().
@Override
public void setConfigAttributes(Object attributes) {
clear();
if (attributes == null) {
return;
}
// Raw Map: attribute maps come from the web layer untyped.
Map attributeMap = (Map) attributes;
String materialType = (String) attributeMap.get(AbstractMaterialConfig.MATERIAL_TYPE);
if (SvnMaterialConfig.TYPE.equals(materialType)) {
addMaterialConfig(getSvnMaterial(), (Map) attributeMap.get(SvnMaterialConfig.TYPE));
} else if (HgMaterialConfig.TYPE.equals(materialType)) {
addMaterialConfig(getHgMaterial(), (Map) attributeMap.get(HgMaterialConfig.TYPE));
} else if (GitMaterialConfig.TYPE.equals(materialType)) {
addMaterialConfig(getGitMaterial(), (Map) attributeMap.get(GitMaterialConfig.TYPE));
} else if (P4MaterialConfig.TYPE.equals(materialType)) {
addMaterialConfig(getP4Material(), (Map) attributeMap.get(P4MaterialConfig.TYPE));
} else if (DependencyMaterialConfig.TYPE.equals(materialType)) {
addMaterialConfig(getDependencyMaterial(), (Map) attributeMap.get(DependencyMaterialConfig.TYPE));
} else if (TfsMaterialConfig.TYPE.equals(materialType)) {
addMaterialConfig(getTfsMaterial(), (Map) attributeMap.get(TfsMaterialConfig.TYPE));
} else if (PackageMaterialConfig.TYPE.equals(materialType)) {
addMaterialConfig(getPackageMaterial(), (Map) attributeMap.get(PackageMaterialConfig.TYPE));
} else if (PluggableSCMMaterialConfig.TYPE.equals(materialType)) {
addMaterialConfig(getSCMMaterial(), (Map) attributeMap.get(PluggableSCMMaterialConfig.TYPE));
}
}
|
@Test
public void shouldSetGitConfigAttributesForMaterial() {
// Simulates the UI attribute map for a git material (url + branch) and
// verifies setConfigAttributes builds exactly one equivalent GitMaterialConfig.
MaterialConfigs materialConfigs = new MaterialConfigs();
Map<String, String> hashMap = new HashMap<>();
hashMap.put(GitMaterialConfig.URL, "foo");
hashMap.put(GitMaterialConfig.BRANCH, "master");
HashMap<String, Object> attributeMap = new HashMap<>();
attributeMap.put(AbstractMaterialConfig.MATERIAL_TYPE, GitMaterialConfig.TYPE);
attributeMap.put(GitMaterialConfig.TYPE, hashMap);
materialConfigs.setConfigAttributes(attributeMap);
assertThat(materialConfigs).hasSize(1);
GitMaterialConfig expected = git("foo");
expected.setConfigAttributes(Map.of(GitMaterialConfig.BRANCH, "master"));
assertThat(materialConfigs.first()).isEqualTo(expected);
}
|
/**
 * Parses and evaluates the given expression against the supplied variables
 * and extension, returning the resulting SEL value.
 */
public SelType evaluate(String expr, Map<String, Object> varsMap, Extension ext)
    throws Exception {
    // Reject oversized expressions up front.
    checkExprLength(expr);
    // NOTE(review): getBytes() uses the platform default charset — consider
    // an explicit charset; confirm the parser's expectation first.
    selParser.ReInit(new ByteArrayInputStream(expr.getBytes()));
    final ASTExecute root = selParser.Execute();
    try {
        selEvaluator.resetWithInput(varsMap, ext);
        return (SelType) root.jjtAccept(selEvaluator, null);
    } finally {
        // Always drop evaluator state so one run cannot leak into the next.
        selEvaluator.clearState();
    }
}
|
// Malformed expression: "new int[1, 2]" is not valid array syntax, so the
// evaluator is expected to reject it with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void testInvalidEvaluate() throws Exception {
t1.evaluate("Integer.valueOf(new int[1, 2]);", new HashMap<>(), null);
}
|
@Override
public String getIdentifier(UserInfo userInfo, ClientDetailsEntity client) {
    // Determine the sector identifier host: prefer the registered
    // sector_identifier_uri; otherwise fall back to the client's single
    // redirect URI (getOnlyElement throws unless there is exactly one).
    final String sectorIdentifier;
    if (Strings.isNullOrEmpty(client.getSectorIdentifierUri())) {
        Set<String> redirectUris = client.getRedirectUris();
        sectorIdentifier =
            UriComponentsBuilder.fromUriString(Iterables.getOnlyElement(redirectUris)).build().getHost();
    } else {
        sectorIdentifier =
            UriComponentsBuilder.fromUriString(client.getSectorIdentifierUri()).build().getHost();
    }
    if (sectorIdentifier == null) {
        // No host component to key the pairwise mapping on.
        return null;
    }
    // Look up the existing pairwise identifier for this user/sector pair,
    // minting and persisting a fresh random UUID when none exists yet.
    PairwiseIdentifier pairwise =
        pairwiseIdentifierRepository.getBySectorIdentifier(userInfo.getSub(), sectorIdentifier);
    if (pairwise == null) {
        pairwise = new PairwiseIdentifier();
        pairwise.setIdentifier(UUID.randomUUID().toString());
        pairwise.setUserSub(userInfo.getSub());
        pairwise.setSectorIdentifier(sectorIdentifier);
        pairwiseIdentifierRepository.save(pairwise);
    }
    return pairwise.getIdentifier();
}
|
// Pairwise identifiers for the same user must differ per sector/client, and
// each must be a parseable UUID (UUID.fromString throws on bad input).
@Test
public void testGetIdentifer_unique() {
    String pairwise1 = service.getIdentifier(userInfoRegular, pairwiseClient1);
    String pairwise3 = service.getIdentifier(userInfoRegular, pairwiseClient3);
    String pairwise4 = service.getIdentifier(userInfoRegular, pairwiseClient4);
    // make sure nothing's equal
    // NOTE(review): assertNotSame only compares references; assertNotEquals
    // would be the stronger content check — confirm intent before changing.
    assertNotSame(pairwise1, pairwise3);
    assertNotSame(pairwise1, pairwise4);
    assertNotSame(pairwise3, pairwise4);
    // see if the pairwise ids are actual UUIDs (fixes typo'd "uudi" locals)
    UUID uuid1 = UUID.fromString(pairwise1);
    UUID uuid3 = UUID.fromString(pairwise3);
    UUID uuid4 = UUID.fromString(pairwise4);
}
|
/**
 * Runs the given runnable and rethrows any checked exception it throws as an
 * unchecked one via throwAsUnchecked, so callers need no try/catch.
 *
 * @param t the action to run; may declare checked exceptions
 */
public static void uncheck(RunnableWithExceptions t) {
try {
t.run();
} catch (Exception exception) {
// Sneaky-throw: the original exception escapes without being wrapped.
throwAsUnchecked(exception);
}
}
|
// Verifies that uncheck() lets the original checked exception type
// (ClassNotFoundException) propagate rather than wrapping it.
@Test(expected = ClassNotFoundException.class)
public void test_if_correct_exception_is_still_thrown_by_method() {
    // Class<?> instead of the raw type Class (fixes raw-type warning).
    Class<?> clazz3 = uncheck(Class::forName, "INVALID");
}
|
@Override
public SelJodaDateTimeFormatter assignOps(SelOp op, SelType rhs) {
    // Only plain assignment is supported for formatter values.
    if (op != SelOp.ASSIGN) {
        throw new UnsupportedOperationException(
            type() + " DO NOT support assignment operation " + op);
    }
    // Both sides must already carry the same SEL type.
    SelTypeUtil.checkTypeMatch(this.type(), rhs.type());
    this.val = ((SelJodaDateTimeFormatter) rhs).val; // direct assignment
    return this;
}
|
// After ASSIGN, the target must hold the same internal value as the source.
@Test
public void testAssignOps() {
one.assignOps(SelOp.ASSIGN, another);
assertEquals(another.getInternalVal(), one.getInternalVal());
}
|
/**
 * Decodes, validates and resolves an inbound SAML LogoutRequest.
 *
 * <p>Flow: decode the HTTP request, validate it, locate the SAML session by
 * NameID (or first session index when NameID is absent), enrich the model
 * with session/metadata details, verify issuer and signature, then log the
 * session out.
 *
 * @throws SamlValidationException when validation, issuer or signature checks fail
 * @throws SamlParseException when the request cannot be decoded/initialized
 * @throws SamlSessionException when no SAML session matches the request
 */
public LogoutRequestModel parseLogoutRequest(HttpServletRequest request) throws SamlValidationException, SamlParseException, SamlSessionException, DienstencatalogusException {
final LogoutRequestModel logoutRequestModel = new LogoutRequestModel();
try {
final BaseHttpServletRequestXMLMessageDecoder decoder = decodeRequest(request);
var logoutRequest = (LogoutRequest) decoder.getMessageContext().getMessage();
final SAMLBindingContext bindingContext = decoder.getMessageContext().getSubcontext(SAMLBindingContext.class);
logoutRequestModel.setLogoutRequest(logoutRequest);
logoutRequestModel.setRequest(request);
validateRequest(logoutRequestModel);
// Prefer the NameID; fall back to the first session index as the key.
var id = logoutRequest.getNameID() != null ? logoutRequest.getNameID().getValue() : logoutRequest.getSessionIndexes().get(0).getValue();
var samlSession = samlSessionRepository.findById(id)
.orElseThrow(() -> new SamlSessionException("LogoutRequest no saml session found for nameID: " + id));
logoutRequestModel.setConnectionEntityId(samlSession.getConnectionEntityId());
logoutRequestModel.setServiceEntityId(samlSession.getServiceEntityId());
logoutRequestModel.setServiceUuid(samlSession.getServiceUuid());
logoutRequestModel.setRelayState(bindingContext.getRelayState());
logoutRequestModel.setEntranceSession(samlSession.getProtocolType().equals(ProtocolType.SAML_COMBICONNECT));
// Metadata must be resolved before the issuer check below can be made.
dcMetadataService.resolveDcMetadata(logoutRequestModel);
if (!logoutRequestModel.getConnectionEntityId().equals(logoutRequestModel.getLogoutRequest().getIssuer().getValue())) {
throw new SamlValidationException("Issuer not equal to connectorEntityId");
}
verifySignature(logoutRequestModel, logoutRequestModel.getLogoutRequest().getSignature());
logout(samlSession);
if (logger.isDebugEnabled())
OpenSAMLUtils.logSAMLObject((LogoutRequest) decoder.getMessageContext().getMessage());
} catch (MessageDecodingException e) {
throw new SamlParseException("Authentication deflate decode exception", e);
} catch (ComponentInitializationException e) {
throw new SamlParseException("Authentication deflate initialization exception", e);
}
return logoutRequestModel;
}
|
@Test
public void parseLogoutRequestWrongVersion() {
httpRequestMock.setParameter("SAMLRequest", "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS");
Exception exception = assertThrows(SamlValidationException.class,
() -> logoutService.parseLogoutRequest(httpRequestMock));
assertEquals("LogoutRequest validation error", exception.getMessage());
}
|
@Override
public boolean tryClaim(Timestamp position) {
    // Re-claiming the last attempted position always succeeds so callers can
    // safely retry the same timestamp; anything else defers to the parent.
    return position.equals(lastAttemptedPosition) || super.tryClaim(position);
}
|
// Claims are idempotent for repeats of the last position and fail once the
// position leaves the restriction range (20 is the exclusive upper bound).
@Test
public void testTryClaim() {
assertEquals(range, tracker.currentRestriction());
assertTrue(tracker.tryClaim(Timestamp.ofTimeMicroseconds(10L)));
assertTrue(tracker.tryClaim(Timestamp.ofTimeMicroseconds(10L)));
assertTrue(tracker.tryClaim(Timestamp.ofTimeMicroseconds(11L)));
assertTrue(tracker.tryClaim(Timestamp.ofTimeMicroseconds(11L)));
assertTrue(tracker.tryClaim(Timestamp.ofTimeMicroseconds(19L)));
assertFalse(tracker.tryClaim(Timestamp.ofTimeMicroseconds(20L)));
}
|
/**
 * Returns the probed MIME type of the given path, or {@code null} when it
 * cannot be determined or probing fails.
 */
public static String getMimeType(Path file) {
try {
return Files.probeContentType(file);
} catch (IOException ignore) {
// issue#3179: OpenJDK may throw NoSuchFileException here; deliberately
// swallow it and report "unknown" as null.
return null;
}
}
|
// Regression for issue#3179: probing a non-existent "*.jpg" path should still
// yield "image/jpeg" (presumably resolved from the extension — behavior may
// be platform/JDK dependent; confirm on the supported platforms).
@Test
public void issue3179Test() {
final String mimeType = PathUtil.getMimeType(Paths.get("xxxx.jpg"));
assertEquals("image/jpeg", mimeType);
}
|
/**
 * Exports up to {@code limit} documents from the given collection, projected
 * to {@code exportedFieldNames}, optionally filtered and sorted.
 *
 * <p>When the subject can read the whole collection the limit is applied in
 * MongoDB; otherwise documents are filtered in memory per-entity and the
 * limit is applied after the permission check.
 */
public List<Document> export(final String collectionName,
final List<String> exportedFieldNames,
final int limit,
final Bson dbFilter,
final List<Sort> sorts,
final Subject subject) {
final MongoCollection<Document> collection = mongoConnection.getMongoDatabase().getCollection(collectionName);
// A null filter means "match everything".
final FindIterable<Document> resultsWithoutLimit = collection.find(Objects.requireNonNullElse(dbFilter, Filters.empty()))
.projection(Projections.fields(Projections.include(exportedFieldNames)))
.sort(toMongoDbSort(sorts));
final var userCanReadAllEntities = permissionsUtils.hasAllPermission(subject) || permissionsUtils.hasReadPermissionForWholeCollection(subject, collectionName);
final var checkPermission = permissionsUtils.createPermissionCheck(subject, collectionName);
final var documents = userCanReadAllEntities
? getFromMongo(resultsWithoutLimit, limit)
: getWithInMemoryPermissionCheck(resultsWithoutLimit, limit, checkPermission);
return documents.collect(Collectors.toList());
}
|
// Only the single document with age > 40 should survive the filter, and the
// projection should limit it to _id plus "name".
@Test
void testExportUsesFilterCorrectly() {
insertTestData();
simulateAdminUser();
final List<Document> exportedDocuments = toTest.export(TEST_COLLECTION_NAME,
List.of("name"),
200,
Filters.gt("age", 40),
List.of(),
subject);
assertThat(exportedDocuments)
.isNotNull()
.hasSize(1)
.containsExactly(
new Document(Map.of("_id", "0000000000000000000000a5", "name", "John"))
);
}
|
/**
 * Returns the smallest client version across all connections, or
 * Integer.MAX_VALUE when the connection set is empty (same as the original
 * accumulator's initial value).
 */
public int computeMinVersion() {
    return this.connectionSet.stream()
        .mapToInt(Connection::getVersion)
        .min()
        .orElse(Integer.MAX_VALUE);
}
|
// With versions {1, 10} registered, the minimum reported must be 1.
@Test
public void testComputeMinVersion() {
ConsumerConnection consumerConnection = new ConsumerConnection();
HashSet<Connection> connections = new HashSet<>();
Connection conn1 = new Connection();
conn1.setVersion(1);
connections.add(conn1);
Connection conn2 = new Connection();
conn2.setVersion(10);
connections.add(conn2);
consumerConnection.setConnectionSet(connections);
int version = consumerConnection.computeMinVersion();
assertThat(version).isEqualTo(1);
}
|
/**
 * Drops the Glue database backing the given namespace.
 *
 * <p>Fails with NamespaceNotEmptyException when the database still holds any
 * table; the message distinguishes Iceberg vs non-Iceberg based on the first
 * table returned by Glue.
 *
 * @return always true; failures surface as exceptions
 */
@Override
public boolean dropNamespace(Namespace namespace) throws NamespaceNotEmptyException {
// Throws if the namespace does not exist.
namespaceExists(namespace);
GetTablesResponse response =
glue.getTables(
GetTablesRequest.builder()
.catalogId(awsProperties.glueCatalogId())
.databaseName(
IcebergToGlueConverter.toDatabaseName(
namespace, awsProperties.glueCatalogSkipNameValidation()))
.build());
if (response.hasTableList() && !response.tableList().isEmpty()) {
// NOTE(review): only the first listed table decides the message wording.
Table table = response.tableList().get(0);
if (isGlueIcebergTable(table)) {
throw new NamespaceNotEmptyException(
"Cannot drop namespace %s because it still contains Iceberg tables", namespace);
} else {
throw new NamespaceNotEmptyException(
"Cannot drop namespace %s because it still contains non-Iceberg tables", namespace);
}
}
glue.deleteDatabase(
DeleteDatabaseRequest.builder()
.catalogId(awsProperties.glueCatalogId())
.name(
IcebergToGlueConverter.toDatabaseName(
namespace, awsProperties.glueCatalogSkipNameValidation()))
.build());
LOG.info("Dropped namespace: {}", namespace);
// Always successful, otherwise exception is thrown
return true;
}
|
// An empty table list means the namespace is droppable; the stubbed Glue
// client must accept the deleteDatabase call without throwing.
@Test
public void testDropNamespace() {
Mockito.doReturn(GetTablesResponse.builder().build())
.when(glue)
.getTables(Mockito.any(GetTablesRequest.class));
Mockito.doReturn(
GetDatabaseResponse.builder().database(Database.builder().name("db1").build()).build())
.when(glue)
.getDatabase(Mockito.any(GetDatabaseRequest.class));
Mockito.doReturn(DeleteDatabaseResponse.builder().build())
.when(glue)
.deleteDatabase(Mockito.any(DeleteDatabaseRequest.class));
glueCatalog.dropNamespace(Namespace.of("db1"));
}
|
/**
 * Copies this buffer's bytes starting at {@code index} into the whole of
 * {@code dst}, delegating to the (index, dst, dstIndex, length) overload.
 *
 * @return this buffer, for call chaining
 */
@Override
public ByteBuf getBytes(int index, byte[] dst) {
getBytes(index, dst, 0, dst.length);
return this;
}
|
// A negative source index must be rejected before any copying happens.
// (Anonymous Executable replaced with a lambda — same assertion, less noise;
// the file already uses lambdas elsewhere.)
@Test
public void getByteArrayBoundaryCheck1() {
    assertThrows(IndexOutOfBoundsException.class, () -> buffer.getBytes(-1, EMPTY_BYTES));
}
|
/**
 * Reports whether the named table exists, delegating straight to the wrapped
 * metastore.
 */
@Override
public boolean tableExists(String dbName, String tableName) {
return delegate.tableExists(dbName, tableName);
}
|
// The caching wrapper must forward tableExists() to the underlying metastore.
@Test
public void testTableExists() {
CachingDeltaLakeMetastore cachingDeltaLakeMetastore =
CachingDeltaLakeMetastore.createCatalogLevelInstance(metastore, executor, expireAfterWriteSec,
refreshAfterWriteSec, 100);
Assert.assertTrue(cachingDeltaLakeMetastore.tableExists("db1", "table1"));
}
|
/**
 * Removes the user-set value and its source for the given key, marking the
 * hash outdated. Removing an absent key is a no-op.
 */
public void remove(PropertyKey key) {
    if (!mUserProps.containsKey(key)) {
        return;
    }
    mUserProps.remove(key);
    mSources.remove(key);
    mHash.markOutdated();
}
|
// After removal the key must fall back to its default value and source.
@Test
public void remove() {
mProperties.remove(mKeyWithValue);
assertEquals(mKeyWithValue.getDefaultValue(), mProperties.get(mKeyWithValue));
assertEquals(Source.DEFAULT, mProperties.getSource(mKeyWithValue));
}
|
/**
 * Sends a distro VERIFY request for the given data to the target server.
 *
 * <p>Returns true when the target no longer exists (nothing to verify) or
 * the remote call succeeds; false when the member is unhealthy or the RPC
 * fails.
 */
@Override
public boolean syncVerifyData(DistroData verifyData, String targetServer) {
if (isNoExistTarget(targetServer)) {
return true;
}
// replace target server as self server so that can callback.
verifyData.getDistroKey().setTargetServer(memberManager.getSelf().getAddress());
DistroDataRequest request = new DistroDataRequest(verifyData, DataOperation.VERIFY);
Member member = memberManager.find(targetServer);
if (checkTargetServerStatusUnhealthy(member)) {
Loggers.DISTRO
.warn("[DISTRO] Cancel distro verify caused by target server {} unhealthy, key: {}", targetServer,
verifyData.getDistroKey());
return false;
}
try {
Response response = clusterRpcClientProxy.sendRequest(member, request);
return checkResponse(response);
} catch (NacosException e) {
// Failure is logged and reported via the false return, not rethrown.
Loggers.DISTRO.error("[DISTRO-FAILED] Verify distro data failed! key: {} ", verifyData.getDistroKey(), e);
}
return false;
}
|
// An unhealthy member must short-circuit: the failure callback fires and no
// async RPC is ever attempted.
@Test
void testSyncVerifyDataWithCallbackForMemberUnhealthy() throws NacosException {
DistroData verifyData = new DistroData();
verifyData.setDistroKey(new DistroKey());
when(memberManager.hasMember(member.getAddress())).thenReturn(true);
when(memberManager.find(member.getAddress())).thenReturn(member);
transportAgent.syncVerifyData(verifyData, member.getAddress(), distroCallback);
verify(distroCallback).onFailed(null);
verify(clusterRpcClientProxy, never()).asyncRequest(any(Member.class), any(), any());
}
|
/**
 * Returns the parsed connection properties.
 *
 * <p>NOTE(review): this exposes the internal mutable Properties object
 * directly — callers can modify it; confirm whether a defensive copy is
 * intended.
 */
public Properties getProperties()
{
return properties;
}
|
// SSL=true must switch the scheme to https while leaving the trust-store
// properties unset.
@Test
public void testUriWithSslEnabled()
throws SQLException
{
PrestoDriverUri parameters = createDriverUri("presto://localhost:8080/blackhole?SSL=true");
assertUriPortScheme(parameters, 8080, "https");
Properties properties = parameters.getProperties();
assertNull(properties.getProperty(SSL_TRUST_STORE_PATH.getKey()));
assertNull(properties.getProperty(SSL_TRUST_STORE_PASSWORD.getKey()));
}
|
/**
 * Returns the SSLContext built from this provider's identity material.
 */
@Override
public SSLContext getIdentitySslContext() {
return sslContext;
}
|
// Builds a key pair, certificate and PEM trust store on disk, then checks the
// provider can construct an SSL context from those files.
@Test
void constructs_ssl_context_with_pem_trust_store() throws IOException {
File keyFile = File.createTempFile("junit", null, tempDirectory);
KeyPair keypair = KeyUtils.generateKeypair(KeyAlgorithm.RSA);
createPrivateKeyFile(keyFile, keypair);
X509Certificate certificate = createCertificate(keypair);
File certificateFile = File.createTempFile("junit", null, tempDirectory);
createCertificateFile(certificate, certificateFile);
File trustStoreFile = File.createTempFile("junit", null, tempDirectory);
createPemTrustStoreFile(certificate, trustStoreFile);
SiaIdentityProvider provider =
new SiaIdentityProvider(
new AthenzService("domain", "service-name"),
keyFile.toPath(),
certificateFile.toPath(),
trustStoreFile.toPath());
assertNotNull(provider.getIdentitySslContext());
}
|
/**
 * Mirrors every key horizontally around the keyboard's vertical axis.
 *
 * <p>A key spanning [x, x + width] maps to [W - x - width, W - x], where W is
 * the keyboard width. Example with W = 55 and keys at 0, 20 and 40 (width
 * 15): the mirrored X positions are 40, 20 and 0 — the same three phases the
 * original spelled out (negate, add width, subtract key width) folded into
 * one expression.
 */
public void mirrorKeys() {
    final int keyboardWidth = getMinWidth();
    for (Key key : getKeys()) {
        key.x = keyboardWidth - key.x - key.width;
    }
}
|
// Two full rows ("qwert" below "asdfg"); after mirrorKeys() every key keeps
// its row (Y) but its X is reflected across the keyboard width.
@Test
public void testKeyboardPopupSupportsMirrorMultipleFullRows() throws Exception {
String popupCharacters = "qwertasdfg";
// asdfg
// qwert
AnyPopupKeyboard keyboard =
new AnyPopupKeyboard(
new DefaultAddOn(getApplicationContext(), getApplicationContext()),
getApplicationContext(),
popupCharacters,
SIMPLE_KeyboardDimens,
"POP_KEYBOARD");
int vGap = (int) SIMPLE_KeyboardDimens.getRowVerticalGap();
int keyHeight = (int) SIMPLE_KeyboardDimens.getNormalKeyHeight();
int hGap = (int) SIMPLE_KeyboardDimens.getKeyHorizontalGap();
final int keyWidth =
(int)
(SIMPLE_KeyboardDimens.getKeyboardMaxWidth()
- SIMPLE_KeyboardDimens.getKeyHorizontalGap() * popupCharacters.length())
/ 10;
Assert.assertEquals(10, keyboard.getKeys().size());
assertKeyValues(keyboard, 'q', vGap + keyHeight + vGap, 0);
assertKeyValues(keyboard, 'w', vGap + keyHeight + vGap, keyWidth);
assertKeyValues(keyboard, 'e', vGap + keyHeight + vGap, hGap + 2 * keyWidth);
assertKeyValues(keyboard, 'r', vGap + keyHeight + vGap, 2 * hGap + 3 * keyWidth);
assertKeyValues(keyboard, 't', vGap + keyHeight + vGap, 3 * hGap + 4 * keyWidth);
assertKeyValues(keyboard, 'a', vGap, 0);
assertKeyValues(keyboard, 's', vGap, keyWidth);
assertKeyValues(keyboard, 'd', vGap, hGap + 2 * keyWidth);
assertKeyValues(keyboard, 'f', vGap, 2 * hGap + 3 * keyWidth);
assertKeyValues(keyboard, 'g', vGap, 3 * hGap + 4 * keyWidth);
keyboard.mirrorKeys();
// same order, mirrored X position
Assert.assertEquals(10, keyboard.getKeys().size());
assertKeyValues(keyboard, 'q', vGap + keyHeight + vGap, 5 * hGap + 4 * keyWidth);
assertKeyValues(keyboard, 'w', vGap + keyHeight + vGap, 4 * hGap + 3 * keyWidth);
assertKeyValues(keyboard, 'e', vGap + keyHeight + vGap, 3 * hGap + 2 * keyWidth);
assertKeyValues(keyboard, 'r', vGap + keyHeight + vGap, 2 * hGap + keyWidth);
assertKeyValues(keyboard, 't', vGap + keyHeight + vGap, hGap);
assertKeyValues(keyboard, 'a', vGap, 5 * hGap + 4 * keyWidth);
assertKeyValues(keyboard, 's', vGap, 4 * hGap + 3 * keyWidth);
assertKeyValues(keyboard, 'd', vGap, 3 * hGap + 2 * keyWidth);
assertKeyValues(keyboard, 'f', vGap, 2 * hGap + keyWidth);
assertKeyValues(keyboard, 'g', vGap, hGap);
}
|
/**
 * Sets the merge batch size; must be positive (checkPositive rejects
 * non-positive values).
 *
 * @return this config, for call chaining
 */
public MergePolicyConfig setBatchSize(int batchSize) {
this.batchSize = checkPositive("batchSize", batchSize);
return this;
}
|
// A valid positive batch size must round-trip through setter and getter.
@Test
public void setBatchSize() {
config.setBatchSize(1234);
assertEquals(1234, config.getBatchSize());
}
|
/**
 * Forwards an ELECTION message with the given content to the next instance
 * in the ring after {@code currentId}; always reports success.
 */
@Override
public boolean sendElectionMessage(int currentId, String content) {
    final var target = this.findNextInstance(currentId);
    target.onMessage(new Message(MessageType.ELECTION, content));
    return true;
}
|
// Sending from instance 2 must enqueue the ELECTION message on instance 3
// (the next ring member); the private messageQueue is inspected via
// reflection.
@Test
void testSendElectionMessage() {
try {
var instance1 = new RingInstance(null, 1, 1);
var instance2 = new RingInstance(null, 1, 2);
var instance3 = new RingInstance(null, 1, 3);
Map<Integer, Instance> instanceMap = Map.of(1, instance1, 2, instance2, 3, instance3);
var messageManager = new RingMessageManager(instanceMap);
var messageContent = "2";
messageManager.sendElectionMessage(2, messageContent);
var ringMessage = new Message(MessageType.ELECTION, messageContent);
var instanceClass = AbstractInstance.class;
var messageQueueField = instanceClass.getDeclaredField("messageQueue");
messageQueueField.setAccessible(true);
var ringMessageSent = ((Queue<Message>) messageQueueField.get(instance3)).poll();
assertEquals(ringMessageSent.getType(), ringMessage.getType());
assertEquals(ringMessageSent.getContent(), ringMessage.getContent());
} catch (NoSuchFieldException | IllegalAccessException e) {
fail("Error to access private field.");
}
}
|
/**
 * Formats a byte array of concatenated 4-byte CRC words as a bracketed,
 * comma-separated list of 0x-prefixed hex values, e.g. "[0x000000ff, ...]".
 *
 * @throws IllegalArgumentException if the length is not a multiple of 4
 */
public static String toMultiCrcString(final byte[] bytes) {
    if (bytes.length % 4 != 0) {
        throw new IllegalArgumentException(String.format(
            "Unexpected byte[] length '%d' not divisible by 4. Contents: %s",
            bytes.length, Arrays.toString(bytes)));
    }
    StringBuilder sb = new StringBuilder("[");
    for (int i = 0; i < bytes.length; i += 4) {
        if (i > 0) {
            // Separator before every word except the first.
            sb.append(", ");
        }
        sb.append(String.format("0x%08x", readInt(bytes, i)));
    }
    return sb.append(']').toString();
}
|
// A 6-byte array is not a whole number of 4-byte CRC words, so the formatter
// must reject it with an IllegalArgumentException mentioning "length".
@Test
public void testToMultiCrcStringBadLength()
throws Exception {
LambdaTestUtils.intercept(
IllegalArgumentException.class,
"length",
() -> CrcUtil.toMultiCrcString(new byte[6]));
}
|
/**
 * Imports cluster metadata either from a file path given in the statement or
 * from the statement's Base64-encoded inline value, then applies the server
 * configuration and database definitions it contains.
 *
 * @throws FileIOException when the metadata file cannot be read
 */
@Override
public void executeUpdate(final ImportMetaDataStatement sqlStatement, final ContextManager contextManager) throws SQLException {
String jsonMetaDataConfig;
if (sqlStatement.getFilePath().isPresent()) {
File file = new File(sqlStatement.getFilePath().get());
try {
jsonMetaDataConfig = FileUtils.readFileToString(file, Charset.defaultCharset());
} catch (final IOException ignore) {
// NOTE(review): the IOException cause is dropped here — consider
// attaching it to FileIOException if its constructor allows.
throw new FileIOException(file);
}
} else {
// Inline payload: Base64-encoded JSON.
jsonMetaDataConfig = new String(Base64.decodeBase64(sqlStatement.getMetaDataValue()));
}
ExportedClusterInfo exportedClusterInfo = JsonUtils.fromJsonString(jsonMetaDataConfig, ExportedClusterInfo.class);
ExportedMetaData exportedMetaData = exportedClusterInfo.getMetaData();
importServerConfiguration(contextManager, exportedMetaData);
importDatabase(exportedMetaData);
}
|
// Importing a metadata file with no storage units must be rejected with
// EmptyStorageUnitException.
@Test
void assertImportEmptyMetaData() {
init(null);
ContextManager contextManager = mock(ContextManager.class, RETURNS_DEEP_STUBS);
assertThrows(EmptyStorageUnitException.class, () -> executor.executeUpdate(
new ImportMetaDataStatement(null, Objects.requireNonNull(ImportMetaDataExecutorTest.class.getResource(featureMap.get(EMPTY))).getPath()), contextManager));
}
|
/**
 * Fetches all completed map outputs available on the given host.
 *
 * <p>Opens one shuffle connection for the host's map list and pulls outputs
 * until all are copied or a failure occurs. On a mid-stream IOException the
 * connection is torn down and re-opened for only the still-remaining maps.
 * Failed tasks are reported to the scheduler; any maps left unfetched are
 * always put back on the scheduler's known-output list in the finally block.
 */
@VisibleForTesting
protected void copyFromHost(MapHost host) throws IOException {
// reset retryStartTime for a new host
retryStartTime = 0;
// Get completed maps on 'host'
List<TaskAttemptID> maps = scheduler.getMapsForHost(host);
// Sanity check to catch hosts with only 'OBSOLETE' maps,
// especially at the tail of large jobs
if (maps.size() == 0) {
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Fetcher " + id + " going to fetch from " + host + " for: " + maps);
}
// List of maps to be fetched yet
Set<TaskAttemptID> remaining = new HashSet<TaskAttemptID>(maps);
// Construct the url and connect
URL url = getMapOutputURL(host, maps);
DataInputStream input = null;
try {
input = openShuffleUrl(host, remaining, url);
if (input == null) {
// Connection failed; openShuffleUrl has already reported it.
return;
}
// Loop through available map-outputs and fetch them
// On any error, faildTasks is not null and we exit
// after putting back the remaining maps to the
// yet_to_be_fetched list and marking the failed tasks.
TaskAttemptID[] failedTasks = null;
while (!remaining.isEmpty() && failedTasks == null) {
try {
failedTasks = copyMapOutput(host, input, remaining, fetchRetryEnabled);
} catch (IOException e) {
IOUtils.cleanupWithLogger(LOG, input);
//
// Setup connection again if disconnected by NM
connection.disconnect();
// Get map output from remaining tasks only.
url = getMapOutputURL(host, remaining);
input = openShuffleUrl(host, remaining, url);
if (input == null) {
return;
}
}
}
if(failedTasks != null && failedTasks.length > 0) {
LOG.warn("copyMapOutput failed for tasks "+Arrays.toString(failedTasks));
scheduler.hostFailed(host.getHostName());
for(TaskAttemptID left: failedTasks) {
scheduler.copyFailed(left, host, true, false);
}
}
// Sanity check
if (failedTasks == null && !remaining.isEmpty()) {
throw new IOException("server didn't return all expected map outputs: "
+ remaining.size() + " left.");
}
input.close();
input = null;
} finally {
// Close the stream if it was not closed on the happy path above.
if (input != null) {
IOUtils.cleanupWithLogger(LOG, input);
input = null;
}
// Unfetched maps go back to the scheduler for another attempt.
for (TaskAttemptID left : remaining) {
scheduler.putBackKnownMapOutput(host, left);
}
}
}
|
// Each of three fetch attempts sees an incompatible shuffle name/version
// header, so every attempt must record errors, mark both maps as failed and
// put both map outputs back for rescheduling.
@Test
public void testCopyFromHostIncompatibleShuffleVersionWithRetry()
throws Exception {
String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
when(connection.getResponseCode()).thenReturn(200);
// First call yields a bad version, subsequent calls a bad name.
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
.thenReturn("mapreduce").thenReturn("other").thenReturn("other");
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
.thenReturn("1.0.1").thenReturn("1.0.0").thenReturn("1.0.1");
when(connection.getHeaderField(
SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH)).thenReturn(replyHash);
ByteArrayInputStream in = new ByteArrayInputStream(new byte[0]);
when(connection.getInputStream()).thenReturn(in);
for (int i = 0; i < 3; ++i) {
Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(jobWithRetry,
id, ss, mm, r, metrics, except, key, connection);
underTest.copyFromHost(host);
}
verify(connection, times(3)).addRequestProperty(
SecureShuffleUtils.HTTP_HEADER_URL_HASH, encHash);
verify(allErrs, times(3)).increment(1);
verify(ss, times(3)).copyFailed(map1ID, host, false, false);
verify(ss, times(3)).copyFailed(map2ID, host, false, false);
verify(ss, times(3)).putBackKnownMapOutput(any(MapHost.class), eq(map1ID));
verify(ss, times(3)).putBackKnownMapOutput(any(MapHost.class), eq(map2ID));
}
|
/**
 * Converts a GlobalRollbackResponse model into its protobuf form, building
 * from the innermost message outwards along the model's inheritance chain:
 * AbstractMessage -> AbstractResultMessage -> AbstractTransactionResponse ->
 * AbstractGlobalEndResponse -> GlobalRollbackResponseProto.
 */
@Override
public GlobalRollbackResponseProto convert2Proto(GlobalRollbackResponse globalRollbackResponse) {
    final AbstractMessageProto messageProto = AbstractMessageProto.newBuilder()
        .setMessageType(MessageTypeProto.forNumber(globalRollbackResponse.getTypeCode()))
        .build();
    // Proto string fields cannot hold null; map a null message to "".
    final String msg = globalRollbackResponse.getMsg();
    final AbstractResultMessageProto resultMessageProto = AbstractResultMessageProto.newBuilder()
        .setMsg(msg == null ? "" : msg)
        .setResultCode(ResultCodeProto.valueOf(globalRollbackResponse.getResultCode().name()))
        .setAbstractMessage(messageProto)
        .build();
    final AbstractTransactionResponseProto transactionResponseProto =
        AbstractTransactionResponseProto.newBuilder()
            .setAbstractResultMessage(resultMessageProto)
            .setTransactionExceptionCode(TransactionExceptionCodeProto.valueOf(
                globalRollbackResponse.getTransactionExceptionCode().name()))
            .build();
    final AbstractGlobalEndResponseProto globalEndResponseProto =
        AbstractGlobalEndResponseProto.newBuilder()
            .setAbstractTransactionResponse(transactionResponseProto)
            .setGlobalStatus(GlobalStatusProto.valueOf(globalRollbackResponse.getGlobalStatus().name()))
            .build();
    return GlobalRollbackResponseProto.newBuilder()
        .setAbstractGlobalEndResponse(globalEndResponseProto)
        .build();
}
|
// Round-trip: model -> proto -> model must preserve every populated field.
@Test
public void convert2Proto() {
GlobalRollbackResponse globalRollbackResponse = new GlobalRollbackResponse();
globalRollbackResponse.setGlobalStatus(GlobalStatus.AsyncCommitting);
globalRollbackResponse.setMsg("msg");
globalRollbackResponse.setResultCode(ResultCode.Failed);
globalRollbackResponse.setTransactionExceptionCode(TransactionExceptionCode.BranchRegisterFailed);
GlobalRollbackResponseConvertor convertor = new GlobalRollbackResponseConvertor();
GlobalRollbackResponseProto proto = convertor.convert2Proto(
globalRollbackResponse);
GlobalRollbackResponse real = convertor.convert2Model(proto);
assertThat((real.getTypeCode())).isEqualTo(globalRollbackResponse.getTypeCode());
assertThat((real.getMsg())).isEqualTo(globalRollbackResponse.getMsg());
assertThat((real.getResultCode())).isEqualTo(globalRollbackResponse.getResultCode());
assertThat((real.getTransactionExceptionCode())).isEqualTo(
globalRollbackResponse.getTransactionExceptionCode());
}
|
public static CommandAPDU getNikGACommand() {
return new CommandAPDU(
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
);
}
|
@Test
void getNikGACommand() {
assertEquals("SSSSSSSSSSSSSSSS", Hex.toHexString(ApduFactory.getNikGACommand().getBytes()).toUpperCase());
assertEquals("2.16.528.1.1003.10.9.3.3", PolymorphType.PIP.getOid().toString());
}
|
/**
 * Returns the human-readable description of this action for tool listings.
 */
@Override
public String getDescription() {
return "Write a file with the given filename and body.";
}
|
// The description must match the exact wording exposed to tool consumers.
@Test
void testGetDescription() {
assertEquals("Write a file with the given filename and body.", writeFileAction.getDescription());
}
|
/**
 * Builds the serde registry for one cluster.
 *
 * <p>Order matters: explicitly configured serdes are registered first (name
 * uniqueness enforced), then any remaining built-in serdes that can
 * auto-configure are added without topic patterns, then topic-related serdes
 * are registered. Default key/value serdes are resolved by configured name,
 * with the value serde falling back to SchemaRegistry, then ProtobufFile.
 *
 * @throws ValidationException on a missing or duplicated serde name
 */
public ClusterSerdes init(Environment env,
ClustersProperties clustersProperties,
int clusterIndex) {
ClustersProperties.Cluster clusterProperties = clustersProperties.getClusters().get(clusterIndex);
log.debug("Configuring serdes for cluster {}", clusterProperties.getName());
var globalPropertiesResolver = new PropertyResolverImpl(env);
var clusterPropertiesResolver = new PropertyResolverImpl(env, "kafka.clusters." + clusterIndex);
Map<String, SerdeInstance> registeredSerdes = new LinkedHashMap<>();
// initializing serdes from config
if (clusterProperties.getSerde() != null) {
for (int i = 0; i < clusterProperties.getSerde().size(); i++) {
SerdeConfig serdeConfig = clusterProperties.getSerde().get(i);
if (Strings.isNullOrEmpty(serdeConfig.getName())) {
throw new ValidationException("'name' property not set for serde: " + serdeConfig);
}
if (registeredSerdes.containsKey(serdeConfig.getName())) {
throw new ValidationException("Multiple serdes with same name: " + serdeConfig.getName());
}
// Property lookup cascades: serde-local -> cluster -> global.
var instance = createSerdeFromConfig(
serdeConfig,
new PropertyResolverImpl(env, "kafka.clusters." + clusterIndex + ".serde." + i + ".properties"),
clusterPropertiesResolver,
globalPropertiesResolver
);
registeredSerdes.put(serdeConfig.getName(), instance);
}
}
// initializing remaining built-in serdes with empty selection patterns
builtInSerdeClasses.forEach((name, clazz) -> {
if (!registeredSerdes.containsKey(name)) {
BuiltInSerde serde = createSerdeInstance(clazz);
// Built-ins that cannot auto-configure are silently skipped.
if (autoConfigureSerde(serde, clusterPropertiesResolver, globalPropertiesResolver)) {
registeredSerdes.put(name, new SerdeInstance(name, serde, null, null, null));
}
}
});
registerTopicRelatedSerde(registeredSerdes);
return new ClusterSerdes(
registeredSerdes,
Optional.ofNullable(clusterProperties.getDefaultKeySerde())
.map(name -> Preconditions.checkNotNull(registeredSerdes.get(name), "Default key serde not found"))
.orElse(null),
Optional.ofNullable(clusterProperties.getDefaultValueSerde())
.map(name -> Preconditions.checkNotNull(registeredSerdes.get(name), "Default value serde not found"))
.or(() -> Optional.ofNullable(registeredSerdes.get(SchemaRegistrySerde.name())))
.or(() -> Optional.ofNullable(registeredSerdes.get(ProtobufFileSerde.name())))
.orElse(null),
createFallbackSerde()
);
}
|
@Test
void serdeWithBuiltInNameAndNoPropertiesCantBeInitializedIfSerdeNotSupportAutoConfigure() {
// A config entry that names a built-in serde which cannot auto-configure and
// supplies no explicit properties must be rejected at init time.
ClustersProperties.SerdeConfig serdeConfig = new ClustersProperties.SerdeConfig();
serdeConfig.setName("BuiltIn2"); //auto-configuration not supported
serdeConfig.setTopicKeysPattern("keys");
serdeConfig.setTopicValuesPattern("vals");
assertThatCode(() -> initializer.init(env, createProperties(serdeConfig), 0))
.isInstanceOf(ValidationException.class);
}
|
/**
 * Evaluates whether {@code node} satisfies the given placement constraint for
 * {@code appId}. A {@code null} constraint is trivially satisfied. The
 * constraint is first normalized via {@link SingleConstraintTransformer}, then
 * dispatched to the matching single/and/or evaluator.
 *
 * @throws InvalidAllocationTagsQueryException for unsupported constraint types
 */
private static boolean canSatisfyConstraints(ApplicationId appId,
    PlacementConstraint constraint, SchedulerNode node,
    AllocationTagsManager atm,
    Optional<DiagnosticsCollector> dcOpt)
    throws InvalidAllocationTagsQueryException {
  if (constraint == null) {
    LOG.debug("Constraint is found empty during constraint validation for"
        + " app:{}", appId);
    return true;
  }
  // If this is a single constraint, transform to SingleConstraint
  AbstractConstraint expr =
      new SingleConstraintTransformer(constraint).transform().getConstraintExpr();
  // TODO handle other type of constraints, e.g CompositeConstraint
  if (expr instanceof SingleConstraint) {
    return canSatisfySingleConstraint(appId, (SingleConstraint) expr, node,
        atm, dcOpt);
  }
  if (expr instanceof And) {
    return canSatisfyAndConstraint(appId, (And) expr, node, atm, dcOpt);
  }
  if (expr instanceof Or) {
    return canSatisfyOrConstraint(appId, (Or) expr, node, atm, dcOpt);
  }
  throw new InvalidAllocationTagsQueryException(
      "Unsupported type of constraint: "
          + expr.getClass().getSimpleName());
}
|
@Test
public void testNotSelfAppConstraints()
throws InvalidAllocationTagsQueryException {
// Verifies the "not-self" namespace: tags are matched against OTHER apps only,
// never against the requesting app's own allocation tags.
long ts = System.currentTimeMillis();
ApplicationId application1 = BuilderUtils.newApplicationId(ts, 100);
ApplicationId application2 = BuilderUtils.newApplicationId(ts, 101);
ApplicationId application3 = BuilderUtils.newApplicationId(ts, 102);
ConcurrentMap<ApplicationId, RMApp> allApps = new ConcurrentHashMap<>();
allApps.put(application1, new MockRMApp(123, 1000,
RMAppState.NEW, "userA", ImmutableSet.of("")));
allApps.put(application2, new MockRMApp(124, 1001,
RMAppState.NEW, "userA", ImmutableSet.of("")));
allApps.put(application3, new MockRMApp(125, 1002,
RMAppState.NEW, "userA", ImmutableSet.of("")));
RMContext mockedContext = Mockito.spy(rmContext);
when(mockedContext.getRMApps()).thenReturn(allApps);
AllocationTagsManager tm = new AllocationTagsManager(mockedContext);
PlacementConstraintManagerService pcm =
new MemoryPlacementConstraintManager();
mockedContext.setAllocationTagsManager(tm);
mockedContext.setPlacementConstraintManager(pcm);
// Register App1 with anti-affinity constraint map.
RMNode n0r1 = rmNodes.get(0);
RMNode n1r1 = rmNodes.get(1);
RMNode n2r2 = rmNodes.get(2);
RMNode n3r2 = rmNodes.get(3);
/**
* Place container:
* n0: app1/A(1), app2/A(1)
* n1: app3/A(3)
* n2: app1/A(2)
* n3: ""
*/
tm.addContainer(n0r1.getNodeID(),
newContainerId(application1), ImmutableSet.of("A"));
tm.addContainer(n0r1.getNodeID(),
newContainerId(application2), ImmutableSet.of("A"));
tm.addContainer(n1r1.getNodeID(),
newContainerId(application3), ImmutableSet.of("A"));
tm.addContainer(n1r1.getNodeID(),
newContainerId(application3), ImmutableSet.of("A"));
tm.addContainer(n1r1.getNodeID(),
newContainerId(application3), ImmutableSet.of("A"));
tm.addContainer(n2r2.getNodeID(),
newContainerId(application1), ImmutableSet.of("A"));
tm.addContainer(n2r2.getNodeID(),
newContainerId(application1), ImmutableSet.of("A"));
SchedulerNode schedulerNode0 = newSchedulerNode(n0r1.getHostName(),
n0r1.getRackName(), n0r1.getNodeID());
SchedulerNode schedulerNode1 = newSchedulerNode(n1r1.getHostName(),
n1r1.getRackName(), n1r1.getNodeID());
SchedulerNode schedulerNode2 = newSchedulerNode(n2r2.getHostName(),
n2r2.getRackName(), n2r2.getNodeID());
SchedulerNode schedulerNode3 = newSchedulerNode(n3r2.getHostName(),
n3r2.getRackName(), n3r2.getNodeID());
TargetApplicationsNamespace notSelf =
new TargetApplicationsNamespace.NotSelf();
//***************************
// 1) not-self, app1
//***************************
// Anti-affinity with "A" from app2 and app3,
// n0 and n1 both have tag "A" from either app2 or app3, so they are
// not qualified for the placement.
PlacementConstraint constraint1 = PlacementConstraints.targetNotIn(
NODE, allocationTagWithNamespace(notSelf.toString(), "A"))
.build();
Map<Set<String>, PlacementConstraint> constraintMap = new HashMap<>();
Set<String> srcTags1 = ImmutableSet.of("A");
constraintMap.put(srcTags1, constraint1);
pcm.registerApplication(application1, constraintMap);
Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(
application1, createSchedulingRequest(srcTags1),
schedulerNode0, pcm, tm));
Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(
application1, createSchedulingRequest(srcTags1),
schedulerNode1, pcm, tm));
// n2 only carries app1's own "A" tags, which not-self ignores.
Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(
application1, createSchedulingRequest(srcTags1),
schedulerNode2, pcm, tm));
Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(
application1, createSchedulingRequest(srcTags1),
schedulerNode3, pcm, tm));
pcm.unregisterApplication(application1);
//***************************
// 2) not-self, app1
//***************************
// Affinity with "A" from app2 and app3,
// N0 and n1 are qualified for the placement.
PlacementConstraint constraint2 = PlacementConstraints.targetIn(
NODE, allocationTagWithNamespace(notSelf.toString(), "A"))
.build();
Map<Set<String>, PlacementConstraint> cm2 = new HashMap<>();
Set<String> srcTags2 = ImmutableSet.of("A");
cm2.put(srcTags2, constraint2);
pcm.registerApplication(application1, cm2);
Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(
application1, createSchedulingRequest(srcTags2),
schedulerNode0, pcm, tm));
Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(
application1, createSchedulingRequest(srcTags2),
schedulerNode1, pcm, tm));
Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(
application1, createSchedulingRequest(srcTags2),
schedulerNode2, pcm, tm));
Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(
application1, createSchedulingRequest(srcTags2),
schedulerNode3, pcm, tm));
pcm.unregisterApplication(application1);
}
|
@Override
public void handleAsyncException(String message, Throwable exception) {
    // Async failures are only meaningful while the task is restoring or running;
    // once it has terminated (or is terminating) they are intentionally dropped.
    final boolean taskStillActive = isRestoring || isRunning;
    if (taskStillActive) {
        asyncExceptionHandler.handleAsyncException(message, exception);
    }
}
|
@Test
void testAsyncExceptionHandlerHandleExceptionForwardsMessageProperly() {
// The handler must wrap the cause in an AsynchronousException and preserve
// both the supplied message and the original exception as the cause.
MockEnvironment mockEnvironment = MockEnvironment.builder().build();
RuntimeException expectedException = new RuntimeException("RUNTIME EXCEPTION");
final StreamTask.StreamTaskAsyncExceptionHandler asyncExceptionHandler =
new StreamTask.StreamTaskAsyncExceptionHandler(mockEnvironment);
mockEnvironment.setExpectedExternalFailureCause(AsynchronousException.class);
final String expectedErrorMessage = "EXPECTED_ERROR MESSAGE";
asyncExceptionHandler.handleAsyncException(expectedErrorMessage, expectedException);
// expect an AsynchronousException containing the supplied error details
Optional<? extends Throwable> actualExternalFailureCause =
mockEnvironment.getActualExternalFailureCause();
final Throwable actualException =
actualExternalFailureCause.orElseThrow(
() -> new AssertionError("Expected exceptional completion"));
assertThat(actualException)
.isInstanceOf(AsynchronousException.class)
.hasMessage(expectedErrorMessage)
.hasCause(expectedException);
}
|
/**
 * Creates a new config entry. Rejects requests missing group/key/content, and
 * refuses to overwrite an existing entry with the same identity.
 */
@PostMapping("/config")
public Result<Boolean> addConfig(@RequestBody ConfigInfo configInfo) {
    final boolean missingRequiredField = StringUtils.isEmpty(configInfo.getGroup())
            || StringUtils.isEmpty(configInfo.getKey())
            || StringUtils.isEmpty(configInfo.getContent());
    if (missingRequiredField) {
        return new Result<>(ResultCodeType.MISS_PARAM.getCode(), ResultCodeType.MISS_PARAM.getMessage());
    }
    // Publish only when no matching config already exists.
    Result<List<ConfigInfo>> existing = configService.getConfigList(configInfo, PluginType.OTHER, true);
    if (!CollectionUtils.isEmpty(existing.getData())) {
        return new Result<>(ResultCodeType.EXISTS.getCode(), ResultCodeType.EXISTS.getMessage());
    }
    return configService.publishConfig(configInfo);
}
|
@Test
public void addConfig() {
// configInfo is expected to fail validation/duplication; addConfigInfo should publish.
// NOTE(review): the exact fixture setup lives outside this view — confirm which
// precondition (missing param vs. existing entry) makes the first call fail.
Result<Boolean> result = configController.addConfig(configInfo);
Assert.assertFalse(result.isSuccess());
Result<Boolean> result1 = configController.addConfig(addConfigInfo);
Assert.assertTrue(result1.isSuccess());
}
|
/**
 * Resolves the SRTM tile file name for the given coordinate, e.g.
 * "Eurasia/N42E011". Returns null when no area is registered for the cell.
 * Latitude is zero-padded to 2 digits, longitude to 3.
 */
@Override
String getFileName(double lat, double lon) {
    String area = areas.get(calcIntKey(lat, lon));
    if (area == null)
        return null;
    int minLat = Math.abs(down(lat));
    int minLon = Math.abs(down(lon));
    StringBuilder name = new StringBuilder(area).append('/');
    name.append(lat >= 0 ? "N" : "S");
    if (minLat < 10)
        name.append('0');
    name.append(minLat);
    name.append(lon >= 0 ? "E" : "W");
    if (minLon < 10)
        name.append('0');
    if (minLon < 100)
        name.append('0');
    name.append(minLon);
    return name.toString();
}
|
@Disabled
@Test
public void testGetEleVerticalBorder() {
// Checks tile selection and elevation continuity across a latitude tile border.
instance = new SRTMProvider();
// Border between the tiles N42E011 and N43E011
assertEquals("Eurasia/N42E011", instance.getFileName(42.999999, 11.48));
assertEquals(419, instance.getEle(42.999999, 11.48), precision);
assertEquals("Eurasia/N43E011", instance.getFileName(43.000001, 11.48));
assertEquals(419, instance.getEle(43.000001, 11.48), precision);
}
|
/**
 * Persists the given endpoints object after validating it is non-null and
 * carries a non-empty UID; delegates storage to the endpoints store.
 */
@Override
public void createEndpoints(Endpoints endpoints) {
    checkNotNull(endpoints, ERR_NULL_ENDPOINTS);
    String uid = endpoints.getMetadata().getUid();
    checkArgument(!Strings.isNullOrEmpty(uid), ERR_NULL_ENDPOINTS_UID);
    k8sEndpointsStore.createEndpoints(endpoints);
    log.info(String.format(MSG_ENDPOINTS, endpoints.getMetadata().getName(), MSG_CREATED));
}
|
@Test(expected = IllegalArgumentException.class)
public void testCreateDuplicateEndpoints() {
// Second create with the same UID must be rejected by the store.
target.createEndpoints(ENDPOINTS);
target.createEndpoints(ENDPOINTS);
}
|
/**
 * Maps a Camel header value to a JMS-legal header value, or null if the type
 * is not supported as a JMS header. BigInteger/BigDecimal (and generic
 * CharSequence) are stringified; primitives-wrapper types pass through.
 * Dates are either ISO-8601 (UTC) formatted or stringified per endpoint config.
 */
protected Object getValidJMSHeaderValue(String headerName, Object headerValue) {
    // BigInteger/BigDecimal must be checked before the generic Number pass-through.
    if (headerValue instanceof BigInteger || headerValue instanceof BigDecimal) {
        return headerValue.toString();
    }
    if (headerValue instanceof String || headerValue instanceof Number
            || headerValue instanceof Character || headerValue instanceof Boolean) {
        return headerValue;
    }
    // Non-String CharSequence implementations are converted to plain strings.
    if (headerValue instanceof CharSequence) {
        return headerValue.toString();
    }
    if (headerValue instanceof Date) {
        if (this.endpoint.getConfiguration().isFormatDateHeadersToIso8601()) {
            return ZonedDateTime.ofInstant(((Date) headerValue).toInstant(), ZoneOffset.UTC).toString();
        } else {
            return headerValue.toString();
        }
    }
    // Unsupported type: drop the header.
    return null;
}
|
@Test
public void testGetValidJmsHeaderValueWithBigIntegerShouldSucceed() {
// BigInteger is not a legal JMS header type, so it must be stringified.
Object value = jmsBindingUnderTest.getValidJMSHeaderValue("foo", new BigInteger("12345"));
assertEquals("12345", value);
}
|
/**
 * Lists all configs in the given namespace. The namespace id is validated and
 * normalized (e.g. the public alias is mapped to the internal representation)
 * before querying.
 *
 * @throws NacosApiException when the namespace id fails validation
 */
@GetMapping(value = "/configs")
@Secured(action = ActionTypes.READ, signType = SignType.CONFIG)
public Result<List<ConfigInfoWrapper>> getConfigsByTenant(@RequestParam("namespaceId") String namespaceId)
        throws NacosApiException {
    // check namespaceId
    ParamUtils.checkTenantV2(namespaceId);
    String resolvedNamespaceId = NamespaceUtil.processNamespaceParameter(namespaceId);
    return Result.success(historyService.getConfigListByNamespace(resolvedNamespaceId));
}
|
@Test
void testGetConfigListByNamespaceWhenIsPublic() throws NacosApiException {
// Querying with the public namespace alias must hit the service with the
// normalized namespace id and pass the payload through unchanged.
ConfigInfoWrapper configInfoWrapper = new ConfigInfoWrapper();
configInfoWrapper.setDataId("test");
configInfoWrapper.setGroup("test");
configInfoWrapper.setContent("test");
List<ConfigInfoWrapper> configInfoWrappers = Collections.singletonList(configInfoWrapper);
when(historyService.getConfigListByNamespace(TEST_NAMESPACE_ID)).thenReturn(configInfoWrappers);
Result<List<ConfigInfoWrapper>> result = historyControllerV2.getConfigsByTenant(TEST_NAMESPACE_ID_PUBLIC);
verify(historyService).getConfigListByNamespace(TEST_NAMESPACE_ID);
assertEquals(ErrorCode.SUCCESS.getCode(), result.getCode());
List<ConfigInfoWrapper> actualList = result.getData();
assertEquals(configInfoWrappers.size(), actualList.size());
ConfigInfoWrapper actualConfigInfoWrapper = actualList.get(0);
assertEquals(configInfoWrapper.getDataId(), actualConfigInfoWrapper.getDataId());
assertEquals(configInfoWrapper.getGroup(), actualConfigInfoWrapper.getGroup());
assertEquals(configInfoWrapper.getContent(), actualConfigInfoWrapper.getContent());
}
|
/**
 * Builds a {@link ProjectMeasuresQuery} from parsed filter criteria, optionally
 * restricted to the given project UUIDs (null means no restriction).
 */
static ProjectMeasuresQuery newProjectMeasuresQuery(List<Criterion> criteria, @Nullable Set<String> projectUuids) {
    ProjectMeasuresQuery query = new ProjectMeasuresQuery();
    if (projectUuids != null) {
        query.setProjectUuids(projectUuids);
    }
    for (Criterion criterion : criteria) {
        processCriterion(criterion, query);
    }
    return query;
}
|
@Test
public void create_query() {
// Criteria values are parsed into doubles and attached as metric criteria.
List<Criterion> criteria = asList(
Criterion.builder().setKey("ncloc").setOperator(GT).setValue("10").build(),
Criterion.builder().setKey("coverage").setOperator(LTE).setValue("80").build());
ProjectMeasuresQuery underTest = newProjectMeasuresQuery(criteria, emptySet());
assertThat(underTest.getMetricCriteria())
.extracting(MetricCriterion::getMetricKey, MetricCriterion::getOperator, MetricCriterion::getValue)
.containsOnly(
tuple("ncloc", GT, 10d),
tuple("coverage", Operator.LTE, 80d));
}
|
/**
 * Generates code for the given table and returns a map of file path to file content.
 * For master tables, also loads and validates every sub-table and its join column.
 */
@Override
public Map<String, String> generationCodes(Long tableId) {
// Validate that the table exists
CodegenTableDO table = codegenTableMapper.selectById(tableId);
if (table == null) {
throw exception(CODEGEN_TABLE_NOT_EXISTS);
}
// A table with no columns cannot produce any code
List<CodegenColumnDO> columns = codegenColumnMapper.selectListByTableId(tableId);
if (CollUtil.isEmpty(columns)) {
throw exception(CODEGEN_COLUMN_NOT_EXISTS);
}
// If this is a master table (master-sub layout), load the corresponding sub-table info
List<CodegenTableDO> subTables = null;
List<List<CodegenColumnDO>> subColumnsList = null;
if (CodegenTemplateTypeEnum.isMaster(table.getTemplateType())) {
// Validate that at least one sub-table exists
subTables = codegenTableMapper.selectListByTemplateTypeAndMasterTableId(
CodegenTemplateTypeEnum.SUB.getType(), tableId);
if (CollUtil.isEmpty(subTables)) {
throw exception(CODEGEN_MASTER_GENERATION_FAIL_NO_SUB_TABLE);
}
// Validate that each sub-table's join column exists among its columns
subColumnsList = new ArrayList<>();
for (CodegenTableDO subTable : subTables) {
List<CodegenColumnDO> subColumns = codegenColumnMapper.selectListByTableId(subTable.getId());
if (CollUtil.findOne(subColumns, column -> column.getId().equals(subTable.getSubJoinColumnId())) == null) {
throw exception(CODEGEN_SUB_COLUMN_NOT_EXISTS, subTable.getId());
}
subColumnsList.add(subColumns);
}
}
// Run the code generation engine
return codegenEngine.execute(table, columns, subTables, subColumnsList);
}
|
@Test
public void testGenerationCodes_columnNotExists() {
// Mock data (CodegenTableDO): a table is inserted but no columns are,
// so generation must fail with CODEGEN_COLUMN_NOT_EXISTS.
CodegenTableDO table = randomPojo(CodegenTableDO.class,
o -> o.setScene(CodegenSceneEnum.ADMIN.getScene())
.setTemplateType(CodegenTemplateTypeEnum.MASTER_NORMAL.getType()));
codegenTableMapper.insert(table);
// Prepare parameters
Long tableId = table.getId();
// Invoke and assert the expected service exception
assertServiceException(() -> codegenService.generationCodes(tableId),
CODEGEN_COLUMN_NOT_EXISTS);
}
|
/**
 * Finds the highest offset (and its epoch) that has been copied to remote storage
 * for this partition. Walks the leader-epoch cache from the latest entry backwards
 * until an epoch with a remote offset is found; clamps the result to the local
 * epoch chain's end offset to guard against unclean-leader-election divergence.
 * Returns (-1, NO_PARTITION_LEADER_EPOCH) when nothing is in remote storage.
 */
OffsetAndEpoch findHighestRemoteOffset(TopicIdPartition topicIdPartition, UnifiedLog log) throws RemoteStorageException {
OffsetAndEpoch offsetAndEpoch = null;
Option<LeaderEpochFileCache> leaderEpochCacheOpt = log.leaderEpochCache();
if (leaderEpochCacheOpt.isDefined()) {
LeaderEpochFileCache cache = leaderEpochCacheOpt.get();
Optional<EpochEntry> maybeEpochEntry = cache.latestEntry();
// Iterate from the newest epoch down to the oldest until a remote offset is found.
while (offsetAndEpoch == null && maybeEpochEntry.isPresent()) {
int epoch = maybeEpochEntry.get().epoch;
Optional<Long> highestRemoteOffsetOpt =
remoteLogMetadataManager.highestOffsetForEpoch(topicIdPartition, epoch);
if (highestRemoteOffsetOpt.isPresent()) {
Map.Entry<Integer, Long> entry = cache.endOffsetFor(epoch, log.logEndOffset());
int requestedEpoch = entry.getKey();
long endOffset = entry.getValue();
long highestRemoteOffset = highestRemoteOffsetOpt.get();
if (endOffset <= highestRemoteOffset) {
// Remote storage is ahead of the local epoch chain (e.g. after an unclean
// leader election): cap at the local end offset (exclusive, hence -1).
LOGGER.info("The end-offset for epoch {}: ({}, {}) is less than or equal to the " +
"highest-remote-offset: {} for partition: {}", epoch, requestedEpoch, endOffset,
highestRemoteOffset, topicIdPartition);
offsetAndEpoch = new OffsetAndEpoch(endOffset - 1, requestedEpoch);
} else {
offsetAndEpoch = new OffsetAndEpoch(highestRemoteOffset, epoch);
}
}
maybeEpochEntry = cache.previousEntry(epoch);
}
}
if (offsetAndEpoch == null) {
offsetAndEpoch = new OffsetAndEpoch(-1L, RecordBatch.NO_PARTITION_LEADER_EPOCH);
}
return offsetAndEpoch;
}
|
@Test
void testFindHighestRemoteOffsetWithUncleanLeaderElection() throws RemoteStorageException {
// Remote storage claims offset 200 for epoch 0, but the local epoch chain says
// epoch 0 ends at 150 — the result must be clamped to (149, 0).
List<EpochEntry> totalEpochEntries = Arrays.asList(
new EpochEntry(0, 0),
new EpochEntry(1, 150),
new EpochEntry(2, 300)
);
checkpoint.write(totalEpochEntries);
LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler);
when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache));
TopicIdPartition tpId = new TopicIdPartition(Uuid.randomUuid(), tp);
when(remoteLogMetadataManager.highestOffsetForEpoch(eq(tpId), anyInt())).thenAnswer(ans -> {
Integer epoch = ans.getArgument(1, Integer.class);
if (epoch == 0) {
return Optional.of(200L);
} else {
return Optional.empty();
}
});
OffsetAndEpoch offsetAndEpoch = remoteLogManager.findHighestRemoteOffset(tpId, mockLog);
assertEquals(new OffsetAndEpoch(149L, 0), offsetAndEpoch);
}
|
/**
 * Assembles the Elasticsearch settings map section by section (filesystem,
 * network, cluster, security, misc) and logs the resolved listen addresses.
 */
public Map<String, String> build() {
    Map<String, String> settings = new HashMap<>();
    configureFileSystem(settings);
    configureNetwork(settings);
    configureCluster(settings);
    configureSecurity(settings);
    configureOthers(settings);
    LOGGER.info("Elasticsearch listening on [HTTP: {}:{}, TCP: {}:{}]",
        settings.get(ES_HTTP_HOST_KEY), settings.get(ES_HTTP_PORT_KEY),
        settings.get(ES_TRANSPORT_HOST_KEY), settings.get(ES_TRANSPORT_PORT_KEY));
    return settings;
}
|
@Test
public void test_default_settings_for_cluster_mode() throws Exception {
// With cluster mode enabled, the generated ES settings must carry the
// configured cluster name and node name.
File homeDir = temp.newFolder();
Props props = new Props(new Properties());
props.set(SEARCH_PORT.getKey(), "1234");
props.set(SEARCH_HOST.getKey(), "127.0.0.1");
props.set(CLUSTER_NODE_SEARCH_HOST.getKey(), "127.0.0.1");
props.set(CLUSTER_NODE_SEARCH_PORT.getKey(), "1234");
props.set(CLUSTER_NODE_ES_HOST.getKey(), "127.0.0.1");
props.set(CLUSTER_NODE_ES_PORT.getKey(), "1234");
props.set(PATH_HOME.getKey(), homeDir.getAbsolutePath());
props.set(PATH_DATA.getKey(), temp.newFolder().getAbsolutePath());
props.set(PATH_TEMP.getKey(), temp.newFolder().getAbsolutePath());
props.set(PATH_LOGS.getKey(), temp.newFolder().getAbsolutePath());
props.set(CLUSTER_NAME.getKey(), "sonarqube-1");
props.set(Property.CLUSTER_ENABLED.getKey(), "true");
props.set(CLUSTER_NODE_NAME.getKey(), "node-1");
EsSettings esSettings = new EsSettings(props, new EsInstallation(props), system);
Map<String, String> generated = esSettings.build();
assertThat(generated)
.containsEntry("cluster.name", "sonarqube-1")
.containsEntry("node.name", "node-1");
}
|
/**
 * Substitutes ${...} variable references in {@code val} using {@code pc1}.
 * Convenience overload delegating to the two-container variant with no
 * secondary container.
 */
public static String substVars(String val, PropertyContainer pc1) {
return substVars(val, pc1, null);
}
|
@Test(timeout = 1000)
public void detectCircularReferences3() {
// A -> B -> C -> A forms a cycle; substitution must fail fast (timeout guards
// against infinite recursion) with a message naming the detected chain.
context.putProperty("A", "${B}");
context.putProperty("B", "${C}");
context.putProperty("C", "${A}");
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("Circular variable reference detected while parsing input [${B} --> ${C} --> ${A} --> ${B}]");
OptionHelper.substVars("${B} ", context);
}
|
/**
 * Discards both snapshot parts (job-manager-owned first, then task-local).
 * Both discards are always attempted; the first failure is rethrown with any
 * later failure attached as a suppressed exception.
 */
@Override
public void discardState() throws Exception {
    Exception firstFailure = null;
    if (jobManagerOwnedSnapshot != null) {
        try {
            jobManagerOwnedSnapshot.discardState();
        } catch (Exception remoteDiscardFailure) {
            firstFailure = remoteDiscardFailure;
        }
    }
    if (taskLocalSnapshot != null) {
        try {
            taskLocalSnapshot.discardState();
        } catch (Exception localDiscardFailure) {
            firstFailure =
                    ExceptionUtils.firstOrSuppressed(localDiscardFailure, firstFailure);
        }
    }
    if (firstFailure != null) {
        throw firstFailure;
    }
}
|
@Test
void discardState() throws Exception {
// Discarding the composite result must discard both the remote and local parts.
SnapshotResult<StateObject> result =
SnapshotResult.withLocalState(mock(StateObject.class), mock(StateObject.class));
result.discardState();
verify(result.getJobManagerOwnedSnapshot()).discardState();
verify(result.getTaskLocalSnapshot()).discardState();
}
|
/**
 * Uniform load shedder: locates the most- and least-loaded brokers by message
 * rate and by throughput, and — when the configured imbalance threshold is
 * exceeded — selects bundles on the overloaded broker to unload so that load
 * can be redistributed more evenly. Returns a multimap of broker -> bundles
 * chosen for unloading (cleared and repopulated on every invocation).
 */
@Override
public Multimap<String, String> findBundlesForUnloading(final LoadData loadData, final ServiceConfiguration conf) {
selectedBundlesCache.clear();
Map<String, BrokerData> brokersData = loadData.getBrokerData();
Map<String, BundleData> loadBundleData = loadData.getBundleDataForLoadShedding();
Map<String, Long> recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles();
// Track extremes across all brokers for both metrics.
MutableObject<String> msgRateOverloadedBroker = new MutableObject<>();
MutableObject<String> msgThroughputOverloadedBroker = new MutableObject<>();
MutableObject<String> msgRateUnderloadedBroker = new MutableObject<>();
MutableObject<String> msgThroughputUnderloadedBroker = new MutableObject<>();
MutableDouble maxMsgRate = new MutableDouble(-1);
MutableDouble maxThroughput = new MutableDouble(-1);
MutableDouble minMsgRate = new MutableDouble(Integer.MAX_VALUE);
MutableDouble minThroughput = new MutableDouble(Integer.MAX_VALUE);
brokersData.forEach((broker, data) -> {
double msgRate = data.getLocalData().getMsgRateIn() + data.getLocalData().getMsgRateOut();
double throughputRate = data.getLocalData().getMsgThroughputIn()
+ data.getLocalData().getMsgThroughputOut();
if (msgRate > maxMsgRate.getValue()) {
msgRateOverloadedBroker.setValue(broker);
maxMsgRate.setValue(msgRate);
}
if (throughputRate > maxThroughput.getValue()) {
msgThroughputOverloadedBroker.setValue(broker);
maxThroughput.setValue(throughputRate);
}
if (msgRate < minMsgRate.getValue()) {
msgRateUnderloadedBroker.setValue(broker);
minMsgRate.setValue(msgRate);
}
if (throughputRate < minThroughput.getValue()) {
msgThroughputUnderloadedBroker.setValue(broker);
minThroughput.setValue(throughputRate);
}
});
// find the difference between two brokers based on msgRate and throughout and check if the load distribution
// discrepancy is higher than threshold. if that matches then try to unload bundle from overloaded brokers to
// give chance of uniform load distribution.
// Guard against division by (near-)zero minimums below.
if (minMsgRate.getValue() <= EPS && minMsgRate.getValue() >= -EPS) {
minMsgRate.setValue(1.0);
}
if (minThroughput.getValue() <= EPS && minThroughput.getValue() >= -EPS) {
minThroughput.setValue(1.0);
}
double msgRateDifferencePercentage = ((maxMsgRate.getValue() - minMsgRate.getValue()) * 100)
/ (minMsgRate.getValue());
double msgThroughputDifferenceRate = maxThroughput.getValue() / minThroughput.getValue();
// if the threshold matches then find out how much load needs to be unloaded by considering number of msgRate
// and throughput.
boolean isMsgRateThresholdExceeded = conf.getLoadBalancerMsgRateDifferenceShedderThreshold() > 0
&& msgRateDifferencePercentage > conf.getLoadBalancerMsgRateDifferenceShedderThreshold();
boolean isMsgThroughputThresholdExceeded = conf
.getLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold() > 0
&& msgThroughputDifferenceRate > conf
.getLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold();
if (isMsgRateThresholdExceeded || isMsgThroughputThresholdExceeded) {
// Target amount of load to move = configured percentage of the max-min gap.
MutableInt msgRateRequiredFromUnloadedBundles = new MutableInt(
(int) ((maxMsgRate.getValue() - minMsgRate.getValue()) * conf.getMaxUnloadPercentage()));
MutableInt msgThroughputRequiredFromUnloadedBundles = new MutableInt(
(int) ((maxThroughput.getValue() - minThroughput.getValue())
* conf.getMaxUnloadPercentage()));
// msgRate imbalance takes precedence over throughput imbalance.
if (isMsgRateThresholdExceeded) {
if (log.isDebugEnabled()) {
log.debug("Found bundles for uniform load balancing. "
+ "msgRate overloaded broker: {} with msgRate: {}, "
+ "msgRate underloaded broker: {} with msgRate: {}",
msgRateOverloadedBroker.getValue(), maxMsgRate.getValue(),
msgRateUnderloadedBroker.getValue(), minMsgRate.getValue());
}
LocalBrokerData overloadedBrokerData =
brokersData.get(msgRateOverloadedBroker.getValue()).getLocalData();
// Never strip a broker of its last bundle, and skip trivially small moves.
if (overloadedBrokerData.getBundles().size() > 1
&& (msgRateRequiredFromUnloadedBundles.getValue() >= conf.getMinUnloadMessage())) {
// Sort bundles by msgRate, then pick the bundle which can help to reduce load uniformly with
// under-loaded broker
loadBundleData.entrySet().stream()
.filter(e -> overloadedBrokerData.getBundles().contains(e.getKey()))
.map((e) -> {
String bundle = e.getKey();
TimeAverageMessageData shortTermData = e.getValue().getShortTermData();
double msgRate = shortTermData.getMsgRateIn() + shortTermData.getMsgRateOut();
return Pair.of(bundle, msgRate);
}).filter(e -> !recentlyUnloadedBundles.containsKey(e.getLeft()))
.sorted((e1, e2) -> Double.compare(e2.getRight(), e1.getRight())).forEach((e) -> {
if (conf.getMaxUnloadBundleNumPerShedding() != -1
&& selectedBundlesCache.size() >= conf.getMaxUnloadBundleNumPerShedding()) {
return;
}
String bundle = e.getLeft();
double bundleMsgRate = e.getRight();
if (bundleMsgRate <= (msgRateRequiredFromUnloadedBundles.getValue()
+ 1000/* delta */)) {
log.info("Found bundle to unload with msgRate {}", bundleMsgRate);
msgRateRequiredFromUnloadedBundles.add(-bundleMsgRate);
selectedBundlesCache.put(msgRateOverloadedBroker.getValue(), bundle);
}
});
}
} else {
if (log.isDebugEnabled()) {
log.debug("Found bundles for uniform load balancing. "
+ "msgThroughput overloaded broker: {} with msgThroughput {}, "
+ "msgThroughput underloaded broker: {} with msgThroughput: {}",
msgThroughputOverloadedBroker.getValue(), maxThroughput.getValue(),
msgThroughputUnderloadedBroker.getValue(), minThroughput.getValue());
}
LocalBrokerData overloadedBrokerData =
brokersData.get(msgThroughputOverloadedBroker.getValue()).getLocalData();
if (overloadedBrokerData.getBundles().size() > 1
&&
msgThroughputRequiredFromUnloadedBundles.getValue() >= conf.getMinUnloadMessageThroughput()) {
// Sort bundles by throughput, then pick the bundle which can help to reduce load uniformly with
// under-loaded broker
loadBundleData.entrySet().stream()
.filter(e -> overloadedBrokerData.getBundles().contains(e.getKey()))
.map((e) -> {
String bundle = e.getKey();
TimeAverageMessageData shortTermData = e.getValue().getShortTermData();
double msgThroughput = shortTermData.getMsgThroughputIn()
+ shortTermData.getMsgThroughputOut();
return Pair.of(bundle, msgThroughput);
}).filter(e -> !recentlyUnloadedBundles.containsKey(e.getLeft()))
.sorted((e1, e2) -> Double.compare(e2.getRight(), e1.getRight())).forEach((e) -> {
if (conf.getMaxUnloadBundleNumPerShedding() != -1
&& selectedBundlesCache.size() >= conf.getMaxUnloadBundleNumPerShedding()) {
return;
}
String bundle = e.getLeft();
double msgThroughput = e.getRight();
if (msgThroughput <= (msgThroughputRequiredFromUnloadedBundles.getValue()
+ 1000/* delta */)) {
log.info("Found bundle to unload with msgThroughput {}", msgThroughput);
msgThroughputRequiredFromUnloadedBundles.add(-msgThroughput);
selectedBundlesCache.put(msgThroughputOverloadedBroker.getValue(), bundle);
}
});
}
}
}
return selectedBundlesCache;
}
|
@Test
public void testBrokerWithMultipleBundles() {
// One loaded broker with 10 bundles vs. an empty broker: the shedder must
// select at least one bundle to unload from the loaded broker.
int numBundles = 10;
LoadData loadData = new LoadData();
LocalBrokerData broker1 = new LocalBrokerData();
LocalBrokerData broker2 = new LocalBrokerData();
String broker2Name = "broker2";
double brokerThroughput = 0;
for (int i = 1; i <= numBundles; ++i) {
broker1.getBundles().add("bundle-" + i);
BundleData bundle = new BundleData();
TimeAverageMessageData timeAverageMessageData = new TimeAverageMessageData();
double throughput = i * 1024 * 1024;
timeAverageMessageData.setMsgThroughputIn(throughput);
timeAverageMessageData.setMsgThroughputOut(throughput);
bundle.setShortTermData(timeAverageMessageData);
loadData.getBundleData().put("bundle-" + i, bundle);
brokerThroughput += throughput;
}
broker1.setMsgThroughputIn(brokerThroughput);
broker1.setMsgThroughputOut(brokerThroughput);
loadData.getBrokerData().put("broker-1", new BrokerData(broker1));
loadData.getBrokerData().put(broker2Name, new BrokerData(broker2));
Multimap<String, String> bundlesToUnload = uniformLoadShedder.findBundlesForUnloading(loadData, conf);
assertFalse(bundlesToUnload.isEmpty());
}
|
/**
 * Reconciles a Backup resource through its lifecycle:
 * deletion (cleanup + finalizer removal), finalizer addition, PENDING -> RUNNING
 * -> SUCCEEDED/FAILED execution, recovery of a stale RUNNING phase, and
 * expiry-based requeue/deletion for terminal backups.
 */
@Override
public Result reconcile(Request request) {
return client.fetch(Backup.class, request.name())
.map(backup -> {
var metadata = backup.getMetadata();
var status = backup.getStatus();
var spec = backup.getSpec();
// Deletion path: clean up artifacts, drop our finalizer, stop reconciling.
if (isDeleted(backup)) {
if (removeFinalizers(metadata, Set.of(HOUSE_KEEPER_FINALIZER))) {
migrationService.cleanup(backup).block();
client.update(backup);
}
return doNotRetry();
}
// Ensure our finalizer is present so deletion triggers cleanup above.
if (addFinalizers(metadata, Set.of(HOUSE_KEEPER_FINALIZER))) {
client.update(backup);
}
if (Phase.PENDING.equals(status.getPhase())) {
// Do backup
try {
status.setPhase(Phase.RUNNING);
status.setStartTimestamp(Instant.now(clock));
updateStatus(request.name(), status);
// Long period execution when backing up
migrationService.backup(backup).block();
status.setPhase(Phase.SUCCEEDED);
status.setCompletionTimestamp(Instant.now(clock));
updateStatus(request.name(), status);
} catch (Throwable t) {
var unwrapped = Exceptions.unwrap(t);
log.error("Failed to backup", unwrapped);
// Only happen when shutting down
status.setPhase(Phase.FAILED);
if (unwrapped instanceof InterruptedException) {
status.setFailureReason("Interrupted");
status.setFailureMessage("The backup process was interrupted.");
} else {
status.setFailureReason("SystemError");
status.setFailureMessage(
"Something went wrong! Error message: " + unwrapped.getMessage());
}
updateStatus(request.name(), status);
}
}
// Only happen when failing to update status when interrupted
if (Phase.RUNNING.equals(status.getPhase())) {
status.setPhase(Phase.FAILED);
status.setFailureReason("UnexpectedExit");
status.setFailureMessage("The backup process may exit abnormally.");
updateStatus(request.name(), status);
}
// Check the expires at and requeue if necessary
if (isTerminal(status.getPhase())) {
var expiresAt = spec.getExpiresAt();
if (expiresAt != null) {
var now = Instant.now(clock);
// Not expired yet: requeue exactly at the remaining time-to-live.
if (now.isBefore(expiresAt)) {
return new Result(true, Duration.between(now, expiresAt));
}
client.delete(backup);
}
}
return doNotRetry();
}).orElseGet(Result::doNotRetry);
}
|
@Test
void somethingWentWrongWhenBackup() {
// A backup that throws mid-run must end FAILED with reason SystemError,
// a start timestamp set but no completion timestamp, and no re-enqueue.
var name = "fake-backup";
var backup = createPureBackup(name);
backup.getSpec().setFormat("zip");
when(client.fetch(Backup.class, name)).thenReturn(Optional.of(backup));
doNothing().when(client).update(backup);
when(migrationService.backup(backup))
.thenReturn(Mono.error(Exceptions.propagate(new IOException("File not found"))));
var result = reconciler.reconcile(new Reconciler.Request(name));
assertNotNull(result);
assertFalse(result.reEnqueue());
var status = backup.getStatus();
assertEquals(Backup.Phase.FAILED, status.getPhase());
assertNotNull(status.getStartTimestamp());
assertNull(status.getCompletionTimestamp());
assertEquals("SystemError", status.getFailureReason());
// 1. query
// 2. pending -> running
// 3. running -> failed
verify(client, times(3)).fetch(Backup.class, name);
verify(client, times(3)).update(backup);
verify(migrationService).backup(backup);
}
|
/**
 * Observes a watermark value arriving on the given input queue.
 *
 * @param queueIndex index of the input queue the watermark arrived on
 * @param wmValue the observed watermark timestamp
 * @return the newly coalesced watermark, or a sentinel when no new watermark
 *         results — NOTE(review): exact sentinel semantics (NO_NEW_WM vs.
 *         Long.MIN_VALUE) are defined by implementations; confirm there.
 */
public abstract long observeWm(int queueIndex, long wmValue);
|
@Test
public void when_allIdleAndDuplicateIdleMessage_then_processed() {
    // Duplicate idle messages are possible in this scenario:
    // A source instance emits IDLE_MESSAGE, then an event (not causing a WM) and then another
    // IDLE_MESSAGE again. The IDLE_MESSAGE is broadcast, but the event is not. So a downstream
    // instance can receive two IDLE_MESSAGE-s in a row.
    // First idle on queue 0: no coalesced watermark yet.
    assertEquals(Long.MIN_VALUE, wc.observeWm(0, IDLE_MESSAGE.timestamp()));
    // Queue 1 also idle: all queues idle, but no new watermark to forward.
    assertEquals(NO_NEW_WM, wc.observeWm(1, IDLE_MESSAGE.timestamp()));
    // Duplicate idle on queue 0 must be tolerated, not rejected.
    assertEquals(Long.MIN_VALUE, wc.observeWm(0, IDLE_MESSAGE.timestamp()));
}
|
/** Returns the smallest distribution bit count observed so far. */
int getLowestObservedDistributionBits() {
    return lowestObservedDistributionBits;
}
|
@Test
void lowest_observed_distribution_bit_is_initially_16() {
    // A freshly created tracker reports the default of 16 distribution bits.
    final StateVersionTracker versionTracker = createWithMockedMetrics();
    assertEquals(16, versionTracker.getLowestObservedDistributionBits());
}
|
/**
 * Proxies a remote content Uri to a local file Uri.
 * The copy runs on the background scheduler; the result is delivered on the
 * main thread.
 */
public static Single<Uri> proxy(@NonNull Context context, @NonNull Uri data) {
    final Single<Uri> source = Single.just(data);
    return source
        .subscribeOn(RxSchedulers.background())
        .observeOn(RxSchedulers.mainThread())
        .map(uri -> proxyContentUriToLocalFileUri(context, uri));
}
|
@Test
@Config(shadows = ShadowFileProvider.class)
public void testHappyPathKnownMime() throws IOException {
    // Registers the png mime type so the proxy appends a known extension,
    // then verifies the proxied local file Uri and the copied content.
    var shadowMimeTypeMap = Shadows.shadowOf(MimeTypeMap.getSingleton());
    shadowMimeTypeMap.addExtensionMimeTypeMapping("png", "image/png");
    final var uriSingle = LocalProxy.proxy(ApplicationProvider.getApplicationContext(), mUri);
    final Uri localUri = TestRxSchedulers.blockingGet(uriSingle);
    Assert.assertNotNull(localUri);
    // The result must be a content:// Uri served by the test file provider.
    Assert.assertEquals("content", localUri.getScheme());
    Assert.assertEquals("com.anysoftkeyboard.fileprovider.test", localUri.getAuthority());
    Assert.assertTrue(
        localUri.getPath() + " should have a different value!",
        localUri
            .getPath()
            .endsWith("com.anysoftkeyboard.fileprovider.test-dataDir/files/media/file.png.png"));
    File actualFile = new File(localUri.getPath());
    Assert.assertTrue(
        "File " + actualFile.getAbsolutePath() + " does not exist", actualFile.isFile());
    // The proxied file must contain exactly the original payload.
    final List<String> copiedData = Files.readLines(actualFile, Charsets.UTF8);
    Assert.assertEquals(1, copiedData.size());
    Assert.assertEquals("testing 123", copiedData.get(0));
}
|
/**
 * Parses a duration string of the form {@code "<number><unit>"}, e.g.
 * {@code "10 s"} or {@code "250ms"}. A missing unit label defaults to
 * milliseconds.
 *
 * @param text the string to parse, must not be null
 * @return the parsed duration
 * @throws IllegalArgumentException if the string is empty, has an unknown
 *         unit, or the value overflows a {@link Duration}
 * @throws NumberFormatException if the string does not start with a number
 */
public static Duration parseDuration(String text) {
    checkNotNull(text);
    final String trimmed = text.trim();
    checkArgument(!trimmed.isEmpty(), "argument is an empty- or whitespace-only string");

    // Split into a leading run of ASCII digits and the trailing unit label.
    int digitEnd = 0;
    while (digitEnd < trimmed.length()) {
        final char c = trimmed.charAt(digitEnd);
        if (c < '0' || c > '9') {
            break;
        }
        digitEnd++;
    }
    final String number = trimmed.substring(0, digitEnd);
    final String unitLabel = trimmed.substring(digitEnd).trim().toLowerCase(Locale.US);
    if (number.isEmpty()) {
        throw new NumberFormatException("text does not start with a number");
    }

    final BigInteger value;
    try {
        value = new BigInteger(number); // this throws a NumberFormatException
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException(
            "The value '" + number + "' cannot be represented as an integer number.", e);
    }

    // An absent label means milliseconds; otherwise look the label up.
    final ChronoUnit unit =
        unitLabel.isEmpty() ? ChronoUnit.MILLIS : LABEL_TO_UNIT_MAP.get(unitLabel);
    if (unit == null) {
        throw new IllegalArgumentException(
            "Time interval unit label '"
                + unitLabel
                + "' does not match any of the recognized units: "
                + TimeUnit.getAllUnits());
    }

    try {
        return convertBigIntToDuration(value, unit);
    } catch (ArithmeticException e) {
        throw new IllegalArgumentException(
            "The value '"
                + number
                + "' cannot be represented as java.time.Duration (numeric overflow).",
            e);
    }
}
|
@Test
void testParseDurationSeconds() {
    // Every supported seconds label must parse to the same value, including
    // the variant with whitespace between number and label.
    final String[] inputs = {
        "667766s", "667766sec", "667766secs", "667766second", "667766seconds", "667766 s"
    };
    for (String input : inputs) {
        assertThat(TimeUtils.parseDuration(input).getSeconds()).isEqualTo(667766);
    }
}
|
/**
 * Returns the value of the designated column as an ASCII stream, delegating
 * to the underlying merged result set.
 */
@Override
public InputStream getAsciiStream(final int columnIndex) throws SQLException {
    return mergeResultSet.getInputStream(columnIndex, ASCII);
}
|
@Test
void assertGetAsciiStreamWithColumnIndex() throws SQLException {
    // NOTE(review): the stub matches the literal "Ascii" — presumably the
    // value of the ASCII constant used by getAsciiStream; confirm they agree.
    InputStream inputStream = mock(InputStream.class);
    when(mergeResultSet.getInputStream(1, "Ascii")).thenReturn(inputStream);
    assertThat(shardingSphereResultSet.getAsciiStream(1), instanceOf(InputStream.class));
}
|
/**
 * Serializes the given policy manager's configuration and persists it into
 * the federation state store, updating the local configuration and manager
 * caches on success.
 *
 * <p>The write is skipped when the manager is null, when the serialized
 * configuration is null, when the facade is read-only, or when the
 * configuration equals the cached copy.
 *
 * @param policyManager the policy manager whose configuration to persist
 * @throws YarnException if serialization or the state store write fails
 */
public void setPolicyManager(FederationPolicyManager policyManager)
    throws YarnException {
  if (policyManager == null) {
    LOG.warn("Attempting to set null policy manager");
    return;
  }
  // Extract the configuration from the policy manager
  String queue = policyManager.getQueue();
  SubClusterPolicyConfiguration conf;
  try {
    conf = policyManager.serializeConf();
  } catch (FederationPolicyInitializationException e) {
    // Pass the exception so the failure carries its stack trace in the log.
    LOG.warn("Error serializing policy for queue {}", queue, e);
    throw e;
  }
  if (conf == null) {
    // State store does not currently support setting a policy back to null
    // because it reads the queue name to set from the policy!
    LOG.warn("Skip setting policy to null for queue {} into state store",
        queue);
    return;
  }
  // Compare with configuration cache, if different, write the conf into
  // store and update our conf and manager cache
  if (!confCacheEqual(queue, conf)) {
    // Read-only mode never touches the store; check before entering the
    // try block since no store interaction happens here.
    if (readOnly) {
      LOG.info("[read-only] Skipping policy update for queue {}", queue);
      return;
    }
    try {
      LOG.info("Updating policy for queue {} into state store", queue);
      stateStore.setPolicyConfiguration(conf);
      policyConfMap.put(queue, conf);
      policyManagerMap.put(queue, policyManager);
    } catch (YarnException e) {
      // Pass the exception so the store failure is diagnosable from the log.
      LOG.warn("Error writing SubClusterPolicyConfiguration to state "
          + "store for queue: {}", queue, e);
      throw e;
    }
  } else {
    LOG.info("Setting unchanged policy - state store write skipped");
  }
}
|
@Test
public void testReadOnly() throws YarnException {
    // With the read-only flag set, setting a policy must never write to the
    // federation state store.
    conf.setBoolean(YarnConfiguration.GPG_POLICY_GENERATOR_READONLY, true);
    stateStore = mock(MemoryFederationStateStore.class);
    facade.reinitialize(stateStore, conf);
    when(stateStore.getPolicyConfiguration(Matchers.any(
        GetSubClusterPolicyConfigurationRequest.class))).thenReturn(
        GetSubClusterPolicyConfigurationResponse.newInstance(testConf));
    policyFacade = new GPGPolicyFacade(facade, conf);
    // If we set a policy, the state store should be untouched
    WeightedLocalityPolicyManager manager =
        new WeightedLocalityPolicyManager();
    // Add a test policy for test queue
    manager.setQueue(TEST_QUEUE);
    manager.getWeightedPolicyInfo().setAMRMPolicyWeights(
        GPGUtils.createUniformWeights(subClusterIds));
    manager.getWeightedPolicyInfo().setRouterPolicyWeights(
        GPGUtils.createUniformWeights(subClusterIds));
    policyFacade.setPolicyManager(manager);
    // No write must have reached the store.
    verify(stateStore, times(0)).setPolicyConfiguration(
        Matchers.any(SetSubClusterPolicyConfigurationRequest.class));
}
|
/**
 * Creates an archive of the given files by sending the archive format's
 * compress command to the remote shell.
 *
 * @param archive the archive format to use
 * @param workdir the remote working directory the command runs in
 * @param files the files to compress
 * @param listener progress callback for status messages
 * @param transcript callback receiving the command transcript
 * @throws BackgroundException if sending the command fails
 */
public void archive(final Archive archive, final Path workdir, final List<Path> files,
                    final ProgressListener listener, final TranscriptListener transcript) throws BackgroundException {
    command.send(archive.getCompressCommand(workdir, files), listener, transcript);
}
|
@Test
@Ignore
// Integration round-trip: archive a file, delete the original, unarchive,
// and verify the original reappears. Ignored — presumably requires a live
// SFTP session; confirm before re-enabling.
public void testArchive() throws Exception {
    final SFTPCompressFeature feature = new SFTPCompressFeature(session);
    for(Archive archive : Archive.getKnownArchives()) {
        final Path workdir = new SFTPHomeDirectoryService(session).find();
        final Path test = new Path(workdir, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
        session.getFeature(Touch.class).touch(test, new TransferStatus());
        feature.archive(archive, workdir, Collections.singletonList(test), new ProgressListener() {
            @Override
            public void message(final String message) {
                //
            }
        }, new DisabledTranscriptListener());
        // The archive file must now exist remotely.
        assertTrue(new SFTPFindFeature(session).find(archive.getArchive(Collections.singletonList(test))));
        new SFTPDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(),
            new Delete.DisabledCallback());
        assertFalse(new SFTPFindFeature(session).find(test));
        // Unarchiving must restore the deleted original file.
        feature.unarchive(archive, archive.getArchive(Collections.singletonList(test)), new ProgressListener() {
            @Override
            public void message(final String message) {
                //
            }
        }, new DisabledTranscriptListener());
        assertTrue(new SFTPFindFeature(session).find(test));
        new SFTPDeleteFeature(session).delete(Collections.singletonList(archive.getArchive(
            Collections.singletonList(test)
        )), new DisabledLoginCallback(), new Delete.DisabledCallback());
        new SFTPDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(),
            new Delete.DisabledCallback());
    }
}
}
|
/**
 * Evaluates this discrete normalization: returns 1.0 when the field value
 * equals the configured value, 0.0 when it differs, and the configured
 * {@code mapMissingTo} fallback when the field is absent.
 */
@Override
public Object evaluate(final ProcessingDTO processingDTO) {
    final Object fetched = getFromPossibleSources(name, processingDTO).orElse(null);
    if (fetched == null) {
        // Missing field: fall back to the configured replacement value.
        return mapMissingTo;
    }
    final String input = (String) fetched;
    return input.equals(value) ? 1.0 : 0.0;
}
|
@Test
void evaluateMissingValue() {
    // With no sources providing the field, evaluate must return mapMissingTo.
    String fieldName = "fieldName";
    String fieldValue = "fieldValue";
    Number mapMissingTo = 1.0;
    KiePMMLNormDiscrete kiePMMLNormDiscrete = getKiePMMLNormDiscrete(fieldName, fieldValue, mapMissingTo);
    ProcessingDTO processingDTO = getProcessingDTO(Collections.emptyList());
    Object retrieved = kiePMMLNormDiscrete.evaluate(processingDTO);
    assertThat(retrieved).isNotNull();
    assertThat(retrieved).isEqualTo(mapMissingTo);
}
|
/**
 * Derives POSIX-style owner mode bits from an S3 bucket ACL for a user.
 * Read grants contribute r-x, write grants -w-, and full control rwx;
 * grants that do not cover the user are ignored.
 *
 * @param acl the bucket's access control list
 * @param userId the canonical user id to translate permissions for
 * @return the accumulated owner mode bits
 */
public static short translateBucketAcl(AccessControlList acl, String userId) {
  short mode = (short) 0;
  for (Grant grant : acl.getGrantsAsList()) {
    Grantee grantee = grant.getGrantee();
    // Grants for other users never affect this user's mode bits.
    if (!isUserIdInGrantee(grantee, userId)) {
      continue;
    }
    Permission perm = grant.getPermission();
    if (perm.equals(Permission.Read)) {
      // Readable bucket: owner gets read and execute (list) bits.
      mode |= (short) 0500;
    } else if (perm.equals(Permission.Write)) {
      // Writable bucket: owner gets the write bit.
      mode |= (short) 0200;
    } else if (perm.equals(Permission.FullControl)) {
      // Full control: owner gets read, write and execute bits.
      mode |= (short) 0700;
    }
  }
  return mode;
}
|
@Test
public void translateUserFullPermission() {
    // FullControl grants rwx (0700) to the grantee and nothing to others.
    mAcl.grantPermission(mUserGrantee, Permission.FullControl);
    Assert.assertEquals((short) 0700, S3AUtils.translateBucketAcl(mAcl, ID));
    Assert.assertEquals((short) 0000, S3AUtils.translateBucketAcl(mAcl, OTHER_ID));
}
|
/**
 * Compares two internet-address values by their numeric address value.
 * Null sorts before non-null; descending sort order inverts the result.
 */
@Override
public int compare( Object data1, Object data2 ) throws KettleValueException {
  InetAddress inet1 = getInternetAddress( data1 );
  InetAddress inet2 = getInternetAddress( data2 );
  final int cmp;
  if ( inet1 == null && inet2 == null ) {
    cmp = 0;
  } else if ( inet1 == null ) {
    cmp = -1;
  } else if ( inet2 == null ) {
    cmp = 1;
  } else {
    // Both non-null: compare the numeric representation of the addresses.
    cmp = getBigNumber( inet1 ).compareTo( getBigNumber( inet2 ) );
  }
  return isSortedDescending() ? -cmp : cmp;
}
|
@Test
public void testCompare_PDI17270() throws UnknownHostException, KettleValueException {
  ValueMetaInternetAddress vm = new ValueMetaInternetAddress();
  // Each pair is (smaller, larger); comparison must be consistent in both
  // directions, including addresses with a high first octet (PDI-17270).
  String[][] orderedPairs = {
    { "0.0.0.0", "255.250.200.128" },
    { "0.0.0.0", "192.168.10.0" },
    { "192.168.10.0", "255.250.200.128" },
  };
  for ( String[] pair : orderedPairs ) {
    InetAddress smaller = InetAddress.getByName( pair[0] );
    InetAddress larger = InetAddress.getByName( pair[1] );
    assertEquals( -1, vm.compare( smaller, larger ) );
    assertEquals( 1, vm.compare( larger, smaller ) );
  }
}
|
/**
 * Returns the task properties shown in the UI; this task has none, so a
 * fresh empty (mutable) list is returned.
 */
@Override
public List<TaskProperty> getPropertiesForDisplay() {
    return new ArrayList<>();
}
|
@Test
public void shouldReturnEmptyPropertiesForDisplay() {
    // The kill-all-child-process task exposes no display properties.
    assertThat(new KillAllChildProcessTask().getPropertiesForDisplay().isEmpty(), is(true));
}
|
/**
 * Parses the given literal into a PostgreSQL {@code bit}-typed PGobject.
 */
@Override
public PGobject parse(final String value) {
    PGobject result = new PGobject();
    try {
        result.setType("bit");
        result.setValue(value);
    } catch (final SQLException ex) {
        // Re-throw the checked SQLException as the project's unchecked wrapper.
        throw new SQLWrapperException(ex);
    }
    return result;
}
|
@Test
void assertParse() {
    // The parser must produce a PGobject of type "bit" carrying the literal.
    PGobject actual = new PostgreSQLBitValueParser().parse("1");
    assertThat(actual.getType(), is("bit"));
    assertThat(actual.getValue(), is("1"));
}
|
/**
 * Creates a tenant together with its admin role and admin user, and records
 * the admin user as the tenant's contact.
 *
 * @param createReqVO the tenant creation request
 * @return the id of the newly created tenant
 */
@Override
@DSTransactional // Multiple data sources: @DSTransactional guarantees the local transaction as well as the data source switching
public Long createTenant(TenantSaveReqVO createReqVO) {
    // Validate that the tenant name is not duplicated
    validTenantNameDuplicate(createReqVO.getName(), null);
    // Validate that the tenant website is not duplicated
    validTenantWebsiteDuplicate(createReqVO.getWebsite(), null);
    // Validate that the tenant package is not disabled
    TenantPackageDO tenantPackage = tenantPackageService.validTenantPackage(createReqVO.getPackageId());
    // Create the tenant
    TenantDO tenant = BeanUtils.toBean(createReqVO, TenantDO.class);
    tenantMapper.insert(tenant);
    // Create the tenant's admin, executed inside the new tenant's context
    TenantUtils.execute(tenant.getId(), () -> {
        // Create the admin role
        Long roleId = createRole(tenantPackage);
        // Create the admin user and assign the role
        Long userId = createUser(roleId, createReqVO);
        // Record the admin user as the tenant's contact
        tenantMapper.updateById(new TenantDO().setId(tenant.getId()).setContactUserId(userId));
    });
    return tenant.getId();
}
|
@Test
public void testCreateTenant() {
    // Verifies tenant creation: record persisted, admin role/user created,
    // permissions assigned, and the contact user recorded on the tenant.
    // mock tenant package 100L
    TenantPackageDO tenantPackage = randomPojo(TenantPackageDO.class, o -> o.setId(100L));
    when(tenantPackageService.validTenantPackage(eq(100L))).thenReturn(tenantPackage);
    // mock role 200L
    when(roleService.createRole(argThat(role -> {
        assertEquals(RoleCodeEnum.TENANT_ADMIN.getName(), role.getName());
        assertEquals(RoleCodeEnum.TENANT_ADMIN.getCode(), role.getCode());
        assertEquals(0, role.getSort());
        assertEquals("系统自动生成", role.getRemark());
        return true;
    }), eq(RoleTypeEnum.SYSTEM.getType()))).thenReturn(200L);
    // mock user 300L
    when(userService.createUser(argThat(user -> {
        assertEquals("yunai", user.getUsername());
        assertEquals("yuanma", user.getPassword());
        assertEquals("芋道", user.getNickname());
        assertEquals("15601691300", user.getMobile());
        return true;
    }))).thenReturn(300L);
    // Prepare the request
    TenantSaveReqVO reqVO = randomPojo(TenantSaveReqVO.class, o -> {
        o.setContactName("芋道");
        o.setContactMobile("15601691300");
        o.setPackageId(100L);
        o.setStatus(randomCommonStatus());
        o.setWebsite("https://www.iocoder.cn");
        o.setUsername("yunai");
        o.setPassword("yuanma");
    }).setId(null); // set to null so it can be verified later
    // Invoke
    Long tenantId = tenantService.createTenant(reqVO);
    // Assert
    assertNotNull(tenantId);
    // Verify the persisted record's attributes
    TenantDO tenant = tenantMapper.selectById(tenantId);
    assertPojoEquals(reqVO, tenant, "id");
    assertEquals(300L, tenant.getContactUserId());
    // verify menu assignment to the admin role
    verify(permissionService).assignRoleMenu(eq(200L), same(tenantPackage.getMenuIds()));
    // verify role assignment to the admin user
    verify(permissionService).assignUserRole(eq(300L), eq(singleton(200L)));
}
|
/**
 * Gets the telemetry configuration with the given name.
 *
 * @param configName name of the telemetry config to look up
 * @return 200 OK with the encoded config, or 404 when it does not exist
 */
@GET
@Path("{name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response getConfig(@PathParam("name") String configName) {
    log.trace(String.format(MESSAGE_CONFIG, QUERY));
    // nullIsNotFound translates a missing config into a 404 response.
    final TelemetryConfig config =
        nullIsNotFound(configService.getConfig(configName), CONFIG_NOT_FOUND);
    final ObjectNode root = codec(TelemetryConfig.class).encode(config, this);
    return ok(root).build();
}
|
@Test
public void testUpdateConfigAddressWithoutOperation() {
    // Updating the address of a non-existent config must yield 304 Not Modified.
    expect(mockConfigAdminService.getConfig(anyString())).andReturn(null).once();
    replay(mockConfigAdminService);
    final WebTarget wt = target();
    Response response = wt.path(PATH + "/address/test1/address1")
        .request(MediaType.APPLICATION_JSON_TYPE)
        .put(Entity.json(""));
    final int status = response.getStatus();
    assertEquals(304, status);
    verify(mockConfigAdminService);
}
|
/**
 * Returns true when Hazelcast must be started on this node: clustering is
 * enabled and the node is an application node (search nodes never start it).
 */
public static boolean shouldStartHazelcast(AppSettings appSettings) {
    return isClusterEnabled(appSettings.getProps()) && toNodeType(appSettings.getProps()).equals(NodeType.APPLICATION);
}
|
@Test
@UseDataProvider("validIPv4andIPv6Addresses")
public void shouldStartHazelcast_must_be_false_on_SearchNode(String host) {
    // A search node never starts Hazelcast, regardless of the host address.
    TestAppSettings settings = newSettingsForSearchNode(host);
    assertThat(ClusterSettings.shouldStartHazelcast(settings)).isFalse();
}
|
/**
 * Converts a list of fully-qualified class names into an array of classes,
 * each verified to be a subtype of {@code targetClassType}.
 *
 * @param classNames fully-qualified class names to resolve
 * @param targetClassType the required supertype of every resolved class
 * @return an array of the resolved classes, in input order
 */
public static <T> Class<? extends T>[] convertStringListToClassTypeArray(List<String> classNames, Class<? extends T> targetClassType) {
    // Stream straight into a correctly-typed array; Array.newInstance builds
    // the Class<?>[] with the exact required length.
    return classNames.stream()
        .map(className -> (Class<? extends T>) convertStringToClassType(className, targetClassType))
        .toArray(size -> (Class<? extends T>[]) Array.newInstance(Class.class, size));
}
|
@Test
public void testConvertStringsToClassesList() {
    // Class names must resolve to their Class objects, order-independent.
    List<String> recordExceptionsList = List.of("java.lang.Exception", "java.lang.RuntimeException");
    Class<? extends Throwable>[] recordExceptions = ClassParseUtil
        .convertStringListToClassTypeArray(recordExceptionsList, Throwable.class);
    Assertions.assertThat(recordExceptions).containsExactlyInAnyOrder(Exception.class, RuntimeException.class);
}
|
/**
 * Formats an area id into its full name path, using a single space as the
 * separator between levels (delegates to the two-argument overload).
 */
public static String format(Integer id) {
    return format(id, " ");
}
|
@Test
public void testFormat() {
    // JUnit's assertEquals takes (expected, actual); the original calls had
    // the arguments reversed, which yields misleading failure messages.
    assertEquals("北京市 北京市 朝阳区", AreaUtils.format(110105));
    assertEquals("中国", AreaUtils.format(1));
    assertEquals("蒙古", AreaUtils.format(2));
}
|
/**
 * Checks whether the device's inactivity timeout has expired at time {@code ts}
 * and, if so, either reports inactivity (when this node owns the device's
 * partition) or removes the locally cached state (when it does not).
 *
 * @param ts current timestamp in milliseconds
 * @param deviceId the device being evaluated
 * @param stateData cached state for the device; null means it belongs elsewhere
 */
void updateInactivityStateIfExpired(long ts, DeviceId deviceId, DeviceStateData stateData) {
    log.trace("Processing state {} for device {}", stateData, deviceId);
    if (stateData != null) {
        DeviceState state = stateData.getState();
        // Report only when: the device is inactive, no inactivity alarm has
        // been raised since the last activity, and enough time has passed
        // since device creation for the inactivity timeout to elapse.
        if (!isActive(ts, state)
                && (state.getLastInactivityAlarmTime() == 0L || state.getLastInactivityAlarmTime() <= state.getLastActivityTime())
                && stateData.getDeviceCreationTime() + state.getInactivityTimeout() <= ts) {
            if (partitionService.resolve(ServiceType.TB_CORE, stateData.getTenantId(), deviceId).isMyPartition()) {
                reportInactivity(ts, deviceId, stateData);
            } else {
                // Not our partition anymore: drop the local cache entry.
                cleanupEntity(deviceId);
            }
        }
    } else {
        log.debug("[{}] Device that belongs to other server is detected and removed.", deviceId);
        cleanupEntity(deviceId);
    }
}
|
@Test
public void givenNotMyPartition_whenUpdateInactivityTimeoutIfExpired_thenShouldCleanupDevice() {
    // A device whose inactivity timeout expired but whose partition is no
    // longer owned by this node must be evicted from the local state map.
    // GIVEN
    long currentTime = System.currentTimeMillis();
    // Inactivity timeout (3s) already exceeded: last activity was 4s ago.
    DeviceState deviceState = DeviceState.builder()
        .active(true)
        .lastConnectTime(currentTime - 8000)
        .lastActivityTime(currentTime - 4000)
        .lastDisconnectTime(0)
        .lastInactivityAlarmTime(0)
        .inactivityTimeout(3000)
        .build();
    DeviceStateData stateData = DeviceStateData.builder()
        .tenantId(tenantId)
        .deviceId(deviceId)
        .deviceCreationTime(currentTime - 10000)
        .state(deviceState)
        .build();
    service.deviceStates.put(deviceId, stateData);
    // The partition resolver reports the device is NOT on this node.
    var notMyTpi = TopicPartitionInfo.builder().myPartition(false).build();
    given(partitionService.resolve(ServiceType.TB_CORE, tenantId, deviceId)).willReturn(notMyTpi);
    // WHEN
    service.updateInactivityStateIfExpired(System.currentTimeMillis(), deviceId, stateData);
    // THEN
    assertThat(service.deviceStates.get(deviceId)).isNull();
    assertThat(service.deviceStates.size()).isEqualTo(0);
    assertThat(service.deviceStates.isEmpty()).isTrue();
}
|
/**
 * Handles a job preparation request: registers a new preparation keyed by a
 * unique id, opens an artifact staging session, and replies with the
 * preparation id, staging endpoint and staging token.
 *
 * <p>Any failure is reported to the observer as a gRPC status error; this
 * method never throws to the caller.
 */
@Override
public void prepare(
    PrepareJobRequest request, StreamObserver<PrepareJobResponse> responseObserver) {
  try {
    LOG.trace("{} {}", PrepareJobRequest.class.getSimpleName(), request);
    // insert preparation; the UUID suffix makes the id unique per request
    String preparationId =
        String.format("%s_%s", request.getJobName(), UUID.randomUUID().toString());
    Struct pipelineOptions = request.getPipelineOptions();
    if (pipelineOptions == null) {
      throw new NullPointerException("Encountered null pipeline options.");
    }
    LOG.trace("PIPELINE OPTIONS {} {}", pipelineOptions.getClass(), pipelineOptions);
    JobPreparation preparation =
        JobPreparation.builder()
            .setId(preparationId)
            .setPipeline(request.getPipeline())
            .setOptions(pipelineOptions)
            .build();
    JobPreparation previous = preparations.putIfAbsent(preparationId, preparation);
    if (previous != null) {
      // this should never happen with a UUID
      String errMessage =
          String.format("A job with the preparation ID \"%s\" already exists.", preparationId);
      StatusException exception = Status.NOT_FOUND.withDescription(errMessage).asException();
      responseObserver.onError(exception);
      return;
    }
    // Open an artifact staging session tied to this preparation.
    String stagingSessionToken = stagingServiceTokenProvider.apply(preparationId);
    stagingSessionTokens.putIfAbsent(preparationId, stagingSessionToken);
    stagingService
        .getService()
        .registerJob(stagingSessionToken, extractDependencies(request.getPipeline()));
    // send response
    PrepareJobResponse response =
        PrepareJobResponse.newBuilder()
            .setPreparationId(preparationId)
            .setArtifactStagingEndpoint(stagingServiceDescriptor)
            .setStagingSessionToken(stagingSessionToken)
            .build();
    responseObserver.onNext(response);
    responseObserver.onCompleted();
  } catch (Exception e) {
    LOG.error("Could not prepare job with name {}", request.getJobName(), e);
    responseObserver.onError(Status.INTERNAL.withCause(e).asException());
  }
}
|
@Test
public void testPrepareIsSuccessful() {
  // A well-formed prepare request must succeed with exactly one response
  // carrying a preparation id and a staging endpoint.
  JobApi.PrepareJobRequest request =
      JobApi.PrepareJobRequest.newBuilder()
          .setJobName(TEST_JOB_NAME)
          .setPipeline(RunnerApi.Pipeline.getDefaultInstance())
          .setPipelineOptions(Struct.getDefaultInstance())
          .build();
  RecordingObserver<JobApi.PrepareJobResponse> recorder = new RecordingObserver<>();
  service.prepare(request, recorder);
  assertThat(recorder.isSuccessful(), is(true));
  assertThat(recorder.values, hasSize(1));
  JobApi.PrepareJobResponse response = recorder.values.get(0);
  assertThat(response.getArtifactStagingEndpoint(), notNullValue());
  assertThat(response.getPreparationId(), notNullValue());
}
|
/**
 * Matches a message against an exact-value stream rule. The field value is
 * stringified and trimmed before comparison; a missing field only matches
 * when the rule is inverted.
 */
@Override
public boolean match(Message msg, StreamRule rule) {
    final Object field = msg.getField(rule.getField());
    if (field == null) {
        return rule.getInverted();
    }
    final boolean equal = field.toString().trim().equals(rule.getValue());
    // XOR flips the outcome for inverted rules.
    return rule.getInverted() ^ equal;
}
|
@Test
public void testInvertedMatch() {
    // An inverted rule must match when the field value differs from the
    // rule's expected value.
    StreamRule rule = getSampleRule();
    rule.setInverted(true);
    Message msg = getSampleMessage();
    msg.addField("something", "nonono");
    StreamRuleMatcher matcher = getMatcher(rule);
    assertTrue(matcher.match(msg, rule));
}
|
/**
 * Extracts the {@code access_token} attribute from an OAuth token endpoint
 * JSON response.
 *
 * @param responseBody raw JSON body returned by the token endpoint
 * @return the sanitized access token value
 * @throws IOException if the response contains no access_token attribute
 */
static String parseAccessToken(String responseBody) throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    JsonNode rootNode = mapper.readTree(responseBody);
    JsonNode accessTokenNode = rootNode.at("/access_token");
    // JsonNode.at() never returns null; it returns a MissingNode when the
    // pointer does not resolve, so that is what must be checked here.
    if (accessTokenNode == null || accessTokenNode.isMissingNode()) {
        // Only grab the first N characters so that if the response body is huge, we don't
        // blow up.
        String snippet = responseBody;
        if (snippet.length() > MAX_RESPONSE_BODY_LENGTH) {
            int actualLength = responseBody.length();
            String s = responseBody.substring(0, MAX_RESPONSE_BODY_LENGTH);
            snippet = String.format("%s (trimmed to first %d characters out of %d total)", s, MAX_RESPONSE_BODY_LENGTH, actualLength);
        }
        throw new IOException(String.format("The token endpoint response did not contain an access_token value. Response: (%s)", snippet));
    }
    return sanitizeString("the token endpoint response's access_token JSON attribute", accessTokenNode.textValue());
}
|
@Test
public void testParseAccessTokenEmptyAccessToken() {
    // An access_token attribute that is present but empty must be rejected
    // (by sanitization) with an IllegalArgumentException.
    ObjectMapper mapper = new ObjectMapper();
    ObjectNode node = mapper.createObjectNode();
    node.put("access_token", "");
    assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.parseAccessToken(mapper.writeValueAsString(node)));
}
|
/**
 * Looks up the HTTP session with the given id and wraps it into a
 * {@link SessionInformations} snapshot, or returns null when the session
 * does not exist or has already been invalidated.
 */
public static SessionInformations getSessionInformationsBySessionId(String sessionId) {
    final HttpSession session = getSessionById(sessionId);
    if (session == null) {
        return null;
    }
    // Notably in Jenkins, an invalidated session can linger for a while in this map
    try {
        return new SessionInformations(session, true);
    } catch (final Exception e) {
        // Tomcat can throw "java.lang.IllegalStateException: getLastAccessedTime: Session already invalidated"
        return null;
    }
}
|
@Test
public void testGetSessionInformationsBySessionId() {
    // A registered session is found by its id; an unknown id yields null.
    final HttpSessionEvent sessionEvent = createSessionEvent();
    sessionListener.sessionCreated(sessionEvent);
    final SessionInformations sessionInformations = SessionListener
        .getSessionInformationsBySessionId(sessionEvent.getSession().getId());
    assertEquals("getSessionInformationsBySessionId", sessionEvent.getSession().getId(),
        sessionInformations.getId());
    assertNull("getSessionInformationsBySessionId",
        SessionListener.getSessionInformationsBySessionId("n'importe quoi"));
}
|
/** Returns the smallest distribution bit count observed so far. */
int getLowestObservedDistributionBits() {
    return lowestObservedDistributionBits;
}
|
@Test
void lowest_observed_distribution_bit_is_tracked_across_states() {
    // The tracked minimum only decreases: 15 stays after seeing 17,
    // and drops to 14 when a lower value arrives.
    final StateVersionTracker versionTracker = createWithMockedMetrics();
    updateAndPromote(versionTracker, stateWithoutAnnotations("bits:15 distributor:2 storage:2"), 100);
    assertEquals(15, versionTracker.getLowestObservedDistributionBits());
    updateAndPromote(versionTracker, stateWithoutAnnotations("bits:17 distributor:2 storage:2"), 200);
    assertEquals(15, versionTracker.getLowestObservedDistributionBits());
    updateAndPromote(versionTracker, stateWithoutAnnotations("bits:14 distributor:2 storage:2"), 300);
    assertEquals(14, versionTracker.getLowestObservedDistributionBits());
}
|
/** Static factory for an {@code Inner} transform with the inferred type parameter. */
public static <T> Inner<T> create() {
  return new Inner<>();
}
|
@Test
@Category(NeedsRunner.class)
public void renameNestedInArrayFields() {
  // Renaming fields addressed through an array element ("array.field") must
  // rewrite both the schema and every row's nested values.
  Schema nestedSchema = Schema.builder().addStringField("field1").addInt32Field("field2").build();
  Schema schema =
      Schema.builder().addArrayField("array", Schema.FieldType.row(nestedSchema)).build();
  PCollection<Row> renamed =
      pipeline
          .apply(
              Create.of(
                      Row.withSchema(schema)
                          .addValue(
                              ImmutableList.of(
                                  Row.withSchema(nestedSchema).addValues("one", 1).build()))
                          .build(),
                      Row.withSchema(schema)
                          .addValue(
                              ImmutableList.of(
                                  Row.withSchema(nestedSchema).addValues("two", 1).build()))
                          .build())
                  .withRowSchema(schema))
          .apply(
              RenameFields.<Row>create()
                  .rename("array.field1", "new1")
                  .rename("array.field2", "new2"));
  // The output schema must carry the renamed nested fields.
  Schema expectedNestedSchema =
      Schema.builder().addStringField("new1").addInt32Field("new2").build();
  Schema expectedSchema =
      Schema.builder().addArrayField("array", Schema.FieldType.row(expectedNestedSchema)).build();
  assertEquals(expectedSchema, renamed.getSchema());
  // The values themselves are unchanged, only the field names differ.
  List<Row> expectedRows =
      ImmutableList.of(
          Row.withSchema(expectedSchema)
              .addValue(
                  ImmutableList.of(
                      Row.withSchema(expectedNestedSchema).addValues("one", 1).build()))
              .build(),
          Row.withSchema(expectedSchema)
              .addValue(
                  ImmutableList.of(
                      Row.withSchema(expectedNestedSchema).addValues("two", 1).build()))
              .build());
  PAssert.that(renamed).containsInAnyOrder(expectedRows);
  pipeline.run();
}
|
/**
 * Routes an incoming metrics sample into the running alarm rules, after
 * resolving its metadata into a {@link MetaInAlarm} describing the entity
 * (service, instance, endpoint, or one of their relations) the metrics
 * belong to. Metrics whose scope is not in a supported catalog are ignored.
 */
@Override
public void notify(Metrics metrics) {
    WithMetadata withMetadata = (WithMetadata) metrics;
    MetricsMetaInfo meta = withMetadata.getMeta();
    // Resolve the entity the metrics describe; unsupported scopes yield null.
    // (The original additionally pre-checked all catalogs up front; that
    // check was redundant with the dispatch below and has been removed.)
    final MetaInAlarm metaInAlarm = buildMetaInAlarm(meta);
    if (metaInAlarm == null) {
        return;
    }
    List<RunningRule> runningRules = core.findRunningRule(meta.getMetricsName());
    if (runningRules == null) {
        // No alarm rule is configured for this metrics name.
        return;
    }
    runningRules.forEach(rule -> rule.in(metaInAlarm, metrics));
}

/**
 * Dispatches on the metadata scope and builds the matching alarm metadata,
 * or returns null when the scope is in no supported catalog.
 */
private MetaInAlarm buildMetaInAlarm(MetricsMetaInfo meta) {
    int scope = meta.getScope();
    if (DefaultScopeDefine.inServiceCatalog(scope)) {
        return buildServiceMeta(meta);
    } else if (DefaultScopeDefine.inServiceInstanceCatalog(scope)) {
        return buildInstanceMeta(meta);
    } else if (DefaultScopeDefine.inEndpointCatalog(scope)) {
        return buildEndpointMeta(meta);
    } else if (DefaultScopeDefine.inServiceRelationCatalog(scope)) {
        return buildServiceRelationMeta(meta);
    } else if (DefaultScopeDefine.inServiceInstanceRelationCatalog(scope)) {
        return buildInstanceRelationMeta(meta);
    } else if (DefaultScopeDefine.inEndpointRelationCatalog(scope)) {
        return buildEndpointRelationMeta(meta);
    }
    return null;
}

/** Builds alarm metadata for a service-scoped metrics sample. */
private MetaInAlarm buildServiceMeta(MetricsMetaInfo meta) {
    final String serviceId = meta.getId();
    final IDManager.ServiceID.ServiceIDDefinition serviceIDDefinition = IDManager.ServiceID.analysisId(
        serviceId);
    ServiceMetaInAlarm serviceMetaInAlarm = new ServiceMetaInAlarm();
    serviceMetaInAlarm.setMetricsName(meta.getMetricsName());
    serviceMetaInAlarm.setId(serviceId);
    serviceMetaInAlarm.setName(serviceIDDefinition.getName());
    return serviceMetaInAlarm;
}

/** Builds alarm metadata for a service-instance-scoped metrics sample. */
private MetaInAlarm buildInstanceMeta(MetricsMetaInfo meta) {
    final String instanceId = meta.getId();
    final IDManager.ServiceInstanceID.InstanceIDDefinition instanceIDDefinition = IDManager.ServiceInstanceID.analysisId(
        instanceId);
    final IDManager.ServiceID.ServiceIDDefinition serviceIDDefinition = IDManager.ServiceID.analysisId(
        instanceIDDefinition.getServiceId());
    ServiceInstanceMetaInAlarm instanceMetaInAlarm = new ServiceInstanceMetaInAlarm();
    instanceMetaInAlarm.setMetricsName(meta.getMetricsName());
    instanceMetaInAlarm.setId(instanceId);
    instanceMetaInAlarm.setName(instanceIDDefinition.getName() + " of " + serviceIDDefinition.getName());
    return instanceMetaInAlarm;
}

/** Builds alarm metadata for an endpoint-scoped metrics sample. */
private MetaInAlarm buildEndpointMeta(MetricsMetaInfo meta) {
    final String endpointId = meta.getId();
    final IDManager.EndpointID.EndpointIDDefinition endpointIDDefinition = IDManager.EndpointID.analysisId(
        endpointId);
    final IDManager.ServiceID.ServiceIDDefinition serviceIDDefinition = IDManager.ServiceID.analysisId(
        endpointIDDefinition.getServiceId());
    EndpointMetaInAlarm endpointMetaInAlarm = new EndpointMetaInAlarm();
    endpointMetaInAlarm.setMetricsName(meta.getMetricsName());
    endpointMetaInAlarm.setId(meta.getId());
    endpointMetaInAlarm.setName(
        endpointIDDefinition.getEndpointName() + " in " + serviceIDDefinition.getName());
    return endpointMetaInAlarm;
}

/** Builds alarm metadata for a service-relation-scoped metrics sample. */
private MetaInAlarm buildServiceRelationMeta(MetricsMetaInfo meta) {
    final String serviceRelationId = meta.getId();
    final IDManager.ServiceID.ServiceRelationDefine serviceRelationDefine = IDManager.ServiceID.analysisRelationId(
        serviceRelationId);
    final IDManager.ServiceID.ServiceIDDefinition sourceIdDefinition = IDManager.ServiceID.analysisId(
        serviceRelationDefine.getSourceId());
    final IDManager.ServiceID.ServiceIDDefinition destIdDefinition = IDManager.ServiceID.analysisId(
        serviceRelationDefine.getDestId());
    ServiceRelationMetaInAlarm serviceRelationMetaInAlarm = new ServiceRelationMetaInAlarm();
    serviceRelationMetaInAlarm.setMetricsName(meta.getMetricsName());
    serviceRelationMetaInAlarm.setId(serviceRelationId);
    serviceRelationMetaInAlarm.setName(sourceIdDefinition.getName() + " to " + destIdDefinition.getName());
    return serviceRelationMetaInAlarm;
}

/** Builds alarm metadata for a service-instance-relation-scoped metrics sample. */
private MetaInAlarm buildInstanceRelationMeta(MetricsMetaInfo meta) {
    final String instanceRelationId = meta.getId();
    final IDManager.ServiceInstanceID.ServiceInstanceRelationDefine serviceRelationDefine = IDManager.ServiceInstanceID.analysisRelationId(
        instanceRelationId);
    final IDManager.ServiceInstanceID.InstanceIDDefinition sourceIdDefinition = IDManager.ServiceInstanceID.analysisId(
        serviceRelationDefine.getSourceId());
    final IDManager.ServiceID.ServiceIDDefinition sourceServiceId = IDManager.ServiceID.analysisId(
        sourceIdDefinition.getServiceId());
    final IDManager.ServiceInstanceID.InstanceIDDefinition destIdDefinition = IDManager.ServiceInstanceID.analysisId(
        serviceRelationDefine.getDestId());
    final IDManager.ServiceID.ServiceIDDefinition destServiceId = IDManager.ServiceID.analysisId(
        destIdDefinition.getServiceId());
    ServiceInstanceRelationMetaInAlarm instanceRelationMetaInAlarm = new ServiceInstanceRelationMetaInAlarm();
    instanceRelationMetaInAlarm.setMetricsName(meta.getMetricsName());
    instanceRelationMetaInAlarm.setId(instanceRelationId);
    instanceRelationMetaInAlarm.setName(sourceIdDefinition.getName() + " of " + sourceServiceId.getName()
        + " to " + destIdDefinition.getName() + " of " + destServiceId.getName());
    return instanceRelationMetaInAlarm;
}

/** Builds alarm metadata for an endpoint-relation-scoped metrics sample. */
private MetaInAlarm buildEndpointRelationMeta(MetricsMetaInfo meta) {
    final String endpointRelationId = meta.getId();
    final IDManager.EndpointID.EndpointRelationDefine endpointRelationDefine = IDManager.EndpointID.analysisRelationId(
        endpointRelationId);
    final IDManager.ServiceID.ServiceIDDefinition sourceService = IDManager.ServiceID.analysisId(
        endpointRelationDefine.getSourceServiceId());
    final IDManager.ServiceID.ServiceIDDefinition destService = IDManager.ServiceID.analysisId(
        endpointRelationDefine.getDestServiceId());
    EndpointRelationMetaInAlarm endpointRelationMetaInAlarm = new EndpointRelationMetaInAlarm();
    endpointRelationMetaInAlarm.setMetricsName(meta.getMetricsName());
    endpointRelationMetaInAlarm.setId(endpointRelationId);
    endpointRelationMetaInAlarm.setName(endpointRelationDefine.getSource() + " in " + sourceService.getName()
        + " to " + endpointRelationDefine.getDest() + " in " + destService.getName());
    return endpointRelationMetaInAlarm;
}
|
@Test
public void testNotifyWithServiceInstanceCatalog() {
    // Route the metric into the service-instance branch of notify().
    final String metricsName = "service-instance-metrics";
    when(metadata.getMetricsName()).thenReturn(metricsName);
    when(DefaultScopeDefine.inServiceInstanceCatalog(0)).thenReturn(true);

    final String instanceName = "instance-inventory-name";
    final String serviceId = IDManager.ServiceID.buildId("service", true);
    final String instanceId = IDManager.ServiceInstanceID.buildId(serviceId, instanceName);
    when(metadata.getId()).thenReturn(instanceId);

    notifyHandler.notify(metrics);

    // The running rule must have received a ServiceInstanceMetaInAlarm describing the instance.
    final ArgumentCaptor<MetaInAlarm> captor = ArgumentCaptor.forClass(MetaInAlarm.class);
    verify(rule).in(captor.capture(), any());

    final MetaInAlarm captured = captor.getValue();
    assertTrue(captured instanceof ServiceInstanceMetaInAlarm);
    assertEquals("c2VydmljZQ==.1_aW5zdGFuY2UtaW52ZW50b3J5LW5hbWU=", captured.getId0());
    assertEquals(DefaultScopeDefine.SERVICE_INSTANCE_CATALOG_NAME, captured.getScope());
    assertEquals("instance-inventory-name of service", captured.getName());
    assertEquals(DefaultScopeDefine.SERVICE_INSTANCE, captured.getScopeId());
}
|
/**
 * Extracts dispatch criteria from a real URI by matching it against a URI template.
 * Both {@code {id}} and {@code :id} template styles are supported. Only parameters
 * listed in {@code paramsRuleString} (a {@code &&}-separated list, optionally followed
 * by a {@code ??} query-rule section that is ignored here) are kept.
 *
 * @param paramsRuleString rule listing the parameter names to extract
 * @param pattern          the URI template (e.g. {@code /pet/:petId} or {@code /pet/{petId}})
 * @param realURI          the concrete URI to extract values from
 * @return sorted criteria in {@code /name=value} form, empty string when nothing matches
 */
public static String extractFromURIPattern(String paramsRuleString, String pattern, String realURI) {
    // TreeMap so that the resulting criteria string is sorted by parameter name.
    Map<String, String> criteriaMap = new TreeMap<>();
    pattern = sanitizeURLForRegExp(pattern);
    realURI = sanitizeURLForRegExp(realURI);
    // Build a pattern for extracting part names from the template and a pattern for
    // extracting the corresponding values from realURI.
    String partsPattern;
    String valuesPattern;
    if (pattern.contains("/{")) {
        partsPattern = pattern.replaceAll(CURLY_PART_PATTERN, CURLY_PART_EXTRACTION_PATTERN);
        valuesPattern = pattern.replaceAll(CURLY_PART_PATTERN, "(.+)");
    } else {
        partsPattern = pattern.replaceAll("(:[^:^/]+)", "\\:(.+)");
        valuesPattern = pattern.replaceAll("(:[^:^/]+)", "(.+)");
    }
    // '$' is a regex anchor; escape literal occurrences coming from the URL.
    if (pattern.contains("$")) {
        partsPattern = partsPattern.replace("$", "\\$");
        valuesPattern = valuesPattern.replace("$", "\\$");
    }
    Pattern partsP = Pattern.compile(partsPattern);
    Matcher partsM = partsP.matcher(pattern);
    Pattern valuesP = Pattern.compile(valuesPattern);
    Matcher valuesM = valuesP.matcher(realURI);
    // Rule string can be a URI_ELEMENT rule and contain ?? elements.
    // We must remove them before parsing the URI parts.
    if (paramsRuleString.contains("??")) {
        paramsRuleString = paramsRuleString.split("\\?\\?")[0];
    }
    final var paramsRule = Arrays.stream(paramsRuleString.split("&&")).map(String::trim).distinct()
        .collect(Collectors.toUnmodifiableSet());
    // Both should match and have the same group count, otherwise extraction is skipped.
    if (valuesM.matches() && partsM.matches() && valuesM.groupCount() == partsM.groupCount()) {
        for (int i = 1; i < partsM.groupCount() + 1; i++) {
            final String paramName = partsM.group(i);
            final String paramValue = valuesM.group(i);
            if (paramsRule.contains(paramName)) {
                criteriaMap.put(paramName, paramValue);
            }
        }
    }
    // Just append sorted entries, separating them with /.
    StringBuilder result = new StringBuilder();
    for (Map.Entry<String, String> criteria : criteriaMap.entrySet()) {
        result.append("/").append(criteria.getKey()).append("=").append(criteria.getValue());
    }
    return result.toString();
}
|
@Test
void testExtractFromURIPattern2() {
    // Single :param template with a numeric value.
    String criteria = DispatchCriteriaHelper.extractFromURIPattern("petId", "/pet/:petId", "/pet/2");
    assertEquals("/petId=2", criteria);
    // Same extraction with a different parameter name and a longer value.
    criteria = DispatchCriteriaHelper.extractFromURIPattern("id", "/order/:id", "/order/123456");
    assertEquals("/id=123456", criteria);
}
|
@Override
public void verifyCompatibility(WindowFn<?, ?> other) throws IncompatibleWindowException {
    // Compatible window functions need no further checks.
    if (this.isCompatible(other)) {
        return;
    }
    String reason = String.format(
        "Only %s objects with the same size, period and offset are compatible.",
        SlidingWindows.class.getSimpleName());
    throw new IncompatibleWindowException(other, reason);
}
|
@Test
public void testVerifyCompatibility() throws IncompatibleWindowException {
    // Identical sizes are compatible and must not throw.
    SlidingWindows tenMillis = SlidingWindows.of(Duration.millis(10));
    tenMillis.verifyCompatibility(SlidingWindows.of(Duration.millis(10)));
    // Differing sizes must be rejected.
    thrown.expect(IncompatibleWindowException.class);
    tenMillis.verifyCompatibility(SlidingWindows.of(Duration.millis(20)));
}
|
/**
 * Exposes the sender that drives the client telemetry subscription/push lifecycle.
 *
 * @return the {@link ClientTelemetrySender} owned by this reporter
 */
public ClientTelemetrySender telemetrySender() {
    return this.clientTelemetrySender;
}
|
@Test
public void testHandleResponseGetSubscriptionsWithoutMetrics() {
    ClientTelemetryReporter.DefaultClientTelemetrySender sender =
        (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender();
    assertTrue(sender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS));

    // Build a subscriptions response carrying no metric selectors.
    Uuid instanceId = Uuid.randomUuid();
    GetTelemetrySubscriptionsResponseData data = new GetTelemetrySubscriptionsResponseData()
        .setClientInstanceId(instanceId)
        .setSubscriptionId(5678)
        .setAcceptedCompressionTypes(Collections.singletonList(CompressionType.GZIP.id))
        .setPushIntervalMs(20000);
    sender.handleResponse(new GetTelemetrySubscriptionsResponse(data));

    // With no metrics subscribed, the sender should ask for a subscription again.
    assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, sender.state());
    ClientTelemetryReporter.ClientTelemetrySubscription subscription = sender.subscription();
    assertNotNull(subscription);
    assertEquals(instanceId, subscription.clientInstanceId());
    assertEquals(5678, subscription.subscriptionId());
    assertEquals(Collections.singletonList(CompressionType.GZIP), subscription.acceptedCompressionTypes());
    assertEquals(20000, subscription.pushIntervalMs());
    assertEquals(ClientTelemetryUtils.SELECTOR_NO_METRICS, subscription.selector());
}
|
/**
 * Initializes the step: resolves variables in every input field XPath, normalizes
 * attribute paths (leading {@code @}), and validates/normalizes the loop and prune
 * XPaths.
 *
 * @param smi step metadata (cast to {@link GetXMLDataMeta})
 * @param sdi step data (cast to {@link GetXMLDataData})
 * @return true when initialization succeeded, false otherwise (e.g. empty loop XPath)
 */
public boolean init( StepMetaInterface smi, StepDataInterface sdi ) {
  meta = (GetXMLDataMeta) smi;
  data = (GetXMLDataData) sdi;

  if ( super.init( smi, sdi ) ) {
    data.rownr = 1L;
    data.nrInputFields = meta.getInputFields().length;

    // Correct the attribute path if needed; done once up front.
    for ( int i = 0; i < data.nrInputFields; i++ ) {
      GetXMLDataField xmlDataField = meta.getInputFields()[i];
      // Resolve variable substitution
      String XPathValue = environmentSubstitute( xmlDataField.getXPath() );
      if ( xmlDataField.getElementType() == GetXMLDataField.ELEMENT_TYPE_ATTRIBUT ) {
        // We have an attribute: does the last path element need a leading @?
        // Only put @ on the last element in the path, never in front of it all.
        int last = XPathValue.lastIndexOf( GetXMLDataMeta.N0DE_SEPARATOR );
        if ( last > -1 ) {
          last++;
          String attribut = XPathValue.substring( last, XPathValue.length() );
          if ( !attribut.startsWith( GetXMLDataMeta.AT ) ) {
            XPathValue = XPathValue.substring( 0, last ) + GetXMLDataMeta.AT + attribut;
          }
        } else {
          if ( !XPathValue.startsWith( GetXMLDataMeta.AT ) ) {
            XPathValue = GetXMLDataMeta.AT + XPathValue;
          }
        }
      }
      xmlDataField.setResolvedXPath( XPathValue );
    }

    data.PathValue = environmentSubstitute( meta.getLoopXPath() );
    if ( Utils.isEmpty( data.PathValue ) ) {
      logError( BaseMessages.getString( PKG, "GetXMLData.Error.EmptyPath" ) );
      return false;
    }
    // Idiomatic prefix check instead of substring( 0, 1 ).equals( ... ).
    if ( !data.PathValue.startsWith( GetXMLDataMeta.N0DE_SEPARATOR ) ) {
      data.PathValue = GetXMLDataMeta.N0DE_SEPARATOR + data.PathValue;
    }
    if ( log.isDetailed() ) {
      logDetailed( BaseMessages.getString( PKG, "GetXMLData.Log.LoopXPath", data.PathValue ) );
    }

    data.prunePath = environmentSubstitute( meta.getPrunePath() );
    if ( data.prunePath != null ) {
      if ( Utils.isEmpty( data.prunePath.trim() ) ) {
        data.prunePath = null;
      } else {
        // Ensure a leading slash.
        if ( !data.prunePath.startsWith( GetXMLDataMeta.N0DE_SEPARATOR ) ) {
          data.prunePath = GetXMLDataMeta.N0DE_SEPARATOR + data.prunePath;
        }
        // Check if other conditions apply that do not allow pruning.
        if ( meta.isInFields() ) {
          data.prunePath = null; // not possible by design, could be changed later on
        }
      }
    }
    return true;
  }
  return false;
}
|
/**
 * Verifies that missing XML nodes yield null values when
 * KETTLE_XML_MISSING_TAG_YIELDS_NULL_VALUE is enabled.
 */
@Test
public void testGetXMLData_MissingNodesYieldNullValues() throws Exception {
  KettleEnvironment.init();
  // Remember the previous value so this test does not leak global state
  // into other tests running in the same JVM.
  String previous = System.getProperty( Const.KETTLE_XML_MISSING_TAG_YIELDS_NULL_VALUE );
  System.setProperty( Const.KETTLE_XML_MISSING_TAG_YIELDS_NULL_VALUE, "Y" );
  try {
    testGetXMLData( null );
  } finally {
    if ( previous == null ) {
      System.clearProperty( Const.KETTLE_XML_MISSING_TAG_YIELDS_NULL_VALUE );
    } else {
      System.setProperty( Const.KETTLE_XML_MISSING_TAG_YIELDS_NULL_VALUE, previous );
    }
  }
}
|
/**
 * Resolves the (possibly nested, dot-separated) attribute {@code attributeName}
 * on {@code object} and returns its value.
 *
 * @param object                 the object to read from
 * @param attributeName          attribute path, e.g. {@code "outer.inner"}
 * @param failOnMissingAttribute whether a missing attribute raises instead of yielding null
 * @return the resolved attribute value
 * @throws Exception propagated from getter creation or invocation
 */
public static Object extractValue(Object object, String attributeName, boolean failOnMissingAttribute) throws Exception {
    return createGetter(object, attributeName, failOnMissingAttribute)
        .getValue(object);
}
|
@Test
public void extractValue_whenIntermediateFieldIsInterfaceAndDoesNotContainField_returnsNull()
        throws Exception {
    // With failOnMissingAttribute=false, a missing field behind an interface-typed
    // intermediate step must yield null rather than throw.
    OuterObject outer = new OuterObject();
    Object value = ReflectionHelper.extractValue(outer, "emptyInterface.doesNotExist", false);
    assertNull(value);
}
|
/**
 * Assigns the issue to the given user, or unassigns it when {@code user} is null.
 * No-op when the assignee is unchanged.
 *
 * @param issue   the issue to update
 * @param user    the new assignee, or null to unassign
 * @param context change context providing the update date
 * @return true when the assignee actually changed
 */
public boolean assign(DefaultIssue issue, @Nullable UserDto user, IssueChangeContext context) {
    String assigneeUuid = user != null ? user.getUuid() : null;
    if (Objects.equals(assigneeUuid, issue.assignee())) {
        return false;
    }
    String newAssigneeName = user == null ? null : user.getName();
    issue.setFieldChange(context, ASSIGNEE, UNUSED, newAssigneeName);
    // Reuse the already-computed uuid instead of recomputing the same ternary.
    issue.setAssigneeUuid(assigneeUuid);
    issue.setUpdateDate(context.date());
    issue.setChanged(true);
    issue.setSendNotifications(true);
    return true;
}
|
@Test
void change_assignee() {
    // Issue currently assigned to someone else; reassign to Emmerik.
    UserDto newAssignee = newUserDto().setLogin("emmerik").setName("Emmerik");
    issue.setAssigneeUuid("user_uuid");

    boolean updated = underTest.assign(issue, newAssignee, context);

    assertThat(updated).isTrue();
    assertThat(issue.assignee()).isEqualTo(newAssignee.getUuid());
    assertThat(issue.mustSendNotifications()).isTrue();
    // The recorded field diff reports the new assignee's display name.
    FieldDiffs.Diff diff = issue.currentChange().get(ASSIGNEE);
    assertThat(diff.oldValue()).isEqualTo(UNUSED);
    assertThat(diff.newValue()).isEqualTo(newAssignee.getName());
}
|
/**
 * Appends a model to the end of the adapter's model list and notifies observers
 * of the single-item insertion.
 */
protected void addModel(EpoxyModel<?> modelToAdd) {
    // The new model is appended, so its adapter position equals the previous size.
    int insertPosition = models.size();
    // Suppress list-change callbacks while mutating; we fire the precise
    // range-inserted notification ourselves afterwards.
    pauseModelListNotifications();
    models.add(modelToAdd);
    resumeModelListNotifications();
    notifyItemRangeInserted(insertPosition, 1);
}
|
@Test
public void testAddModel() {
    // First insertion lands at position 0.
    testAdapter.addModel(new TestModel());
    verify(observer).onItemRangeInserted(0, 1);
    assertEquals(1, testAdapter.models.size());

    // Second insertion is appended at position 1.
    testAdapter.addModel(new TestModel());
    verify(observer).onItemRangeInserted(1, 1);
    assertEquals(2, testAdapter.models.size());

    checkDifferState();
}
|
/**
 * Fails unless the subject is equal to at least one element of the given iterable.
 */
public void isIn(@Nullable Iterable<?> iterable) {
    checkNotNull(iterable);
    if (contains(iterable, actual)) {
        return;
    }
    failWithActual("expected any of", iterable);
}
|
@Test
public void isInFailure() {
    // "x" is not among the candidates, so the assertion must fail with the
    // standard "expected any of" message.
    expectFailure.whenTesting().that("x").isIn(oneShotIterable("a", "b", "c"));
    assertFailureKeys("expected any of", "but was");
    assertFailureValue("expected any of", "[a, b, c]");
}
|
/**
 * Performs a single-request server-side S3 copy from {@code sourcePath} to
 * {@code destinationPath}, carrying over the given object metadata, the configured
 * storage class, and the customer-supplied SSE key (same key on both sides).
 */
@VisibleForTesting
CopyObjectResult atomicCopy(
    S3ResourceId sourcePath, S3ResourceId destinationPath, ObjectMetadata sourceObjectMetadata)
    throws AmazonClientException {
  CopyObjectRequest request =
      new CopyObjectRequest(
          sourcePath.getBucket(),
          sourcePath.getKey(),
          destinationPath.getBucket(),
          destinationPath.getKey());
  request.setNewObjectMetadata(sourceObjectMetadata);
  request.setStorageClass(config.getS3StorageClass());
  request.setSourceSSECustomerKey(config.getSSECustomerKey());
  request.setDestinationSSECustomerKey(config.getSSECustomerKey());
  return amazonS3.get().copyObject(request);
}
|
@Test
public void testAtomicCopy() {
// Exercise both URI schemes with a plain configuration...
testAtomicCopy(s3Config("s3"));
testAtomicCopy(s3Config("other"));
// ...and again with a customer-supplied SSE key configured.
testAtomicCopy(s3ConfigWithSSECustomerKey("s3"));
testAtomicCopy(s3ConfigWithSSECustomerKey("other"));
}
|
/**
 * Lists the directory using the default path delimiter for hierarchy grouping.
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    final String delimiter = String.valueOf(Path.DELIMITER);
    return this.list(directory, listener, delimiter);
}
|
@Test
public void testListLexicographicSortOrderAssumption() throws Exception {
// Create an empty working directory in the test bucket.
final Path container = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path directory = new GoogleStorageDirectoryFeature(session).mkdir(
new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
assertTrue(new GoogleStorageObjectListService(session).list(directory, new DisabledListProgressListener()).isEmpty());
// File names chosen to exercise case, digits, and punctuation in the sort order.
final List<String> files = new ArrayList<>(Arrays.asList(
"Z", "aa", "0a", "a", "AAA", "B", "~$a", ".c"
));
for(String f : files) {
new GoogleStorageTouchFeature(session).touch(new Path(directory, f, EnumSet.of(Path.Type.file)), new TransferStatus());
}
// Mix in folders to confirm files and folders share one ordering.
final List<String> folders = Arrays.asList("b", "BB");
for(String f : folders) {
new GoogleStorageDirectoryFeature(session).mkdir(new Path(directory, f, EnumSet.of(Path.Type.directory)), new TransferStatus());
}
files.addAll(folders);
// Expected order is defined by the protocol's list comparator.
files.sort(session.getHost().getProtocol().getListComparator());
// The indexed listener asserts ordering as entries stream in.
final AttributedList<Path> list = new GoogleStorageObjectListService(session).list(directory, new IndexedListProgressListener() {
@Override
public void message(final String message) {
//
}
@Override
public void visit(final AttributedList<Path> list, final int index, final Path file) {
assertEquals(files.get(index), file.getName());
}
});
// Verify the final list order matches, deleting entries as we go.
for(int i = 0; i < list.size(); i++) {
assertEquals(files.get(i), list.get(i).getName());
new GoogleStorageDeleteFeature(session).delete(Collections.singletonList(list.get(i)), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
// Cleanup: remove the working directory itself.
new GoogleStorageDeleteFeature(session).delete(Collections.singletonList(directory), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Returns true when the TTL's remaining lifetime (clamped at zero once expired)
 * is at least the estimated execution time.
 */
@VisibleForTesting
public static boolean isTtlEnough(ConfidenceBasedTtlInfo ttlInfo, Duration estimatedExecutionTime)
{
    Instant expiryTime = ttlInfo.getExpiryInstant();
    // Negative remaining time means the TTL already expired; treat it as zero.
    long secondsLeft = Math.max(SECONDS.between(Instant.now(), expiryTime), 0);
    Duration remaining = new Duration(secondsLeft, TimeUnit.SECONDS);
    return remaining.compareTo(estimatedExecutionTime) >= 0;
}
|
@Test
public void testTtlComparison()
{
    // An infinite TTL always leaves enough time for any finite execution estimate.
    ConfidenceBasedTtlInfo infiniteTtl = ConfidenceBasedTtlInfo.getInfiniteTtl();
    Duration oneHour = new Duration(1, TimeUnit.HOURS);
    assertTrue(SimpleTtlNodeSelector.isTtlEnough(infiniteTtl, oneHour));
}
|
/**
 * Entry point: expects {@code <action> <name>} and dispatches to the matching
 * load/create/resource operation; anything else prints usage.
 */
@Override
public int run(String[] args) throws Exception {
    if (args.length != 2) {
        return usage(args);
    }
    String action = args[0];
    String name = args[1];
    if (A_LOAD.equals(action)) {
        return loadClass(name);
    }
    if (A_CREATE.equals(action)) {
        // Load first to separate load errors from create errors.
        int loadResult = loadClass(name);
        return loadResult == SUCCESS ? createClassInstance(name) : loadResult;
    }
    if (A_RESOURCE.equals(action)) {
        return loadResource(name);
    }
    if (A_PRINTRESOURCE.equals(action)) {
        return dumpResource(name);
    }
    // Unknown action: fall back to usage.
    return usage(args);
}
|
@Test
public void testFailsNoSuchResource() throws Throwable {
    // Looking up a resource that does not exist must exit with E_NOT_FOUND.
    run(FindClass.E_NOT_FOUND, FindClass.A_RESOURCE,
        "org/apache/hadoop/util/ThereIsNoSuchClass.class");
}
|
/**
 * Forwards a metric-removal event to the ledger so the metric stops being tracked.
 */
public void metricRemoval(KafkaMetric metric) {
    this.ledger.metricRemoval(metric);
}
|
@Test
public void testMetricRemoval() {
    // Register a metric and confirm it is collected alongside the built-in count metric.
    metrics.addMetric(metricName, (config, now) -> 100.0);
    collector.collect(testEmitter);
    assertEquals(2, testEmitter.emittedMetrics().size());

    // Remove it; the collector must drop it from its tracked set.
    metrics.removeMetric(metricName);
    assertFalse(collector.getTrackedMetrics().contains(metricNamingStrategy.metricKey(metricName)));

    // A fresh collection now yields only the built-in count metric.
    testEmitter.reset();
    collector.collect(testEmitter);
    List<SinglePointMetric> remaining = testEmitter.emittedMetrics();
    assertEquals(1, remaining.size());
    assertEquals("test.domain.kafka.count.count", remaining.get(0).builder().build().getName());
}
|
/**
 * Returns a borrowed {@link Fury} instance to the idle pool, decrements the active
 * count, and wakes up any threads waiting for a free instance.
 *
 * @param fury the instance being returned; must not be null
 * @throws NullPointerException if {@code fury} is null
 * @throws RuntimeException     wrapping any failure while updating pool state
 */
public void returnFury(Fury fury) {
    Objects.requireNonNull(fury);
    // Acquire the lock OUTSIDE the try block: if lock() itself were to fail, the
    // finally clause must not attempt to unlock a lock that was never acquired
    // (which would throw IllegalMonitorStateException and mask the real error).
    lock.lock();
    try {
        idleCacheQueue.add(fury);
        activeCacheNumber.decrementAndGet();
        furyCondition.signalAll();
    } catch (Exception e) {
        LOG.error(e.getMessage(), e);
        throw new RuntimeException(e);
    } finally {
        lock.unlock();
    }
}
|
@Test
public void testReturnFuryForbidden() {
    // Returning a null instance is a programming error and must be rejected.
    ClassLoaderFuryPooled pool = getPooled(4, 9);
    Assert.assertThrows(NullPointerException.class, () -> pool.returnFury(null));
}
|
/**
 * @return the number of synchronous backups configured for this map
 */
public int getBackupCount() {
    return this.backupCount;
}
|
@Test
public void testGetBackupCount() {
    // A freshly constructed config must expose the documented default backup count.
    MapConfig config = new MapConfig();
    assertEquals(MapConfig.DEFAULT_BACKUP_COUNT, config.getBackupCount());
}
|
/**
 * Deserializes a record key with the configured key deserializer, passing the
 * topic and record headers through for context-aware deserializers.
 */
KIn deserializeKey(final String topic, final Headers headers, final byte[] data) {
    return this.keyDeserializer.deserialize(topic, headers, data);
}
|
@Test
public void shouldProvideTopicHeadersAndDataToKeyDeserializer() {
    // TheDeserializer echoes topic + headers + payload, proving all three are forwarded.
    final SourceNode<String, String> node = new MockSourceNode<>(new TheDeserializer(), new TheDeserializer());
    final RecordHeaders headers = new RecordHeaders();
    final String result = node.deserializeKey("topic", headers, "data".getBytes(StandardCharsets.UTF_8));
    assertThat(result, is("topic" + headers + "data"));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.