focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
/**
 * Returns the simple name of the Java class that natively backs the given value meta,
 * e.g. "String", "Long". Binary values are special-cased to the literal "Binary".
 * If the native class cannot be resolved, logs a debug message and falls back to "Object".
 */
public static String getNativeDataTypeSimpleName( ValueMetaInterface v ) {
  try {
    // Binary has no meaningful native class name; report it by its type name instead.
    if ( v.getType() == ValueMetaInterface.TYPE_BINARY ) {
      return "Binary";
    }
    return v.getNativeDataTypeClass().getSimpleName();
  } catch ( KettleValueException e ) {
    LogChannelInterface log = new LogChannel( v );
    log.logDebug( BaseMessages.getString( PKG, "FieldHelper.Log.UnknownNativeDataTypeSimpleName" ) );
    return "Object";
  }
}
|
// Verifies that an internet-address value meta resolves to the simple name of its
// native backing class ("InetAddress") rather than the fallback "Object".
@Test
public void getNativeDataTypeSimpleName_InetAddress() {
ValueMetaInternetAddress v = new ValueMetaInternetAddress();
assertEquals( "InetAddress", FieldHelper.getNativeDataTypeSimpleName( v ) );
}
|
/**
 * Synchronous wrapper around {@code fastPutAsync}: stores the entry with the given TTL
 * and blocks until the async operation completes.
 *
 * @return the boolean result of the async put (per the async variant's contract)
 */
@Override
public boolean fastPut(K key, V value, long ttl, TimeUnit ttlUnit) {
return get(fastPutAsync(key, value, ttl, ttlUnit));
}
|
// Verifies that a map-cache eviction task is registered after a TTL put and is removed
// again once the map becomes empty (removeEmptyEvictionTask option). Uses reflection to
// inspect the scheduler's internal "tasks" map; the 6s sleep gives the 1-2s cleanup
// delay window time to fire after the 1s TTL entry expires.
@Test
public void testRemoveEmptyEvictionTask() throws InterruptedException {
Config config = createConfig();
config.setMaxCleanUpDelay(2);
config.setMinCleanUpDelay(1);
RedissonClient redisson = Redisson.create(config);
assertThat(redisson.getKeys().count()).isZero();
RMapCache<Integer, Integer> map = redisson.getMapCache("simple", MapCacheOptions.<Integer, Integer>defaults().removeEmptyEvictionTask());
map.fastPut(1, 1, 1, TimeUnit.SECONDS);
EvictionScheduler evictionScheduler = ((Redisson) redisson).getEvictionScheduler();
Map<?, ?> tasks = Reflect.on(evictionScheduler).get("tasks");
assertThat(tasks.isEmpty()).isFalse();
Thread.sleep(6000);
assertThat(tasks.isEmpty()).isTrue();
redisson.shutdown();
}
|
/**
 * Pages reward activities by delegating directly to the mapper; all filtering
 * (name, status, ...) is applied by the mapper from the request VO.
 */
@Override
public PageResult<RewardActivityDO> getRewardActivityPage(RewardActivityPageReqVO pageReqVO) {
return rewardActivityMapper.selectPage(pageReqVO);
}
|
// Inserts one matching row plus two decoys (wrong name, wrong status) and asserts the
// page query returns exactly the matching record. "rules" is excluded from the pojo
// comparison.
@Test
public void testGetRewardActivityPage() {
// mock data: the record the query should find
RewardActivityDO dbRewardActivity = randomPojo(RewardActivityDO.class, o -> { // found later by the query
o.setName("芋艿");
o.setStatus(PromotionActivityStatusEnum.CLOSE.getStatus());
});
rewardActivityMapper.insert(dbRewardActivity);
// decoy: name does not match
rewardActivityMapper.insert(cloneIgnoreId(dbRewardActivity, o -> o.setName("土豆")));
// decoy: status does not match
rewardActivityMapper.insert(cloneIgnoreId(dbRewardActivity, o -> o.setStatus(PromotionActivityStatusEnum.RUN.getStatus())));
// prepare query parameters matching only the first record
RewardActivityPageReqVO reqVO = new RewardActivityPageReqVO();
reqVO.setName("芋艿");
reqVO.setStatus(PromotionActivityStatusEnum.CLOSE.getStatus());
// call
PageResult<RewardActivityDO> pageResult = rewardActivityService.getRewardActivityPage(reqVO);
// assert: exactly one hit, equal to the inserted record (ignoring "rules")
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbRewardActivity, pageResult.getList().get(0), "rules");
}
|
/**
 * Routes a prepared statement: query statements are executed via {@code handleQuery},
 * everything else is reported as unhandled so the caller can fall through to other
 * handlers. The unchecked cast is safe because it is guarded by the instanceof check.
 */
@SuppressWarnings("unchecked")
public QueryMetadataHolder handleStatement(
    final ServiceContext serviceContext,
    final Map<String, Object> configOverrides,
    final Map<String, Object> requestProperties,
    final PreparedStatement<?> statement,
    final Optional<Boolean> isInternalRequest,
    final MetricsCallbackHolder metricsCallbackHolder,
    final Context context,
    final boolean excludeTombstones
) {
  // Guard clause: only Query statements are handled here.
  if (!(statement.getStatement() instanceof Query)) {
    return QueryMetadataHolder.unhandled();
  }
  return handleQuery(
      serviceContext,
      (PreparedStatement<Query>) statement,
      isInternalRequest,
      metricsCallbackHolder,
      configOverrides,
      requestProperties,
      context,
      excludeTombstones
  );
}
|
// Exercises the pull-query rate limiter: with a limit of 1 qps, the first
// handleStatement succeeds and the immediate second call must throw a KsqlException
// with the rate-limit message.
@Test
public void shouldRateLimitStreamPullQueries() {
when(ksqlEngine.createStreamPullQuery(any(), any(), any(), anyBoolean()))
.thenReturn(streamPullQueryMetadata);
// When: first call consumes the 1.0 qps budget, second call exceeds it
queryExecutor.handleStatement(serviceContext, ImmutableMap.of(), ImmutableMap.of(),
pullQuery, Optional.empty(), metricsCallbackHolder, context, false);
Exception e = assertThrows(KsqlException.class, () ->
queryExecutor.handleStatement(serviceContext, ImmutableMap.of(), ImmutableMap.of(),
pullQuery, Optional.empty(), metricsCallbackHolder, context, false));
// Then:
assertThat(e.getMessage(),
is("Host is at rate limit for pull queries. Currently set to 1.0 qps."));
}
|
/**
 * Computes the root nodes (steps with no predecessors) of the runtime DAG that have not
 * yet been created for a restart. If the restart config names a current step, that step
 * must itself be a root node; it is removed from the result, and a violation of that
 * invariant fails fast via {@code Checks.checkTrue}.
 */
public static Set<String> getNotCreatedRootNodesInRestartRuntimeDag(
    @NotNull Map<String, StepTransition> runtimeDag, @NotNull RestartConfig restartConfig) {
  Set<String> roots =
      runtimeDag.entrySet().stream()
          .filter(e -> e.getValue().getPredecessors().isEmpty())
          .map(Map.Entry::getKey)
          .collect(Collectors.toSet());
  // The restart's current step (when present) must be one of the DAG roots; drop it
  // from the not-yet-created set since it is the node being restarted.
  String restartStepId = RunRequest.getCurrentNode(restartConfig).getStepId();
  if (restartStepId != null) {
    Checks.checkTrue(
        roots.remove(restartStepId),
        "Invalid state: stepId [%s] should be one of root nodes in the DAG",
        restartStepId);
  }
  return roots;
}
|
// With a restart anchored at root step "job_3", the remaining not-yet-created root of
// runtimeDag1 should be exactly "job_9".
@Test
public void testGetNotCreatedRootNodesInRestartRuntimeDag() {
RestartConfig config =
RestartConfig.builder().addRestartNode("sample-dag-test-1", 1, "job_3").build();
Set<String> actual = DagHelper.getNotCreatedRootNodesInRestartRuntimeDag(runtimeDag1, config);
Assert.assertEquals(Collections.singleton("job_9"), actual);
}
|
/**
 * Entry point of the handler-map builder chain. The {@code type} parameter exists only
 * so the compiler can infer {@code K}; its value is never used.
 */
@SuppressWarnings("unused") // Required for automatic type inference
public static <K> Builder0<K> forClass(final Class<K> type) {
return new Builder0<>();
}
|
// Registering two handlers for the same key class (LeafTypeA) must be rejected with an
// IllegalArgumentException by the builder's put().
@Test(expected = IllegalArgumentException.class)
public void shouldThrowOnDuplicateKeyR2() {
HandlerMaps.forClass(BaseType.class)
.withArgTypes(String.class, Integer.class)
.withReturnType(Number.class)
.put(LeafTypeA.class, handlerR2_1)
.put(LeafTypeA.class, handlerR2_2);
}
|
/**
 * Creates a DRACOON download share for the given file and returns its public URL.
 * For files in encrypted rooms the existing file key is decrypted with the sharing
 * user's key pair and re-encrypted under a freshly generated key pair (protected by
 * the share password, prompting the user when none is supplied). The URL format
 * depends on the server's REST API version (pre-4.26 uses the hash-fragment path).
 *
 * @throws BackgroundException on API or crypto failures (mapped to local exceptions)
 */
@Override
public DescriptiveUrl toDownloadUrl(final Path file, final Sharee sharee, CreateDownloadShareRequest options, final PasswordCallback callback) throws BackgroundException {
try {
if(log.isDebugEnabled()) {
log.debug(String.format("Create download share for %s", file));
}
if(null == options) {
options = new CreateDownloadShareRequest();
log.warn(String.format("Use default share options %s", options));
}
final Long fileid = Long.parseLong(nodeid.getVersionId(file));
final Host bookmark = session.getHost();
if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(file)) {
// get existing file key associated with the sharing user
final FileKey key = new NodesApi(session.getClient()).requestUserFileKey(fileid, null, null);
final EncryptedFileKey encFileKey = TripleCryptConverter.toCryptoEncryptedFileKey(key);
final UserKeyPairContainer keyPairContainer = session.getKeyPairForFileKey(encFileKey.getVersion());
final UserKeyPair userKeyPair = TripleCryptConverter.toCryptoUserKeyPair(keyPairContainer);
final Credentials passphrase = new TripleCryptKeyPair().unlock(callback, bookmark, userKeyPair);
final PlainFileKey plainFileKey = Crypto.decryptFileKey(encFileKey, userKeyPair.getUserPrivateKey(), passphrase.getPassword().toCharArray());
// encrypt file key with a new key pair; the pair is protected either by the
// share password from the options or by a passphrase prompted from the user
final UserKeyPair pair;
if(null == options.getPassword()) {
pair = Crypto.generateUserKeyPair(session.requiredKeyPairVersion(), callback.prompt(
bookmark, LocaleFactory.localizedString("Passphrase", "Cryptomator"),
LocaleFactory.localizedString("Provide additional login credentials", "Credentials"), new LoginOptions().icon(session.getHost().getProtocol().disk())
).getPassword().toCharArray());
}
else {
pair = Crypto.generateUserKeyPair(session.requiredKeyPairVersion(), options.getPassword().toCharArray());
}
final EncryptedFileKey encryptedFileKey = Crypto.encryptFileKey(plainFileKey, pair.getUserPublicKey());
// password is consumed by the key pair; the share itself carries key pair + file key
options.setPassword(null);
options.setKeyPair(TripleCryptConverter.toSwaggerUserKeyPairContainer(pair));
options.setFileKey(TripleCryptConverter.toSwaggerFileKey(encryptedFileKey));
}
final DownloadShare share = new SharesApi(session.getClient()).createDownloadShare(
options.nodeId(fileid), StringUtils.EMPTY, null);
final String help;
if(null == share.getExpireAt()) {
help = MessageFormat.format(LocaleFactory.localizedString("{0} URL"), LocaleFactory.localizedString("Pre-Signed", "S3"));
}
else {
// NOTE(review): getMillis() should already yield milliseconds, yet the value is
// multiplied by 1000 below before formatting — verify getShortFormat's expected
// unit; this looks like it may shift the displayed expiry far into the future.
final long expiry = share.getExpireAt().getMillis();
help = MessageFormat.format(LocaleFactory.localizedString("{0} URL"), LocaleFactory.localizedString("Pre-Signed", "S3")) + " (" + MessageFormat.format(LocaleFactory.localizedString("Expires {0}", "S3") + ")",
UserDateFormatterFactory.get().getShortFormat(expiry * 1000)
);
}
// Servers older than 4.26 use the legacy hash-fragment share URL layout
final Matcher matcher = Pattern.compile(SDSSession.VERSION_REGEX).matcher(session.softwareVersion().getRestApiVersion());
if(matcher.matches()) {
if(new Version(matcher.group(1)).compareTo(new Version("4.26")) < 0) {
return new DescriptiveUrl(URI.create(String.format("%s://%s/#/public/shares-downloads/%s",
bookmark.getProtocol().getScheme(),
bookmark.getHostname(),
share.getAccessKey())),
DescriptiveUrl.Type.signed, help);
}
}
return new DescriptiveUrl(URI.create(String.format("%s://%s/public/download-shares/%s",
bookmark.getProtocol().getScheme(),
bookmark.getHostname(),
share.getAccessKey())),
DescriptiveUrl.Type.signed, help);
}
catch(ApiException e) {
throw new SDSExceptionMappingService(nodeid).map(e);
}
catch(CryptoException e) {
throw new TripleCryptExceptionMappingService().map(e);
}
}
|
// Creates a top-level (unencrypted) room, requests a download share with all optional
// features disabled, and asserts the returned URL uses the modern
// /public/download-shares/ layout. Cleans up the room afterwards.
@Test
public void testShareTopLevelRoom() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final DescriptiveUrl url = new SDSShareFeature(session, nodeid).toDownloadUrl(room,
Share.Sharee.world, new CreateDownloadShareRequest()
.expiration(new ObjectExpiration().enableExpiration(false))
.notifyCreator(false)
.sendMail(false)
.sendSms(false)
.password(null)
.mailRecipients(null)
.mailSubject(null)
.mailBody(null)
.maxDownloads(null), new DisabledPasswordCallback());
assertNotEquals(DescriptiveUrl.EMPTY, url);
assertEquals(DescriptiveUrl.Type.signed, url.getType());
assertTrue(url.getUrl().startsWith("https://duck.dracoon.com/public/download-shares/"));
new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Static factory for the AutoValue implementation; defensively snapshots the argument
 * list into an immutable copy.
 */
public static UAnnotation create(UTree<?> annotationType, List<UExpression> arguments) {
return new AutoValue_UAnnotation(annotationType, ImmutableList.copyOf(arguments));
}
|
// Round-trips a UAnnotation through Java serialization and asserts equality, per
// Guava's SerializableTester contract.
@Test
public void serialization() {
SerializableTester.reserializeAndAssert(
UAnnotation.create(
UClassIdent.create("java.lang.SuppressWarnings"), ULiteral.stringLit("cast")));
}
|
/**
 * Updates the timer's timeout and, if the timer is running, reschedules the next firing
 * under the new timeout. No-ops (beyond recording the timeout) once the timer has been
 * stopped.
 *
 * @param timeoutMs new timeout in milliseconds
 */
public void reset(final int timeoutMs) {
    this.lock.lock();
    // All state mutation happens inside the try so the lock is always released, even if
    // schedule() throws. (Previously the timeout assignment sat between lock() and try,
    // which violates the standard lock/try-finally idiom.)
    try {
        this.timeoutMs = timeoutMs;
        if (this.stopped) {
            // Stopped timers never reschedule.
            return;
        }
        if (this.running) {
            schedule();
        }
    } finally {
        this.lock.unlock();
    }
}
|
// Fires the repeating timer for ~800ms at the default 50ms timeout and expects roughly
// 10 firings (tolerance 3), then resets the timeout to 100ms and repeats the check.
// reset() with no args keeps the current timeout; the sleep/assert tolerances absorb
// scheduling jitter.
@Test
public void testReset() throws Exception {
this.timer.start();
assertEquals(50, this.timer.getTimeoutMs());
for (int i = 0; i < 10; i++) {
Thread.sleep(80);
this.timer.reset();
}
assertEquals(10, this.timer.counter.get(), 3);
this.timer.reset(100);
for (int i = 0; i < 10; i++) {
Thread.sleep(80);
this.timer.reset();
}
assertEquals(10, this.timer.counter.get(), 3);
}
|
/**
 * Parses game chat messages for boss kill counts, personal bests, duel-arena streaks,
 * Hallowed Sepulchre times, hunter rumours, collection-log pets and Guardians of the
 * Rift completions, persisting each into the player's config.
 *
 * Fix: the boss-rename check previously used reference comparison
 * ({@code boss != renamedBoss}), which only worked because {@code Map.getOrDefault} and
 * {@code String.replace} happen to return the same instance when nothing changes. It now
 * uses {@code equals} for a well-defined comparison with identical observable behavior.
 */
@Subscribe
public void onChatMessage(ChatMessage chatMessage)
{
	// Only game-generated message types can carry kc/pb notifications.
	if (chatMessage.getType() != ChatMessageType.TRADE
		&& chatMessage.getType() != ChatMessageType.GAMEMESSAGE
		&& chatMessage.getType() != ChatMessageType.SPAM
		&& chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION)
	{
		return;
	}
	String message = chatMessage.getMessage();
	Matcher matcher = KILLCOUNT_PATTERN.matcher(message);
	if (matcher.find())
	{
		final String boss = matcher.group("boss");
		final int kc = Integer.parseInt(matcher.group("kc"));
		final String pre = matcher.group("pre");
		final String post = matcher.group("post");
		if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post))
		{
			unsetKc(boss);
			return;
		}
		String renamedBoss = KILLCOUNT_RENAMES
			.getOrDefault(boss, boss)
			// The config service doesn't support keys with colons in them
			.replace(":", "");
		// Value comparison (was a fragile reference comparison on interned/returned Strings)
		if (!boss.equals(renamedBoss))
		{
			// Unset old TOB kc
			unsetKc(boss);
			unsetPb(boss);
			unsetKc(boss.replace(":", "."));
			unsetPb(boss.replace(":", "."));
			// Unset old story mode
			unsetKc("Theatre of Blood Story Mode");
			unsetPb("Theatre of Blood Story Mode");
		}
		setKc(renamedBoss, kc);
		// We either already have the pb, or need to remember the boss for the upcoming pb
		if (lastPb > -1)
		{
			log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb);
			if (renamedBoss.contains("Theatre of Blood"))
			{
				// TOB team size isn't sent in the kill message, but can be computed from varbits
				int tobTeamSize = tobTeamSize();
				lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players");
			}
			else if (renamedBoss.contains("Tombs of Amascut"))
			{
				// TOA team size isn't sent in the kill message, but can be computed from varbits
				int toaTeamSize = toaTeamSize();
				lastTeamSize = toaTeamSize == 1 ? "Solo" : (toaTeamSize + " players");
			}
			final double pb = getPb(renamedBoss);
			// If a raid with a team size, only update the pb if it is lower than the existing pb
			// so that the pb is the overall lowest of any team size
			if (lastTeamSize == null || pb == 0 || lastPb < pb)
			{
				log.debug("Setting overall pb (old: {})", pb);
				setPb(renamedBoss, lastPb);
			}
			if (lastTeamSize != null)
			{
				log.debug("Setting team size pb: {}", lastTeamSize);
				setPb(renamedBoss + " " + lastTeamSize, lastPb);
			}
			lastPb = -1;
			lastTeamSize = null;
		}
		else
		{
			// Remember the boss; the pb message should arrive on the same tick
			lastBossKill = renamedBoss;
			lastBossTime = client.getTickCount();
		}
		return;
	}
	matcher = DUEL_ARENA_WINS_PATTERN.matcher(message);
	if (matcher.find())
	{
		final int oldWins = getKc("Duel Arena Wins");
		final int wins = matcher.group(2).equals("one") ? 1 :
			Integer.parseInt(matcher.group(2).replace(",", ""));
		final String result = matcher.group(1);
		int winningStreak = getKc("Duel Arena Win Streak");
		int losingStreak = getKc("Duel Arena Lose Streak");
		if (result.equals("won") && wins > oldWins)
		{
			losingStreak = 0;
			winningStreak += 1;
		}
		else if (result.equals("were defeated"))
		{
			losingStreak += 1;
			winningStreak = 0;
		}
		else
		{
			log.warn("unrecognized duel streak chat message: {}", message);
		}
		setKc("Duel Arena Wins", wins);
		setKc("Duel Arena Win Streak", winningStreak);
		setKc("Duel Arena Lose Streak", losingStreak);
	}
	matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message);
	if (matcher.find())
	{
		int losses = matcher.group(1).equals("one") ? 1 :
			Integer.parseInt(matcher.group(1).replace(",", ""));
		setKc("Duel Arena Losses", losses);
	}
	matcher = KILL_DURATION_PATTERN.matcher(message);
	if (matcher.find())
	{
		matchPb(matcher);
	}
	matcher = NEW_PB_PATTERN.matcher(message);
	if (matcher.find())
	{
		matchPb(matcher);
	}
	matcher = RAIDS_PB_PATTERN.matcher(message);
	if (matcher.find())
	{
		matchPb(matcher);
	}
	matcher = RAIDS_DURATION_PATTERN.matcher(message);
	if (matcher.find())
	{
		matchPb(matcher);
	}
	matcher = HS_PB_PATTERN.matcher(message);
	if (matcher.find())
	{
		// Floor pb is always present; overall time/pb only on a Grand Hallowed Coffin run
		int floor = Integer.parseInt(matcher.group("floor"));
		String floortime = matcher.group("floortime");
		String floorpb = matcher.group("floorpb");
		String otime = matcher.group("otime");
		String opb = matcher.group("opb");
		String pb = MoreObjects.firstNonNull(floorpb, floortime);
		setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb));
		if (otime != null)
		{
			pb = MoreObjects.firstNonNull(opb, otime);
			setPb("Hallowed Sepulchre", timeStringToSeconds(pb));
		}
	}
	matcher = HS_KC_FLOOR_PATTERN.matcher(message);
	if (matcher.find())
	{
		int floor = Integer.parseInt(matcher.group(1));
		int kc = Integer.parseInt(matcher.group(2).replaceAll(",", ""));
		setKc("Hallowed Sepulchre Floor " + floor, kc);
	}
	matcher = HS_KC_GHC_PATTERN.matcher(message);
	if (matcher.find())
	{
		int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
		setKc("Hallowed Sepulchre", kc);
	}
	matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message);
	if (matcher.find())
	{
		int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
		setKc("Hunter Rumours", kc);
	}
	// A remembered boss kill is only valid for the tick it arrived on
	if (lastBossKill != null && lastBossTime != client.getTickCount())
	{
		lastBossKill = null;
		lastBossTime = -1;
	}
	matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message);
	if (matcher.find())
	{
		String item = matcher.group(1);
		int petId = findPet(item);
		if (petId != -1)
		{
			final List<Integer> petList = new ArrayList<>(getPetList());
			if (!petList.contains(petId))
			{
				log.debug("New pet added: {}/{}", item, petId);
				petList.add(petId);
				setPetList(petList);
			}
		}
	}
	matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message);
	if (matcher.find())
	{
		int kc = Integer.parseInt(matcher.group(1));
		setKc("Guardians of the Rift", kc);
	}
}
|
// A lap-count message sets the "last boss" context; the subsequent lap-duration message
// must then record the pb (61.0s) and kc (2) under "prifddinas agility course". A
// second, precise-format time (1:01.20) must store the fractional pb 61.2.
@Test
public void testAgilityLap()
{
final String NEW_PB = "Lap duration: <col=ff0000>1:01</col> (new personal best).";
final String NEW_PB_PRECISE = "Lap duration: <col=ff0000>1:01.20</col> (new personal best).";
// This sets lastBoss
ChatMessage chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Your Prifddinas Agility Course lap count is: <col=ff0000>2</col>.", null, 0);
chatCommandsPlugin.onChatMessage(chatMessage);
chatMessage = new ChatMessage(null, GAMEMESSAGE, "", NEW_PB, null, 0);
chatCommandsPlugin.onChatMessage(chatMessage);
verify(configManager).setRSProfileConfiguration("personalbest", "prifddinas agility course", 61.0);
verify(configManager).setRSProfileConfiguration("killcount", "prifddinas agility course", 2);
// Precise times
chatMessage = new ChatMessage(null, GAMEMESSAGE, "", NEW_PB_PRECISE, null, 0);
chatCommandsPlugin.onChatMessage(chatMessage);
verify(configManager).setRSProfileConfiguration("personalbest", "prifddinas agility course", 61.2);
}
|
/**
 * Renders a throwable as "FQCN[: message]" followed by its stack trace.
 *
 * Uses try-with-resources instead of the previous manual try/finally so the
 * PrintWriter is also closed if printing the header line throws; the rendered
 * output is identical.
 *
 * @param e the throwable to render (must not be null)
 * @return the formatted class name, optional message and stack trace
 */
public static String toString(Throwable e) {
    UnsafeStringWriter w = new UnsafeStringWriter();
    try (PrintWriter p = new PrintWriter(w)) {
        p.print(e.getClass().getName());
        if (e.getMessage() != null) {
            p.print(": " + e.getMessage());
        }
        p.println();
        e.printStackTrace(p);
        // Evaluated before the writer is closed by try-with-resources, matching the
        // original return-inside-try semantics.
        return w.toString();
    }
}
|
// The two-arg overload should include both the supplied prefix and the rendered
// exception header "java.lang.RuntimeException: abc".
@Test
void testExceptionToStringWithMessage() throws Exception {
String s = StringUtils.toString("greeting", new RuntimeException("abc"));
assertThat(s, containsString("greeting"));
assertThat(s, containsString("java.lang.RuntimeException: abc"));
}
|
/**
 * Rewrites the target path of the incoming request so the container dispatches it to
 * {@code destinationPath}. Handles both proxy request wrapper types produced by this
 * library directly; otherwise falls back to the raw proxy event stored in the request
 * attributes. Throws if no known event type can be found.
 *
 * Fix: the AwsProxyHttpServletRequest and API_GATEWAY_EVENT_PROPERTY branches set the
 * {@code dispatchTo} field instead of the {@code destinationPath} parameter, unlike the
 * other two branches — now all four branches consistently use the parameter.
 *
 * @throws IllegalStateException when the request carries no recognizable proxy event
 */
void setRequestPath(ServletRequest req, final String destinationPath) {
    if (req instanceof AwsProxyHttpServletRequest) {
        ((AwsProxyHttpServletRequest) req).getAwsProxyRequest().setPath(destinationPath);
        return;
    }
    if (req instanceof AwsHttpApiV2ProxyHttpServletRequest) {
        ((AwsHttpApiV2ProxyHttpServletRequest) req).getRequest().setRawPath(destinationPath);
        return;
    }
    log.debug("Request is not an proxy request generated by this library, attempting to extract the proxy event type from the request attributes");
    if (req.getAttribute(API_GATEWAY_EVENT_PROPERTY) != null && req.getAttribute(API_GATEWAY_EVENT_PROPERTY) instanceof AwsProxyRequest) {
        ((AwsProxyRequest)req.getAttribute(API_GATEWAY_EVENT_PROPERTY)).setPath(destinationPath);
        return;
    }
    if (req.getAttribute(HTTP_API_EVENT_PROPERTY) != null && req.getAttribute(HTTP_API_EVENT_PROPERTY) instanceof HttpApiV2ProxyRequest) {
        ((HttpApiV2ProxyRequest)req.getAttribute(HTTP_API_EVENT_PROPERTY)).setRawPath(destinationPath);
        return;
    }
    throw new IllegalStateException("Could not set new target path for the given ServletRequest object");
}
|
// The servlet request wraps the same underlying proxy event, so rewriting the path via
// the dispatcher must be observable through servletRequest.getRequestURI().
@Test
void setPath_forwardByPath_proxyRequestObjectInPropertyReferencesSameProxyRequest() throws InvalidRequestEventException {
AwsProxyRequest proxyRequest = new AwsProxyRequestBuilder("/hello", "GET").build();
HttpServletRequest servletRequest = requestReader.readRequest(proxyRequest, null, new MockLambdaContext(), ContainerConfig.defaultConfig());
AwsProxyRequestDispatcher dispatcher = new AwsProxyRequestDispatcher(FORWARD_PATH, false, null);
dispatcher.setRequestPath(servletRequest, FORWARD_PATH);
assertEquals(FORWARD_PATH, servletRequest.getRequestURI());
}
|
/**
 * Generates a signed certificate for a host and stores it in the keystore, using the
 * host name as both the keystore alias and the certificate subject.
 *
 * @throws IOException if the underlying keytool invocation fails
 */
public static void generateHostCert(File keystore, String password, String host, int validity) throws IOException {
// generate the keypair for the host
generateSignedCert(keystore, password, validity,
host, // alias
host); // subject
}
|
// Smoke test: generating a host certificate for a bare IP address must not throw.
@Test
public void testIPBasedCert() throws Exception {
KeyToolUtils.generateHostCert(keystore, password, "10.1.2.3", validity);
}
|
/**
 * Authenticates a user by identification number and password and issues a token.
 *
 * Fix: the result of {@code passwordEncoder.matches(...)} was previously discarded, so
 * any password was accepted for a known identification number. A mismatch now aborts
 * the login. USER_NOT_FOUND is reported for both unknown users and bad passwords to
 * avoid leaking which identification numbers exist.
 *
 * @throws GenericException when the user does not exist or the password is wrong
 */
public LoginResponse login(LoginRequest request) {
    User user = userRepository.findByIdentificationNumber(request.getIdentificationNumber()).orElseThrow(() ->GenericException.builder()
            .httpStatus(HttpStatus.NOT_FOUND)
            .logMessage(this.getClass().getName() + ".login user not found with identification number {0}", request.getIdentificationNumber() )
            .message(ErrorCode.USER_NOT_FOUND)
            .build());
    // Reject the login when the raw password does not match the stored hash.
    if (!passwordEncoder.matches(request.getPassword(), user.getPassword())) {
        throw GenericException.builder()
                .httpStatus(HttpStatus.NOT_FOUND)
                .logMessage(this.getClass().getName() + ".login password mismatch for identification number {0}", request.getIdentificationNumber())
                .message(ErrorCode.USER_NOT_FOUND)
                .build();
    }
    return LoginResponse.builder().token(authService.createToken(user)).build();
}
|
// Happy path: repository yields the user, the password matcher is stubbed to true, and
// the token created by the auth service must be returned in the response.
@Test
void login_successfulLogin() {
// Arrange
LoginRequest request = new LoginRequest("1234567890", "password");
User user = new User("1234567890", "John", "Doe", "encodedPassword");
String token = "validToken";
when(userRepository.findByIdentificationNumber(request.getIdentificationNumber())).thenReturn(Optional.of(user));
when(passwordEncoder.matches(request.getPassword(), user.getPassword())).thenReturn(true);
when(authService.createToken(user)).thenReturn(token);
// Act
LoginResponse response = userService.login(request);
// Assert
assertEquals(token, response.getToken());
}
|
/**
 * Writes and flushes the buffer using the channel's void promise, avoiding the
 * allocation and notification overhead of a regular promise when no completion
 * callback is needed.
 */
public static void writeAndFlushWithVoidPromise(ChannelOutboundInvoker ctx, ByteBuf msg) {
ctx.writeAndFlush(msg, ctx.voidPromise());
}
|
// Verifies the helper forwards the exact buffer together with the context's void
// promise. The buffer is released in finally to avoid a leak-report in the test run.
@Test
public void testWriteAndFlushWithVoidPromise() {
final ChannelOutboundInvoker ctx = mock(ChannelOutboundInvoker.class);
final VoidChannelPromise voidChannelPromise = mock(VoidChannelPromise.class);
when(ctx.voidPromise()).thenReturn(voidChannelPromise);
final byte[] data = "test".getBytes(StandardCharsets.UTF_8);
final ByteBuf byteBuf = Unpooled.wrappedBuffer(data, 0, data.length);
try {
NettyChannelUtil.writeAndFlushWithVoidPromise(ctx, byteBuf);
verify(ctx).writeAndFlush(same(byteBuf), same(voidChannelPromise));
verify(ctx).voidPromise();
} finally {
byteBuf.release();
}
}
|
/**
 * Stops the first {@code numOfServicesStarted} child services in reverse start order.
 * Each service is stopped quietly; the first exception encountered is remembered and
 * rethrown (converted) only after all services have been given a chance to stop.
 *
 * @param numOfServicesStarted    number of services that were started (stop index bound)
 * @param stopOnlyStartedServices when false, services still in INITED state are stopped too
 */
private void stop(int numOfServicesStarted, boolean stopOnlyStartedServices) {
// stop in reverse order of start
Exception firstException = null;
List<Service> services = getServices();
for (int i = numOfServicesStarted - 1; i >= 0; i--) {
Service service = services.get(i);
if (LOG.isDebugEnabled()) {
LOG.debug("Stopping service #" + i + ": " + service);
}
STATE state = service.getServiceState();
// depending on the stop policy, stop only STARTED services or INITED ones as well
if (state == STATE.STARTED
|| (!stopOnlyStartedServices && state == STATE.INITED)) {
Exception ex = ServiceOperations.stopQuietly(LOG, service);
if (ex != null && firstException == null) {
firstException = ex;
}
}
}
//after stopping all services, rethrow the first exception raised
if (firstException != null) {
throw ServiceStateException.convert(firstException);
}
}
|
// A ServiceManager with no child services must go through init/start/stop cleanly.
@Test
public void testServiceLifecycleNoChildren() {
ServiceManager serviceManager = new ServiceManager("ServiceManager");
serviceManager.init(new Configuration());
serviceManager.start();
serviceManager.stop();
}
|
/**
 * Caches a contiguous batch of serialized edits covering transactions
 * [newStartTxn, newEndTxn]. Invalid ranges are logged and dropped. Under the write
 * lock this: updates the layout version if it changed (which resets the cache),
 * initializes or re-initializes the cache when empty or when the batch is not
 * contiguous with the cached range, evicts oldest entries until the new batch fits,
 * and finally records the batch and advances the tracked txn-id bounds. A batch
 * larger than the whole capacity empties the cache and is not stored.
 */
void storeEdits(byte[] inputData, long newStartTxn, long newEndTxn,
int newLayoutVersion) {
// Reject nonsensical txn ranges up front.
if (newStartTxn < 0 || newEndTxn < newStartTxn) {
Journal.LOG.error(String.format("Attempted to cache data of length %d " +
"with newStartTxn %d and newEndTxn %d",
inputData.length, newStartTxn, newEndTxn));
return;
}
try (AutoCloseableLock l = writeLock.acquire()) {
if (newLayoutVersion != layoutVersion) {
try {
updateLayoutVersion(newLayoutVersion, newStartTxn);
} catch (IOException ioe) {
Journal.LOG.error(String.format("Unable to save new edits [%d, %d] " +
"due to exception when updating to new layout version %d",
newStartTxn, newEndTxn, newLayoutVersion), ioe);
return;
}
} else if (lowestTxnId == INVALID_TXN_ID) {
// First batch ever (or after a reset): start tracking from this txn.
Journal.LOG.info("Initializing edits cache starting from txn ID " +
newStartTxn);
initialize(newStartTxn);
} else if (highestTxnId + 1 != newStartTxn) {
// Cache is out of sync; clear to avoid storing noncontiguous regions
Journal.LOG.error(String.format("Edits cache is out of sync; " +
"looked for next txn id at %d but got start txn id for " +
"cache put request at %d. Reinitializing at new request.",
highestTxnId + 1, newStartTxn));
initialize(newStartTxn);
}
// Evict oldest batches until there is room for the new one.
while ((totalSize + inputData.length) > capacity && !dataMap.isEmpty()) {
Map.Entry<Long, byte[]> lowest = dataMap.firstEntry();
dataMap.remove(lowest.getKey());
totalSize -= lowest.getValue().length;
}
// A batch bigger than the entire cache can never fit; drop it and empty the cache
// (the eviction loop above has already removed everything).
if (inputData.length > capacity) {
initialize(INVALID_TXN_ID);
Journal.LOG.warn(String.format("A single batch of edits was too " +
"large to fit into the cache: startTxn = %d, endTxn = %d, " +
"input length = %d. The cache size (%s) or cache fraction (%s) must be " +
"increased for it to work properly (current capacity %d)." +
"Cache is now empty.",
newStartTxn, newEndTxn, inputData.length,
DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_KEY,
DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_FRACTION_KEY, capacity));
return;
}
// Track the lowest cached txn: either this batch (cache was empty) or the current
// oldest surviving batch after eviction.
if (dataMap.isEmpty()) {
lowestTxnId = newStartTxn;
} else {
lowestTxnId = dataMap.firstKey();
}
dataMap.put(newStartTxn, inputData);
highestTxnId = newEndTxn;
totalSize += inputData.length;
}
}
|
// With a single cached segment [1, 20], reads from various offsets must return the
// expected txn counts: partial from the front, the whole segment, capped at the
// segment end when asked for more, and tail reads from mid-segment.
@Test
public void testCacheSingleSegment() throws Exception {
storeEdits(1, 20);
// Leading part of the segment
assertTxnCountAndContents(1, 5, 5);
// All of the segment
assertTxnCountAndContents(1, 20, 20);
// Past the segment
assertTxnCountAndContents(1, 40, 20);
// Trailing part of the segment
assertTxnCountAndContents(10, 11, 20);
// Trailing part of the segment, past the end
assertTxnCountAndContents(10, 20, 20);
}
|
/**
 * Returns all index ranges overlapping [begin, end], sorted by the standard index-range
 * comparator. Only "new-style" documents are considered (those without the legacy
 * "start" field). The second OR branch additionally matches ranges whose begin &lt;= 0
 * and end &gt;= 0 — presumably catch-all/sentinel ranges that should always be returned;
 * NOTE(review): confirm that intent, as it is not obvious from this method alone.
 */
@Override
public SortedSet<IndexRange> find(DateTime begin, DateTime end) {
final DBQuery.Query query = DBQuery.or(
DBQuery.and(
DBQuery.notExists("start"), // "start" has been used by the old index ranges in MongoDB
DBQuery.lessThanEquals(IndexRange.FIELD_BEGIN, end.getMillis()),
DBQuery.greaterThanEquals(IndexRange.FIELD_END, begin.getMillis())
),
DBQuery.and(
DBQuery.notExists("start"), // "start" has been used by the old index ranges in MongoDB
DBQuery.lessThanEquals(IndexRange.FIELD_BEGIN, 0L),
DBQuery.greaterThanEquals(IndexRange.FIELD_END, 0L)
)
);
// Cursor is closed by try-with-resources after the copy is taken.
try (DBCursor<MongoIndexRange> indexRanges = collection.find(query)) {
return ImmutableSortedSet.copyOf(IndexRange.COMPARATOR, (Iterator<? extends IndexRange>) indexRanges);
}
}
|
// Reopening an index that is NOT managed by any index set must not create index
// ranges, so a subsequent find over the period returns nothing.
@Test
@MongoDBFixtures("MongoIndexRangeServiceTest.json")
public void testHandleIndexReopeningWhenNotManaged() throws Exception {
final DateTime begin = new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC);
final DateTime end = new DateTime(2016, 1, 15, 0, 0, DateTimeZone.UTC);
when(indexSetRegistry.isManagedIndex("graylog_3")).thenReturn(false);
when(indices.indexRangeStatsOfIndex("graylog_3")).thenReturn(IndexRangeStats.EMPTY);
localEventBus.post(IndicesReopenedEvent.create(Collections.singleton("graylog_3")));
final SortedSet<IndexRange> indexRanges = indexRangeService.find(begin, end);
assertThat(indexRanges).isEmpty();
}
|
/**
 * Convenience overload that resolves the inter-node listener using the class's default
 * logger.
 */
public URL getInterNodeListener(
final Function<URL, Integer> portResolver
) {
return getInterNodeListener(portResolver, LOGGER);
}
|
// An explicitly configured advertised listener on the IPv6 loopback must be used
// verbatim, logged as the chosen inter-node listener, and accompanied by a loopback
// warning — with no other logger interactions.
@Test
public void shouldUseExplicitInterNodeListenerIfSetToIpv6Loopback() {
// Given:
final URL expected = url("https://[::1]:12345");
final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.<String, Object>builder()
.putAll(MIN_VALID_CONFIGS)
.put(ADVERTISED_LISTENER_CONFIG, expected.toString())
.build()
);
// When:
final URL actual = config.getInterNodeListener(portResolver, logger);
// Then:
assertThat(actual, is(expected));
verifyLogsInterNodeListener(expected, QUOTED_INTER_NODE_LISTENER_CONFIG);
verifyLogsLoopBackWarning(expected, QUOTED_INTER_NODE_LISTENER_CONFIG);
verifyNoMoreInteractions(logger);
}
|
/**
 * Fetches the timeline entity for the given application attempt (all fields) and
 * converts it into an {@link ApplicationAttemptReport}.
 *
 * @throws YarnException, IOException when the timeline reader call fails
 */
@Override
public ApplicationAttemptReport getApplicationAttemptReport(
ApplicationAttemptId applicationAttemptId)
throws YarnException, IOException {
TimelineEntity entity = readerClient.getApplicationAttemptEntity(
applicationAttemptId, "ALL", null);
return TimelineEntityV2Converter.convertToApplicationAttemptReport(entity);
}
|
// Stubs the timeline reader to return a synthetic attempt entity and verifies that the
// converted report carries the attempt id, finish time and tracking URL through.
@Test
public void testGetAppAttemptReport() throws IOException, YarnException {
final ApplicationId appId = ApplicationId.newInstance(0, 1);
final ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
when(spyTimelineReaderClient.getApplicationAttemptEntity(appAttemptId,
"ALL", null))
.thenReturn(createAppAttemptTimelineEntity(appAttemptId));
ApplicationAttemptReport report =
client.getApplicationAttemptReport(appAttemptId);
assertThat(report.getApplicationAttemptId()).isEqualTo(appAttemptId);
assertThat(report.getFinishTime()).isEqualTo(Integer.MAX_VALUE + 2L);
assertThat(report.getOriginalTrackingUrl()).
isEqualTo("test original tracking url");
}
|
/**
 * Routes the incoming message to the TRUE or FALSE relation depending on whether it
 * matches the configured geofence.
 */
@Override
public void onMsg(TbContext ctx, TbMsg msg) throws TbNodeException {
    if (checkMatches(msg)) {
        ctx.tellNext(msg, TbNodeConnectionType.TRUE);
    } else {
        ctx.tellNext(msg, TbNodeConnectionType.FALSE);
    }
}
|
// With no perimeter key configured, the node must fall back to the legacy
// metadata-defined circle perimeter; a point inside that circle routes the original
// message (same instance) to the TRUE relation without failures.
@Test
void givenTypeCircleAndConfigWithoutPerimeterKeyName_whenOnMsg_thenTrue() throws TbNodeException {
// GIVEN
var config = new TbGpsGeofencingFilterNodeConfiguration().defaultConfiguration();
config.setPerimeterKeyName(null);
node.init(ctx, new TbNodeConfiguration(JacksonUtil.valueToTree(config)));
DeviceId deviceId = new DeviceId(UUID.randomUUID());
TbMsgMetaData metadata = getMetadataForOldVersionCirclePerimeter();
TbMsg msg = getTbMsg(deviceId, metadata,
POINT_INSIDE_CIRCLE.getLatitude(), POINT_INSIDE_CIRCLE.getLongitude());
// WHEN
node.onMsg(ctx, msg);
// THEN
ArgumentCaptor<TbMsg> newMsgCaptor = ArgumentCaptor.forClass(TbMsg.class);
verify(ctx, times(1)).tellNext(newMsgCaptor.capture(), eq(TbNodeConnectionType.TRUE));
verify(ctx, never()).tellFailure(any(), any());
TbMsg newMsg = newMsgCaptor.getValue();
assertThat(newMsg).isNotNull();
assertThat(newMsg).isSameAs(msg);
}
|
/**
 * Integer overload of ROUND: a long is already whole, so the value is returned
 * unchanged (boxed).
 */
@Udf
public Long round(@UdfParameter final long val) {
return val;
}
|
// Exercises ROUND on negative doubles across positive, zero and negative decimal
// places. Note the half-way cases (-1.5, -100.5, -100.115, ...) round toward positive
// infinity (i.e. HALF_UP on the signed value rounds -1.5 to -1.0).
@Test
public void shouldRoundDoubleWithDecimalPlacesNegative() {
assertThat(udf.round(-1.0d, 0), is(-1.0d));
assertThat(udf.round(-1.1d, 0), is(-1.0d));
assertThat(udf.round(-1.5d, 0), is(-1.0d));
assertThat(udf.round(-1.75d, 0), is(-2.0d));
assertThat(udf.round(-100.1d, 0), is(-100.0d));
assertThat(udf.round(-100.5d, 0), is(-100.0d));
assertThat(udf.round(-100.75d, 0), is(-101.0d));
assertThat(udf.round(-100.10d, 1), is(-100.1d));
assertThat(udf.round(-100.11d, 1), is(-100.1d));
assertThat(udf.round(-100.15d, 1), is(-100.1d));
assertThat(udf.round(-100.17d, 1), is(-100.2d));
assertThat(udf.round(-100.110d, 2), is(-100.11d));
assertThat(udf.round(-100.111d, 2), is(-100.11d));
assertThat(udf.round(-100.115d, 2), is(-100.11d));
assertThat(udf.round(-100.117d, 2), is(-100.12d));
assertThat(udf.round(-100.1110d, 3), is(-100.111d));
assertThat(udf.round(-100.1111d, 3), is(-100.111d));
assertThat(udf.round(-100.1115d, 3), is(-100.111d));
assertThat(udf.round(-100.1117d, 3), is(-100.112d));
assertThat(udf.round(-12345.67d, -1), is(-12350d));
assertThat(udf.round(-12345.67d, -2), is(-12300d));
assertThat(udf.round(-12345.67d, -3), is(-12000d));
assertThat(udf.round(-12345.67d, -4), is(-10000d));
assertThat(udf.round(-12345.67d, -5), is(0d));
}
|
/**
 * Looks up a database by name.
 *
 * @throws NoDatabaseSelectedException when the name is null or empty
 * @throws UnknownDatabaseException when no database with that name exists
 */
public ShardingSphereDatabase getDatabase(final String name) {
ShardingSpherePreconditions.checkNotEmpty(name, NoDatabaseSelectedException::new);
ShardingSphereMetaData metaData = getMetaDataContexts().getMetaData();
ShardingSpherePreconditions.checkState(metaData.containsDatabase(name), () -> new UnknownDatabaseException(name));
return metaData.getDatabase(name);
}
|
// A null database name must be rejected with NoDatabaseSelectedException.
@Test
void assertGetDatabaseWithNull() {
assertThrows(NoDatabaseSelectedException.class, () -> contextManager.getDatabase(null));
}
|
/**
 * Returns a new Avro serializer for the given class.
 * NOTE(review): AvroSerializer is instantiated as a raw type here; if its declaration
 * is generic, parameterizing it would remove an unchecked warning — verify against the
 * class definition.
 */
@Override
@InterfaceAudience.Private
public Serializer<T> getSerializer(Class<T> c) {
return new AvroSerializer(c);
}
|
// The serialization factory must decline primitive and array types: no registered
// serialization accepts byte[] or byte, so getSerializer returns null.
@Test
public void testAcceptHandlingPrimitivesAndArrays() throws Exception {
SerializationFactory factory = new SerializationFactory(conf);
assertNull(factory.getSerializer(byte[].class));
assertNull(factory.getSerializer(byte.class));
}
|
/**
 * Decides whether S3 Select pushdown applies to the given table/partition. Pushdown is
 * off when the session disables it or no storage path is known; otherwise the decision
 * is delegated to the schema-based overload, using the partition's schema when a
 * partition is supplied (partitions may live on different storages).
 */
public static boolean shouldEnablePushdownForTable(ConnectorSession session, Table table, String path, Optional<Partition> optionalPartition)
{
    // Both preconditions must hold before inspecting the schema.
    if (!isS3SelectPushdownEnabled(session) || path == null) {
        return false;
    }
    // Hive table partitions could be on different storages,
    // as a result, we have to check each individual optionalPartition
    final Properties schema;
    if (optionalPartition.isPresent()) {
        schema = getHiveSchema(optionalPartition.get(), table);
    }
    else {
        schema = getHiveSchema(table);
    }
    return shouldEnablePushdownForTable(table, path, schema);
}
|
@Test
public void testShouldNotEnableSelectPushdownWhenDisabledOnSession()
{
    // With pushdown disabled on the session, the table/path must never be eligible.
    ConnectorSession testSession = initTestingConnectorSession(false);
    assertFalse(shouldEnablePushdownForTable(testSession, table, "", Optional.empty()));
}
|
/**
 * Stores a view in the relations storage.
 *
 * @param view        the view to store
 * @param replace     if true, overwrite any existing relation with the same name
 * @param ifNotExists if true, keep any existing relation and do nothing
 */
public void createView(View view, boolean replace, boolean ifNotExists) {
    final String name = view.name();
    if (ifNotExists) {
        // Keep whatever is already stored under this name, if anything.
        relationsStorage.putIfAbsent(name, view);
        return;
    }
    if (replace) {
        relationsStorage.put(name, view);
        return;
    }
    // Neither flag set: the insert must not clobber an existing relation.
    if (!relationsStorage.putIfAbsent(name, view)) {
        throw QueryException.error("Mapping or view already exists: " + name);
    }
}
|
@Test
public void when_createsDuplicateViewsIfReplace_then_succeeds() {
    // given
    View view = view();
    // when: replace=true must take the unconditional put() path
    catalog.createView(view, true, false);
    // then
    verify(relationsStorage).put(eq(view.name()), isA(View.class));
}
|
/**
 * Inserts a step-level breakpoint for the given step identifier and returns the persisted
 * breakpoint row. The workflow id may be revised (via getRevisedWorkflowId) before storage.
 *
 * @throws MaestroBadRequestException if the insert returned no row, i.e. the breakpoint
 *     could not be created for the given identifier
 */
public StepBreakpoint addStepBreakpoint(
    String workflowId,
    long version,
    long instanceId,
    long runId,
    String stepId,
    long stepAttemptId,
    User user) {
  // Validates the step against the workflow and yields the stored workflow id form.
  final String revisedWorkflowId = getRevisedWorkflowId(workflowId, stepId, true);
  return withMetricLogError(
      () ->
          withRetryableTransaction(
              conn -> {
                try (PreparedStatement stmt = conn.prepareStatement(ADD_STEP_BREAKPOINT)) {
                  // Bind parameters in the column order expected by ADD_STEP_BREAKPOINT.
                  int idx = 0;
                  stmt.setString(++idx, revisedWorkflowId);
                  stmt.setLong(++idx, version);
                  stmt.setLong(++idx, instanceId);
                  stmt.setLong(++idx, runId);
                  stmt.setString(++idx, stepId);
                  stmt.setLong(++idx, stepAttemptId);
                  stmt.setString(++idx, toJson(user));
                  try (ResultSet rs = stmt.executeQuery()) {
                    if (rs.next()) {
                      // The insert statement returns the stored row; map it back.
                      return stepBreakpointFromResultSet(rs);
                    } else {
                      throw new MaestroBadRequestException(
                          Collections.emptyList(),
                          "Breakpoint could not be set with identifier [%s][%d][%d][%d][%s][%d]",
                          workflowId,
                          version,
                          instanceId,
                          runId,
                          stepId,
                          stepAttemptId);
                    }
                  }
                }
              }),
      "addStepBreakpointForStepIdentifier",
      "Failed to addStepBreakpointForStepIdentifier [{}][{}][{}][{}][{}][{}]",
      workflowId,
      version,
      instanceId,
      runId,
      stepId,
      stepAttemptId);
}
|
@Test
public void testAddStepBreakpointForAllWorkflowVersionsInvalid() {
  // A step id that does not exist in the workflow definition must be rejected
  // with MaestroBadRequestException before anything is persisted.
  when(workflowDao.getWorkflowDefinition(anyString(), anyString())).thenReturn(wfd);
  AssertHelper.assertThrows(
      "invalid step",
      MaestroBadRequestException.class,
      "Breakpoint can't be set as stepId [non-exist] is not present for the workflowId"
          + " [sample-active-wf-with-props]",
      () ->
          maestroStepBreakpointDao.addStepBreakpoint(
              TEST_WORKFLOW_ID1,
              Constants.MATCH_ALL_WORKFLOW_VERSIONS,
              Constants.MATCH_ALL_WORKFLOW_INSTANCES,
              Constants.MATCH_ALL_RUNS,
              TEST_NON_EXIST_STEP_ID,
              Constants.MATCH_ALL_RUNS,
              TEST_USER));
}
|
/**
 * Returns whether two expressions are structurally equivalent after NOT
 * rewriting and binding against the given struct.
 */
public static boolean equivalent(
    Expression left, Expression right, Types.StructType struct, boolean caseSensitive) {
  // Normalize NOT nodes, bind both sides, then compare the bound forms.
  Expression boundLeft = Binder.bind(struct, Expressions.rewriteNot(left), caseSensitive);
  Expression boundRight = Binder.bind(struct, Expressions.rewriteNot(right), caseSensitive);
  return boundLeft.isEquivalentTo(boundRight);
}
|
@Test
public void testInEquivalence() {
  // IN / NOT IN equivalence must ignore duplicate values, collapse single-value
  // sets to equal/notEqual, and detect genuinely different value sets.
  assertThat(
          ExpressionUtil.equivalent(
              Expressions.in("id", 1, 2, 1), Expressions.in("id", 2, 1, 2), STRUCT, true))
      .as("Should ignore duplicate longs (in)")
      .isTrue();
  assertThat(
          ExpressionUtil.equivalent(
              Expressions.notIn("id", 1, 2, 1), Expressions.notIn("id", 2, 1, 2), STRUCT, true))
      .as("Should ignore duplicate longs (notIn)")
      .isTrue();
  assertThat(
          ExpressionUtil.equivalent(
              Expressions.in("data", "a", "b", "a"),
              Expressions.in("data", "b", "a"),
              STRUCT,
              true))
      .as("Should ignore duplicate strings (in)")
      .isTrue();
  assertThat(
          ExpressionUtil.equivalent(
              Expressions.notIn("data", "b", "b"), Expressions.notIn("data", "b"), STRUCT, true))
      .as("Should ignore duplicate strings (notIn)")
      .isTrue();
  assertThat(
          ExpressionUtil.equivalent(
              Expressions.in("data", "a"), Expressions.equal("data", "a"), STRUCT, true))
      .as("Should detect equivalence with equal (in, string)")
      .isTrue();
  assertThat(
          ExpressionUtil.equivalent(
              Expressions.notIn("id", 1), Expressions.notEqual("id", 1), STRUCT, true))
      .as("Should detect equivalence with notEqual (notIn, long)")
      .isTrue();
  assertThat(
          ExpressionUtil.equivalent(
              Expressions.in("id", 1, 2, 3), Expressions.in("id", 1, 2), STRUCT, true))
      .as("Should detect different sets (in, long)")
      .isFalse();
  assertThat(
          ExpressionUtil.equivalent(
              Expressions.notIn("data", "a", "b"), Expressions.notIn("data", "a"), STRUCT, true))
      .as("Should detect different sets (notIn, string)")
      .isFalse();
}
|
/**
 * Grows the frequency table so it can model at least {@code maximumSize} entries,
 * resetting all recorded counts when a resize happens. A no-op if the current
 * table is already large enough.
 *
 * @param maximumSize the maximum size of the cache; must be non-negative
 */
public void ensureCapacity(@NonNegative long maximumSize) {
  requireArgument(maximumSize >= 0);
  // Cap so the power-of-two rounding below cannot overflow an int.
  int maximum = (int) Math.min(maximumSize, Integer.MAX_VALUE >>> 1);
  if ((table != null) && (table.length >= maximum)) {
    return;
  }
  // Table length is a power of two (minimum 8) so index masking works.
  table = new long[Math.max(Caffeine.ceilingPowerOfTwo(maximum), 8)];
  sampleSize = (maximumSize == 0) ? 10 : (10 * maximum);
  blockMask = (table.length >>> 3) - 1;
  // (10 * maximum) may overflow to a non-positive value; clamp to MAX_VALUE.
  if (sampleSize <= 0) {
    sampleSize = Integer.MAX_VALUE;
  }
  // Resizing discards all counts, so the sample counter restarts.
  size = 0;
}
|
@Test(dataProvider = "sketch")
public void ensureCapacity_negative(FrequencySketch<Integer> sketch) {
assertThrows(IllegalArgumentException.class, () -> sketch.ensureCapacity(-1));
}
|
/**
 * Builds a map from each master node to the collection of its slave nodes,
 * matching slaves to masters via {@code getMasterId()}. Masters without
 * slaves are absent from the result, as are slaves whose master id does not
 * match any known master.
 *
 * @return master node to slave nodes mapping
 */
@Override
public Map<RedisClusterNode, Collection<RedisClusterNode>> clusterGetMasterSlaveMap() {
    Iterable<RedisClusterNode> res = clusterGetNodes();
    // Index masters by node id so each slave's master lookup is O(1)
    // instead of scanning the whole master set per node.
    Map<String, RedisClusterNode> mastersById = new HashMap<>();
    for (RedisClusterNode redisClusterNode : res) {
        if (redisClusterNode.isMaster()) {
            mastersById.put(redisClusterNode.getId(), redisClusterNode);
        }
    }
    Map<RedisClusterNode, Collection<RedisClusterNode>> result = new HashMap<>();
    for (RedisClusterNode redisClusterNode : res) {
        if (redisClusterNode.getMasterId() == null) {
            continue;
        }
        RedisClusterNode masterNode = mastersById.get(redisClusterNode.getMasterId());
        if (masterNode != null) {
            result.computeIfAbsent(masterNode, k -> new ArrayList<>()).add(redisClusterNode);
        }
    }
    return result;
}
|
@Test
public void testClusterGetMasterSlaveMap() {
    // The test cluster has 3 masters, each with exactly one slave.
    Map<RedisClusterNode, Collection<RedisClusterNode>> map = connection.clusterGetMasterSlaveMap();
    assertThat(map).hasSize(3);
    for (Collection<RedisClusterNode> slaves : map.values()) {
        assertThat(slaves).hasSize(1);
    }
}
|
@GetMapping("/getUserPermissionByToken")
public ShenyuAdminResult getUserPermissionByToken(@RequestParam(name = "token") final String token) {
PermissionMenuVO permissionMenuVO = permissionService.getPermissionMenu(token);
return Optional.ofNullable(permissionMenuVO)
.map(item -> ShenyuAdminResult.success(ShenyuResultMessage.MENU_SUCCESS, item))
.orElseGet(() -> ShenyuAdminResult.error(ShenyuResultMessage.MENU_FAILED));
}
|
@Test
public void testGetUserPermissionByToken() {
    // given: the service resolves this token to a populated permission menu
    final PermissionMenuVO permissionMenuVO = new PermissionMenuVO(
            Collections.singletonList(new MenuInfo("id", "name", "url", "component",
                    new Meta("icon", "title"), Collections.emptyList(), 0)),
            Collections.singletonList(new AuthPerm("perms1", "description1", "icon")),
            Collections.singletonList(new AuthPerm("perms2", "description2", "icon")));
    when(mockPermissionService.getPermissionMenu("token")).thenReturn(permissionMenuVO);
    // when
    final ShenyuAdminResult result = permissionController.getUserPermissionByToken("token");
    // then: success code/message and the menu payload are returned unchanged
    assertThat(result.getCode(), is(CommonErrorCode.SUCCESSFUL));
    assertThat(result.getMessage(), is(ShenyuResultMessage.MENU_SUCCESS));
    assertThat(result.getData(), is(permissionMenuVO));
}
|
/**
 * Checks whether the given remote segment lies entirely within the partition's current
 * leader epoch lineage and below the log end offset. Segments whose epochs or offsets
 * fall outside the lineage must not be treated as valid.
 *
 * @param segmentMetadata metadata of the remote segment to validate
 * @param logEndOffset    current log end offset of the partition
 * @param leaderEpochs    leader epoch to start-offset map of the partition
 * @return true if the segment is fully contained in the leader epoch lineage
 */
static boolean isRemoteSegmentWithinLeaderEpochs(RemoteLogSegmentMetadata segmentMetadata,
                                                 long logEndOffset,
                                                 NavigableMap<Integer, Long> leaderEpochs) {
    long segmentEndOffset = segmentMetadata.endOffset();
    // Filter epochs that does not have any messages/records associated with them.
    NavigableMap<Integer, Long> segmentLeaderEpochs = buildFilteredLeaderEpochMap(segmentMetadata.segmentLeaderEpochs());
    // Check for out of bound epochs between segment epochs and current leader epochs.
    Integer segmentLastEpoch = segmentLeaderEpochs.lastKey();
    if (segmentLastEpoch < leaderEpochs.firstKey() || segmentLastEpoch > leaderEpochs.lastKey()) {
        LOGGER.debug("Segment {} is not within the partition leader epoch lineage. " +
                        "Remote segment epochs: {} and partition leader epochs: {}",
                segmentMetadata.remoteLogSegmentId(), segmentLeaderEpochs, leaderEpochs);
        return false;
    }
    // There can be overlapping remote log segments in the remote storage. (eg)
    // leader-epoch-file-cache: {(5, 10), (7, 15), (9, 100)}
    // segment1: offset-range = 5-50, Broker = 0, epochs = {(5, 10), (7, 15)}
    // segment2: offset-range = 14-150, Broker = 1, epochs = {(5, 14), (7, 15), (9, 100)}, after leader-election.
    // When the segment1 gets deleted, then the log-start-offset = 51 and leader-epoch-file-cache gets updated to: {(7, 51), (9, 100)}.
    // While validating the segment2, we should ensure the overlapping remote log segments case.
    Integer segmentFirstEpoch = segmentLeaderEpochs.ceilingKey(leaderEpochs.firstKey());
    if (segmentFirstEpoch == null) {
        // No segment epoch at or above the first leader epoch: the whole segment predates the lineage.
        LOGGER.debug("Segment {} is not within the partition leader epoch lineage. " +
                        "Remote segment epochs: {} and partition leader epochs: {}",
                segmentMetadata.remoteLogSegmentId(), segmentLeaderEpochs, leaderEpochs);
        return false;
    }
    for (Map.Entry<Integer, Long> entry : segmentLeaderEpochs.entrySet()) {
        int epoch = entry.getKey();
        long offset = entry.getValue();
        // Epochs below the first valid segment epoch belong to the overlapping prefix; skip them.
        if (epoch < segmentFirstEpoch) {
            continue;
        }
        // If segment's epoch does not exist in the leader epoch lineage then it is not a valid segment.
        if (!leaderEpochs.containsKey(epoch)) {
            LOGGER.debug("Segment {} epoch {} is not within the leader epoch lineage. " +
                            "Remote segment epochs: {} and partition leader epochs: {}",
                    segmentMetadata.remoteLogSegmentId(), epoch, segmentLeaderEpochs, leaderEpochs);
            return false;
        }
        // Two cases:
        // case-1: When the segment-first-epoch equals to the first-epoch in the leader-epoch-lineage, then the
        // offset value can lie anywhere between 0 to (next-epoch-start-offset - 1) is valid.
        // case-2: When the segment-first-epoch is not equal to the first-epoch in the leader-epoch-lineage, then
        // the offset value should be between (current-epoch-start-offset) to (next-epoch-start-offset - 1).
        if (epoch == segmentFirstEpoch && leaderEpochs.lowerKey(epoch) != null && offset < leaderEpochs.get(epoch)) {
            LOGGER.debug("Segment {} first-valid epoch {} offset is less than first leader epoch offset {}." +
                            "Remote segment epochs: {} and partition leader epochs: {}",
                    segmentMetadata.remoteLogSegmentId(), epoch, leaderEpochs.get(epoch),
                    segmentLeaderEpochs, leaderEpochs);
            return false;
        }
        // Segment's end offset should be less than or equal to the respective leader epoch's offset.
        if (epoch == segmentLastEpoch) {
            Map.Entry<Integer, Long> nextEntry = leaderEpochs.higherEntry(epoch);
            if (nextEntry != null && segmentEndOffset > nextEntry.getValue() - 1) {
                LOGGER.debug("Segment {} end offset {} is more than leader epoch offset {}." +
                                "Remote segment epochs: {} and partition leader epochs: {}",
                        segmentMetadata.remoteLogSegmentId(), segmentEndOffset, nextEntry.getValue() - 1,
                        segmentLeaderEpochs, leaderEpochs);
                return false;
            }
        }
        // Next segment epoch entry and next leader epoch entry should be same to ensure that the segment's epoch
        // is within the leader epoch lineage.
        if (epoch != segmentLastEpoch && !leaderEpochs.higherEntry(epoch).equals(segmentLeaderEpochs.higherEntry(epoch))) {
            LOGGER.debug("Segment {} epoch {} is not within the leader epoch lineage. " +
                            "Remote segment epochs: {} and partition leader epochs: {}",
                    segmentMetadata.remoteLogSegmentId(), epoch, segmentLeaderEpochs, leaderEpochs);
            return false;
        }
    }
    // segment end offset should be with in the log end offset.
    if (segmentEndOffset >= logEndOffset) {
        LOGGER.debug("Segment {} end offset {} is more than log end offset {}.",
                segmentMetadata.remoteLogSegmentId(), segmentEndOffset, logEndOffset);
        return false;
    }
    return true;
}
|
@Test
public void testRemoteSegmentWithinLeaderEpochsForOverlappingSegments() {
    // Leader lineage after deletion of an earlier overlapping segment: {(7, 51), (9, 100)}.
    NavigableMap<Integer, Long> leaderEpochCache = new TreeMap<>();
    leaderEpochCache.put(7, 51L);
    leaderEpochCache.put(9, 100L);
    // segment1 overlaps the lineage prefix (epoch 5 is before it) but is still valid.
    TreeMap<Integer, Long> segment1Epochs = new TreeMap<>();
    segment1Epochs.put(5, 14L);
    segment1Epochs.put(7, 15L);
    segment1Epochs.put(9, 100L);
    RemoteLogSegmentMetadata segment1 = createRemoteLogSegmentMetadata(14, 150, segment1Epochs);
    assertTrue(isRemoteSegmentWithinLeaderEpochs(segment1, 210, leaderEpochCache));
    // segment2Epochs are not within the leaderEpochCache
    TreeMap<Integer, Long> segment2Epochs = new TreeMap<>();
    segment2Epochs.put(2, 5L);
    segment2Epochs.put(3, 6L);
    RemoteLogSegmentMetadata segment2 = createRemoteLogSegmentMetadata(2, 7, segment2Epochs);
    assertFalse(isRemoteSegmentWithinLeaderEpochs(segment2, 210, leaderEpochCache));
    // segment3Epochs are not within the leaderEpochCache
    TreeMap<Integer, Long> segment3Epochs = new TreeMap<>();
    segment3Epochs.put(7, 15L);
    segment3Epochs.put(9, 100L);
    segment3Epochs.put(10, 200L);
    RemoteLogSegmentMetadata segment3 = createRemoteLogSegmentMetadata(15, 250, segment3Epochs);
    assertFalse(isRemoteSegmentWithinLeaderEpochs(segment3, 210, leaderEpochCache));
    // segment4Epochs are not within the leaderEpochCache
    TreeMap<Integer, Long> segment4Epochs = new TreeMap<>();
    segment4Epochs.put(8, 75L);
    RemoteLogSegmentMetadata segment4 = createRemoteLogSegmentMetadata(75, 100, segment4Epochs);
    assertFalse(isRemoteSegmentWithinLeaderEpochs(segment4, 210, leaderEpochCache));
    // segment5Epochs does not match with the leaderEpochCache
    TreeMap<Integer, Long> segment5Epochs = new TreeMap<>();
    segment5Epochs.put(7, 15L);
    segment5Epochs.put(9, 101L);
    RemoteLogSegmentMetadata segment5 = createRemoteLogSegmentMetadata(15, 150, segment5Epochs);
    assertFalse(isRemoteSegmentWithinLeaderEpochs(segment5, 210, leaderEpochCache));
    // segment6Epochs does not match with the leaderEpochCache
    TreeMap<Integer, Long> segment6Epochs = new TreeMap<>();
    segment6Epochs.put(9, 99L);
    RemoteLogSegmentMetadata segment6 = createRemoteLogSegmentMetadata(99, 150, segment6Epochs);
    assertFalse(isRemoteSegmentWithinLeaderEpochs(segment6, 210, leaderEpochCache));
}
|
/**
 * Two counter cell data instances are equal when both their packet and byte
 * counts match.
 */
@Override
public boolean equals(Object o) {
    if (o == this) {
        return true;
    }
    if (o instanceof PiCounterCellData) {
        PiCounterCellData other = (PiCounterCellData) o;
        return this.packets == other.packets && this.bytes == other.bytes;
    }
    return false;
}
|
@Test
public void testEquals() {
    // Equal groups share packet/byte counts; a different group must not match.
    new EqualsTester()
            .addEqualityGroup(PI_COUNTER_DATA_1, SAME_AS_PI_COUNTER_DATA_1)
            .addEqualityGroup(PI_COUNTER_DATA_2)
            .testEquals();
}
|
/**
 * Returns the mean of the two endpoint altitudes, computed in feet.
 */
public Distance avgAltitude() {
    double altitude1 = point1.altitude().inFeet();
    double altitude2 = point2.altitude().inFeet();
    return Distance.ofFeet((altitude1 + altitude2) / 2.0);
}
|
@Test
public void testAvgAltitude() {
    // The average of 1000ft and 1500ft must be exactly 1250ft.
    Point p1 = Point.builder().altitude(Distance.ofFeet(1000.0)).time(EPOCH).latLong(0.0, 0.0).build();
    Point p2 = Point.builder().altitude(Distance.ofFeet(1500.0)).time(EPOCH).latLong(0.0, 0.0).build();
    PointPair pair = PointPair.of(p1, p2);
    assertEquals(
        Distance.ofFeet(1250.0),
        pair.avgAltitude()
    );
}
|
/**
 * Accepts the security policy for an application, transitioning its security
 * state from REVIEWED to SECURED. All other states only print an explanatory
 * message and leave the stored state unchanged.
 *
 * @param appId         application whose policy is being accepted
 * @param permissionSet permissions granted on acceptance
 */
@Override
public void acceptPolicy(ApplicationId appId, Set<Permission> permissionSet) {
    Application app = applicationAdminService.getApplication(appId);
    if (app == null) {
        // Nothing to accept for an unknown application.
        log.warn("Unknown Application");
        return;
    }
    // Only update the entry if it already exists (Objects::nonNull condition).
    states.computeIf(appId,
            Objects::nonNull,
            (id, securityInfo) -> {
                switch (securityInfo.getState()) {
                    case POLICY_VIOLATED:
                        System.out.println(
                                "This application has violated the security policy. Please uninstall.");
                        return securityInfo;
                    case SECURED:
                        System.out.println(
                                "The policy has been accepted already. To review policy, review [app.name]");
                        return securityInfo;
                    case INSTALLED:
                        System.out.println("Please review the security policy prior to accept them");
                        log.warn("Application has not been reviewed");
                        return securityInfo;
                    case REVIEWED:
                        // The only transition: a reviewed app becomes secured with the given permissions.
                        return new SecurityInfo(permissionSet, SECURED);
                    default:
                        return securityInfo;
                }
            });
}
|
@Test
public void testAcceptPolicy() {
    // The fixture starts in SECURED; the compute below flips each state to
    // exercise the transition table (SECURED -> POLICY_VIOLATED here).
    assertEquals(SECURED, states.get(appId).getState());
    states.compute(appId,
            (id, securityInfo) -> {
                switch (securityInfo.getState()) {
                    case POLICY_VIOLATED:
                        return new SecurityInfo(securityInfo.getPermissions(), SECURED);
                    case SECURED:
                        return new SecurityInfo(securityInfo.getPermissions(), POLICY_VIOLATED);
                    case INSTALLED:
                        return new SecurityInfo(securityInfo.getPermissions(), REVIEWED);
                    case REVIEWED:
                        return new SecurityInfo(securityInfo.getPermissions(), INSTALLED);
                    default:
                        return securityInfo;
                }
            });
    assertEquals(POLICY_VIOLATED, states.get(appId).getState());
}
|
/**
 * Diffs the known plugin files against the current ones and notifies the
 * listener about removals, additions, and timestamp-based updates.
 *
 * @param listener           receives the add/remove/update callbacks
 * @param knowPluginFiles    plugin files seen on the previous scan
 * @param currentPluginFiles plugin files present now
 */
public void notify(PluginJarChangeListener listener, Collection<BundleOrPluginFileDetails> knowPluginFiles, Collection<BundleOrPluginFileDetails> currentPluginFiles) {
    // Plugins that were known before but are absent now have been removed.
    List<BundleOrPluginFileDetails> previouslyKnown = new ArrayList<>(knowPluginFiles);
    subtract(previouslyKnown, currentPluginFiles).forEach(listener::pluginJarRemoved);
    // Anything unknown is an addition; a known plugin with a changed timestamp is an update.
    for (BundleOrPluginFileDetails candidate : currentPluginFiles) {
        int knownIndex = previouslyKnown.indexOf(candidate);
        if (knownIndex < 0) {
            listener.pluginJarAdded(candidate);
        } else if (candidate.doesTimeStampDiffer(previouslyKnown.get(knownIndex))) {
            listener.pluginJarUpdated(candidate);
        }
    }
}
|
@Test
void shouldNotifyWhenNewPluginIsAdded() {
    // With nothing previously known, every current plugin is reported as added,
    // and no removals or updates are fired.
    final PluginJarChangeListener listener = mock(PluginJarChangeListener.class);
    List<BundleOrPluginFileDetails> knownPlugins = Collections.emptyList();
    BundleOrPluginFileDetails pluginOne = mock(BundleOrPluginFileDetails.class);
    BundleOrPluginFileDetails pluginTwo = mock(BundleOrPluginFileDetails.class);
    BundleOrPluginFileDetails pluginThree = mock(BundleOrPluginFileDetails.class);
    List<BundleOrPluginFileDetails> newPlugins = List.of(pluginOne, pluginTwo, pluginThree);
    pluginChangeNotifier.notify(listener, knownPlugins, newPlugins);
    verify(listener).pluginJarAdded(pluginOne);
    verify(listener, never()).pluginJarRemoved(any());
    verify(listener, never()).pluginJarUpdated(any());
}
|
/**
 * Maps a PMML {@code NormDiscrete} element onto its KiePMML counterpart,
 * converting extensions and stringifying the discrete value.
 */
static KiePMMLNormDiscrete getKiePMMLNormDiscrete(final NormDiscrete normDiscrete) {
    final List<KiePMMLExtension> extensions = getKiePMMLExtensions(normDiscrete.getExtensions());
    final String value = normDiscrete.getValue().toString();
    return new KiePMMLNormDiscrete(normDiscrete.getField(),
                                   extensions,
                                   value,
                                   normDiscrete.getMapMissingTo());
}
|
@Test
void getKiePMMLNormDiscrete() {
    // A random NormDiscrete must round-trip all its fields into the KiePMML model.
    NormDiscrete toConvert = getRandomNormDiscrete();
    KiePMMLNormDiscrete retrieved = KiePMMLNormDiscreteInstanceFactory.getKiePMMLNormDiscrete(toConvert);
    commonVerifyKiePMMLNormDiscrete(retrieved, toConvert);
}
|
/**
 * Looks up a single key in the materialized table's state store for the given
 * partition. Returns an empty iterator result when the key is absent.
 *
 * @throws MaterializationException if the underlying store access fails
 */
@Override
public KsMaterializedQueryResult<Row> get(
    final GenericKey key,
    final int partition,
    final Optional<Position> position
) {
  try {
    final ReadOnlyKeyValueStore<GenericKey, ValueAndTimestamp<GenericRow>> store = stateStore
        .store(QueryableStoreTypes.timestampedKeyValueStore(), partition);
    final ValueAndTimestamp<GenericRow> row = store.get(key);
    if (row == null) {
      return KsMaterializedQueryResult.rowIterator(Collections.emptyIterator());
    }
    final Row result = Row.of(stateStore.schema(), key, row.value(), row.timestamp());
    return KsMaterializedQueryResult.rowIterator(ImmutableList.of(result).iterator());
  } catch (final Exception e) {
    // Wrap any store failure in the materialization-layer exception callers expect.
    throw new MaterializationException("Failed to get value from materialized table", e);
  }
}
|
@Test
public void shouldCloseIterator_fullTableScan() {
  // Given: a store iterator yielding two rows
  when(tableStore.all()).thenReturn(keyValueIterator);
  when(keyValueIterator.hasNext()).thenReturn(true, true, false);
  when(keyValueIterator.next())
      .thenReturn(KEY_VALUE1)
      .thenReturn(KEY_VALUE2);
  // When: the full scan result is drained
  Streams.stream(table.get(PARTITION).rowIterator)
      .collect(Collectors.toList());
  // Then: the underlying store iterator must be closed
  verify(keyValueIterator).close();
}
|
/**
 * Returns the current time as reported by the configured file time source.
 */
public FileTime now() {
  return fileTimeSource.now();
}
|
@Test
public void testNow() {
  // now() must track the injected time source, including after it advances.
  assertThat(state.now()).isEqualTo(fileTimeSource.now());
  fileTimeSource.advance(Duration.ofSeconds(1));
  assertThat(state.now()).isEqualTo(fileTimeSource.now());
}
|
@Operation(summary = "saveWorkerGroup", description = "CREATE_WORKER_GROUP_NOTES")
@Parameters({
@Parameter(name = "id", description = "WORKER_GROUP_ID", schema = @Schema(implementation = int.class, example = "10", defaultValue = "0")),
@Parameter(name = "name", description = "WORKER_GROUP_NAME", required = true, schema = @Schema(implementation = String.class)),
@Parameter(name = "addrList", description = "WORKER_ADDR_LIST", required = true, schema = @Schema(implementation = String.class)),
@Parameter(name = "description", description = "WORKER_DESC", required = false, schema = @Schema(implementation = String.class)),
@Parameter(name = "otherParamsJson", description = "WORKER_PARAMS_JSON", required = false, schema = @Schema(implementation = String.class)),
})
@PostMapping()
@ResponseStatus(HttpStatus.OK)
@ApiException(SAVE_ERROR)
public Result saveWorkerGroup(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id", required = false, defaultValue = "0") int id,
@RequestParam(value = "name") String name,
@RequestParam(value = "addrList") String addrList,
@RequestParam(value = "description", required = false, defaultValue = "") String description,
@RequestParam(value = "otherParamsJson", required = false, defaultValue = "") String otherParamsJson) {
Map<String, Object> result =
workerGroupService.saveWorkerGroup(loginUser, id, name, addrList, description, otherParamsJson);
return returnDataList(result);
}
|
@Test
public void testSaveWorkerGroup() throws Exception {
    // given: both requested addresses are registered worker nodes
    Map<String, String> serverMaps = new HashMap<>();
    serverMaps.put("192.168.0.1", "192.168.0.1");
    serverMaps.put("192.168.0.2", "192.168.0.2");
    Mockito.when(registryClient.getServerMaps(RegistryNodeType.WORKER)).thenReturn(serverMaps);
    MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
    paramsMap.add("name", "cxc_work_group");
    paramsMap.add("addrList", "192.168.0.1,192.168.0.2");
    paramsMap.add("description", "");
    paramsMap.add("otherParamsJson", "");
    // when: the create endpoint is called
    MvcResult mvcResult = mockMvc.perform(post("/worker-groups")
            .header("sessionId", sessionId)
            .params(paramsMap))
            .andExpect(status().isOk())
            .andExpect(content().contentType(MediaType.APPLICATION_JSON))
            .andReturn();
    // then: the JSON body must report success
    Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
    Assertions.assertTrue(result != null && result.isSuccess());
    logger.info(mvcResult.getResponse().getContentAsString());
}
|
/**
 * Lists the children of a directory in the DeepBox hierarchy, dispatching to
 * the API call matching the directory's depth: root (DeepBoxes), DeepBox
 * (Boxes), Box (Inbox/Documents/Trash), the third-level containers themselves,
 * and finally regular subfolders of Documents or Trash.
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    if(directory.isRoot()) {
        return new DeepBoxesListService().list(directory, listener);
    }
    if(containerService.isDeepbox(directory)) { // in DeepBox
        return new BoxesListService().list(directory, listener);
    }
    if(containerService.isBox(directory)) { // in Box
        return new BoxListService().list(directory, listener);
    }
    final String deepBoxNodeId = fileid.getDeepBoxNodeId(directory);
    final String boxNodeId = fileid.getBoxNodeId(directory);
    if(containerService.isThirdLevel(directory)) { // in Inbox/Documents/Trash
        // N.B. although Documents and Trash have a nodeId, calling the listFiles1/listTrash1 API with
        // parentNode may fail!
        if(containerService.isInInbox(directory)) {
            return new NodeListService(new Contents() {
                @Override
                public NodeContent getNodes(final int offset) throws ApiException {
                    return new BoxRestControllerApi(session.getClient()).listQueue(deepBoxNodeId,
                            boxNodeId,
                            null,
                            offset, chunksize, "displayName asc");
                }
            }).list(directory, listener);
        }
        if(containerService.isInDocuments(directory)) {
            return new NodeListService(new Contents() {
                @Override
                public NodeContent getNodes(final int offset) throws ApiException {
                    return new BoxRestControllerApi(session.getClient()).listFiles(
                            deepBoxNodeId,
                            boxNodeId,
                            offset, chunksize, "displayName asc");
                }
            }).list(directory, listener);
        }
        if(containerService.isInTrash(directory)) {
            return new NodeListService(new Contents() {
                @Override
                public NodeContent getNodes(final int offset) throws ApiException {
                    return new BoxRestControllerApi(session.getClient()).listTrash(
                            deepBoxNodeId,
                            boxNodeId,
                            offset, chunksize, "displayName asc");
                }
            }).list(directory, listener);
        }
    }
    // in subfolder of Documents/Trash (Inbox has no subfolders)
    final String nodeId = fileid.getFileId(directory);
    if(containerService.isInTrash(directory)) {
        return new NodeListService(new Contents() {
            @Override
            public NodeContent getNodes(final int offset) throws ApiException {
                return new BoxRestControllerApi(session.getClient()).listTrash1(
                        deepBoxNodeId,
                        boxNodeId,
                        nodeId,
                        offset, chunksize, "displayName asc");
            }
        }).list(directory, listener);
    }
    return new NodeListService(new Contents() {
        @Override
        public NodeContent getNodes(final int offset) throws ApiException {
            return new BoxRestControllerApi(session.getClient()).listFiles1(
                    deepBoxNodeId,
                    boxNodeId,
                    nodeId,
                    offset, chunksize, "displayName asc");
        }
    }).list(directory, listener);
}
|
@Test
public void testListDeepBoxes() throws Exception {
    // Listing the root must return the account's DeepBoxes with valid attributes.
    final DeepboxIdProvider nodeid = new DeepboxIdProvider(session);
    final Path directory = new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final AttributedList<Path> list = new DeepboxListService(session, nodeid).list(directory, new DisabledListProgressListener());
    assertNotSame(AttributedList.emptyList(), list);
    assertFalse(list.isEmpty());
    assertNotNull(list.find(new SimplePathPredicate(new Path("/ORG 4 - DeepBox Desktop App", EnumSet.of(Path.Type.directory, Path.Type.volume)))));
    assertEquals(2, list.size());
    for(final Path f : list) {
        assertSame(directory, f.getParent());
        assertFalse(f.getName().contains(String.valueOf(Path.DELIMITER)));
        // no modification/creation date for DeepBoxes
        assertTrue(f.attributes().getModificationDate() < 0);
        assertTrue(f.attributes().getCreationDate() < 0);
        assertNotNull(nodeid.getFileId(new Path(f).withAttributes(new PathAttributes())));
        assertEquals(f.attributes(), new DeepboxAttributesFinderFeature(session, nodeid).find(new Path(f.getAbsolute(), f.getType())));
    }
}
|
/**
 * Registers newly blocked nodes with the tracker, then notifies listeners and
 * the blocklist context. Listeners are told about both brand-new and merged
 * (updated) nodes; the context only blocks resources for brand-new ones.
 *
 * @param newNodes blocked nodes to add; no-op when empty
 */
@Override
public void addNewBlockedNodes(Collection<BlockedNode> newNodes) {
    assertRunningInMainThread();
    if (newNodes.isEmpty()) {
        return;
    }
    BlockedNodeAdditionResult result = blocklistTracker.addNewBlockedNodes(newNodes);
    Collection<BlockedNode> newlyAddedNodes = result.getNewlyAddedNodes();
    // Listeners receive newly added plus merged (existing but updated) nodes.
    Collection<BlockedNode> allNodes =
            Stream.concat(newlyAddedNodes.stream(), result.getMergedNodes().stream())
                    .collect(Collectors.toList());
    if (!newlyAddedNodes.isEmpty()) {
        if (log.isDebugEnabled()) {
            log.debug(
                    "Newly added {} blocked nodes, details: {}."
                            + " Total {} blocked nodes currently, details: {}.",
                    newlyAddedNodes.size(),
                    newlyAddedNodes,
                    blocklistTracker.getAllBlockedNodes().size(),
                    blocklistTracker.getAllBlockedNodes());
        } else {
            log.info(
                    "Newly added {} blocked nodes. Total {} blocked nodes currently.",
                    newlyAddedNodes.size(),
                    blocklistTracker.getAllBlockedNodes().size());
        }
        blocklistListeners.forEach(listener -> listener.notifyNewBlockedNodes(allNodes));
        // Only genuinely new nodes trigger resource blocking in the context.
        blocklistContext.blockResources(newlyAddedNodes);
    } else if (!allNodes.isEmpty()) {
        // Pure merges: listeners still get notified, the context does not.
        blocklistListeners.forEach(listener -> listener.notifyNewBlockedNodes(allNodes));
    }
}
|
@Test
void testAddNewBlockedNodes() throws Exception {
    // Verifies notification semantics: new nodes reach both listener and context,
    // re-adds reach neither, merges (updates) reach only the listener, and a
    // late-registering listener receives the full current blocklist.
    BlockedNode node1 = new BlockedNode("node1", "cause", 1L);
    BlockedNode node2 = new BlockedNode("node2", "cause", 1L);
    BlockedNode node2Update = new BlockedNode("node2", "cause", 2L);
    List<List<BlockedNode>> contextReceivedNodes = new ArrayList<>();
    TestBlocklistContext context =
            TestBlocklistContext.newBuilder()
                    .setBlockResourcesConsumer(
                            blockedNodes ->
                                    contextReceivedNodes.add(new ArrayList<>(blockedNodes)))
                    .build();
    TestBlocklistListener listener = new TestBlocklistListener();
    try (DefaultBlocklistHandler handler = createDefaultBlocklistHandler(context)) {
        handler.registerBlocklistListener(listener);
        assertThat(listener.listenerReceivedNodes).isEmpty();
        assertThat(contextReceivedNodes).isEmpty();
        // add node1, node2
        handler.addNewBlockedNodes(Arrays.asList(node1, node2));
        // check listener and context
        assertThat(listener.listenerReceivedNodes).hasSize(1);
        assertThat(listener.listenerReceivedNodes.get(0))
                .containsExactlyInAnyOrder(node1, node2);
        assertThat(contextReceivedNodes).hasSize(1);
        assertThat(contextReceivedNodes.get(0)).containsExactlyInAnyOrder(node1, node2);
        // add node1, node2 again, should not notify context and listener
        assertThat(contextReceivedNodes).hasSize(1);
        assertThat(listener.listenerReceivedNodes).hasSize(1);
        // update node2, should notify listener, not notify context
        handler.addNewBlockedNodes(Collections.singleton(node2Update));
        assertThat(listener.listenerReceivedNodes).hasSize(2);
        assertThat(listener.listenerReceivedNodes.get(1)).containsExactly(node2Update);
        assertThat(contextReceivedNodes).hasSize(1);
        // register a new listener, will notify all items
        TestBlocklistListener listener2 = new TestBlocklistListener();
        handler.registerBlocklistListener(listener2);
        assertThat(listener2.listenerReceivedNodes).hasSize(1);
        assertThat(listener2.listenerReceivedNodes.get(0))
                .containsExactlyInAnyOrder(node1, node2Update);
    }
}
|
@Subscribe
public void onChatMessage(ChatMessage event)
{
if (event.getType() == ChatMessageType.GAMEMESSAGE || event.getType() == ChatMessageType.SPAM)
{
String message = Text.removeTags(event.getMessage());
Matcher dodgyCheckMatcher = DODGY_CHECK_PATTERN.matcher(message);
Matcher dodgyProtectMatcher = DODGY_PROTECT_PATTERN.matcher(message);
Matcher dodgyBreakMatcher = DODGY_BREAK_PATTERN.matcher(message);
Matcher bindingNecklaceCheckMatcher = BINDING_CHECK_PATTERN.matcher(message);
Matcher bindingNecklaceUsedMatcher = BINDING_USED_PATTERN.matcher(message);
Matcher ringOfForgingCheckMatcher = RING_OF_FORGING_CHECK_PATTERN.matcher(message);
Matcher amuletOfChemistryCheckMatcher = AMULET_OF_CHEMISTRY_CHECK_PATTERN.matcher(message);
Matcher amuletOfChemistryUsedMatcher = AMULET_OF_CHEMISTRY_USED_PATTERN.matcher(message);
Matcher amuletOfChemistryBreakMatcher = AMULET_OF_CHEMISTRY_BREAK_PATTERN.matcher(message);
Matcher amuletOfBountyCheckMatcher = AMULET_OF_BOUNTY_CHECK_PATTERN.matcher(message);
Matcher amuletOfBountyUsedMatcher = AMULET_OF_BOUNTY_USED_PATTERN.matcher(message);
Matcher chronicleAddMatcher = CHRONICLE_ADD_PATTERN.matcher(message);
Matcher chronicleUseAndCheckMatcher = CHRONICLE_USE_AND_CHECK_PATTERN.matcher(message);
Matcher slaughterActivateMatcher = BRACELET_OF_SLAUGHTER_ACTIVATE_PATTERN.matcher(message);
Matcher slaughterCheckMatcher = BRACELET_OF_SLAUGHTER_CHECK_PATTERN.matcher(message);
Matcher expeditiousActivateMatcher = EXPEDITIOUS_BRACELET_ACTIVATE_PATTERN.matcher(message);
Matcher expeditiousCheckMatcher = EXPEDITIOUS_BRACELET_CHECK_PATTERN.matcher(message);
Matcher bloodEssenceCheckMatcher = BLOOD_ESSENCE_CHECK_PATTERN.matcher(message);
Matcher bloodEssenceExtractMatcher = BLOOD_ESSENCE_EXTRACT_PATTERN.matcher(message);
Matcher braceletOfClayCheckMatcher = BRACELET_OF_CLAY_CHECK_PATTERN.matcher(message);
if (message.contains(RING_OF_RECOIL_BREAK_MESSAGE))
{
notifier.notify(config.recoilNotification(), "Your Ring of Recoil has shattered");
}
else if (dodgyBreakMatcher.find())
{
notifier.notify(config.dodgyNotification(), "Your dodgy necklace has crumbled to dust.");
updateDodgyNecklaceCharges(MAX_DODGY_CHARGES);
}
else if (dodgyCheckMatcher.find())
{
updateDodgyNecklaceCharges(Integer.parseInt(dodgyCheckMatcher.group(1)));
}
else if (dodgyProtectMatcher.find())
{
updateDodgyNecklaceCharges(Integer.parseInt(dodgyProtectMatcher.group(1)));
}
else if (amuletOfChemistryCheckMatcher.find())
{
updateAmuletOfChemistryCharges(Integer.parseInt(amuletOfChemistryCheckMatcher.group(1)));
}
else if (amuletOfChemistryUsedMatcher.find())
{
final String match = amuletOfChemistryUsedMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateAmuletOfChemistryCharges(charges);
}
else if (amuletOfChemistryBreakMatcher.find())
{
notifier.notify(config.amuletOfChemistryNotification(), "Your amulet of chemistry has crumbled to dust.");
updateAmuletOfChemistryCharges(MAX_AMULET_OF_CHEMISTRY_CHARGES);
}
else if (amuletOfBountyCheckMatcher.find())
{
updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyCheckMatcher.group(1)));
}
else if (amuletOfBountyUsedMatcher.find())
{
updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyUsedMatcher.group(1)));
}
else if (message.equals(AMULET_OF_BOUNTY_BREAK_TEXT))
{
updateAmuletOfBountyCharges(MAX_AMULET_OF_BOUNTY_CHARGES);
}
else if (message.contains(BINDING_BREAK_TEXT))
{
notifier.notify(config.bindingNotification(), BINDING_BREAK_TEXT);
// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
updateBindingNecklaceCharges(MAX_BINDING_CHARGES + 1);
}
else if (bindingNecklaceUsedMatcher.find())
{
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
if (equipment.contains(ItemID.BINDING_NECKLACE))
{
updateBindingNecklaceCharges(getItemCharges(ItemChargeConfig.KEY_BINDING_NECKLACE) - 1);
}
}
else if (bindingNecklaceCheckMatcher.find())
{
final String match = bindingNecklaceCheckMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateBindingNecklaceCharges(charges);
}
else if (ringOfForgingCheckMatcher.find())
{
final String match = ringOfForgingCheckMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateRingOfForgingCharges(charges);
}
else if (message.equals(RING_OF_FORGING_USED_TEXT) || message.equals(RING_OF_FORGING_VARROCK_PLATEBODY))
{
final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
// Determine if the player smelted with a Ring of Forging equipped.
if (equipment == null)
{
return;
}
if (equipment.contains(ItemID.RING_OF_FORGING) && (message.equals(RING_OF_FORGING_USED_TEXT) || inventory.count(ItemID.IRON_ORE) > 1))
{
int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_RING_OF_FORGING) - 1, 0, MAX_RING_OF_FORGING_CHARGES);
updateRingOfForgingCharges(charges);
}
}
else if (message.equals(RING_OF_FORGING_BREAK_TEXT))
{
notifier.notify(config.ringOfForgingNotification(), "Your ring of forging has melted.");
// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
updateRingOfForgingCharges(MAX_RING_OF_FORGING_CHARGES + 1);
}
else if (chronicleAddMatcher.find())
{
final String match = chronicleAddMatcher.group(1);
if (match.equals("one"))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
}
else
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(match));
}
}
else if (chronicleUseAndCheckMatcher.find())
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(chronicleUseAndCheckMatcher.group(1)));
}
else if (message.equals(CHRONICLE_ONE_CHARGE_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
}
else if (message.equals(CHRONICLE_EMPTY_TEXT) || message.equals(CHRONICLE_NO_CHARGES_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 0);
}
else if (message.equals(CHRONICLE_FULL_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1000);
}
else if (slaughterActivateMatcher.find())
{
final String found = slaughterActivateMatcher.group(1);
if (found == null)
{
updateBraceletOfSlaughterCharges(MAX_SLAYER_BRACELET_CHARGES);
notifier.notify(config.slaughterNotification(), BRACELET_OF_SLAUGHTER_BREAK_TEXT);
}
else
{
updateBraceletOfSlaughterCharges(Integer.parseInt(found));
}
}
else if (slaughterCheckMatcher.find())
{
updateBraceletOfSlaughterCharges(Integer.parseInt(slaughterCheckMatcher.group(1)));
}
else if (expeditiousActivateMatcher.find())
{
final String found = expeditiousActivateMatcher.group(1);
if (found == null)
{
updateExpeditiousBraceletCharges(MAX_SLAYER_BRACELET_CHARGES);
notifier.notify(config.expeditiousNotification(), EXPEDITIOUS_BRACELET_BREAK_TEXT);
}
else
{
updateExpeditiousBraceletCharges(Integer.parseInt(found));
}
}
else if (expeditiousCheckMatcher.find())
{
updateExpeditiousBraceletCharges(Integer.parseInt(expeditiousCheckMatcher.group(1)));
}
else if (bloodEssenceCheckMatcher.find())
{
updateBloodEssenceCharges(Integer.parseInt(bloodEssenceCheckMatcher.group(1)));
}
else if (bloodEssenceExtractMatcher.find())
{
updateBloodEssenceCharges(getItemCharges(ItemChargeConfig.KEY_BLOOD_ESSENCE) - Integer.parseInt(bloodEssenceExtractMatcher.group(1)));
}
else if (message.contains(BLOOD_ESSENCE_ACTIVATE_TEXT))
{
updateBloodEssenceCharges(MAX_BLOOD_ESSENCE_CHARGES);
}
else if (braceletOfClayCheckMatcher.find())
{
updateBraceletOfClayCharges(Integer.parseInt(braceletOfClayCheckMatcher.group(1)));
}
else if (message.equals(BRACELET_OF_CLAY_USE_TEXT) || message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN))
{
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
// Determine if the player mined with a Bracelet of Clay equipped.
if (equipment != null && equipment.contains(ItemID.BRACELET_OF_CLAY))
{
final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
// Charge is not used if only 1 inventory slot is available when mining in Prifddinas
boolean ignore = inventory != null
&& inventory.count() == 27
&& message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN);
if (!ignore)
{
int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_BRACELET_OF_CLAY) - 1, 0, MAX_BRACELET_OF_CLAY_CHARGES);
updateBraceletOfClayCharges(charges);
}
}
}
else if (message.equals(BRACELET_OF_CLAY_BREAK_TEXT))
{
notifier.notify(config.braceletOfClayNotification(), "Your bracelet of clay has crumbled to dust");
updateBraceletOfClayCharges(MAX_BRACELET_OF_CLAY_CHARGES);
}
}
}
|
// Verifies that the "chronicle teleport while empty" chat message resets the
// stored Chronicle charge count to 0 on the player's RS profile.
@Test
public void testChronicleTeleportEmpty()
{
ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.GAMEMESSAGE, "", CHRONICLE_TELEPORT_EMPTY, "", 0);
itemChargePlugin.onChatMessage(chatMessage);
verify(configManager).setRSProfileConfiguration(ItemChargeConfig.GROUP, ItemChargeConfig.KEY_CHRONICLE, 0);
}
|
/**
 * Creates a {@link KsqlTarget} for the given server with no additional request headers.
 *
 * @param server the base URI of the server to talk to
 * @return a target bound to {@code server}
 */
public KsqlTarget target(final URI server) {
return target(server, Collections.emptyMap());
}
|
// Verifies that a non-2xx GET response is surfaced as an error RestResponse
// carrying the server-supplied KsqlErrorMessage rather than being thrown.
@Test
public void shouldHandleErrorMessageOnGetRequests() {
// Given:
server.setResponseObject(new KsqlErrorMessage(40000, "ouch"));
server.setErrorCode(400);
// When:
KsqlTarget target = ksqlClient.target(serverUri);
RestResponse<ServerInfo> response = target.getServerInfo();
// Then:
assertThat(response.getStatusCode(), is(400));
assertThat(response.getErrorMessage().getErrorCode(), is(40000));
assertThat(response.getErrorMessage().getMessage(), is("ouch"));
}
|
/**
 * Collects every {@link MethodDescriptor} registered across all entries of
 * the {@code methods} map into a single set.
 *
 * @return a new mutable set containing all known method descriptors
 */
public Set<MethodDescriptor> getAllMethods() {
    Set<MethodDescriptor> methodModels = new HashSet<>();
    // Only the values are needed; iterating entries and discarding the key was redundant.
    methods.values().forEach(methodModels::addAll);
    return methodModels;
}
|
// Smoke test: the service fixture is expected to expose at least one method descriptor.
@Test
void getAllMethods() {
Assertions.assertFalse(service.getAllMethods().isEmpty());
}
|
/**
 * On application startup, builds a router function for every scheme known to
 * the scheme manager and registers it in the scheme-to-router map.
 */
@Override
public void onApplicationEvent(ApplicationStartedEvent event) {
    for (var scheme : schemeManager.schemes()) {
        var routerFunction = new ExtensionRouterFunctionFactory(scheme, client).create();
        schemeRouterFuncMapper.put(scheme, routerFunction);
    }
}
|
// Verifies that the startup event triggers enumeration of all schemes, which is
// the precondition for building a router function per scheme.
@Test
void shouldBuildRouterFunctionsOnApplicationStarted() {
var applicationStartedEvent = mock(ApplicationStartedEvent.class);
extensionRouterFunc.onApplicationEvent(applicationStartedEvent);
verify(schemeManager).schemes();
}
|
/**
 * Describes this member for a DescribeGroups response, attaching the metadata
 * recorded for the given protocol.
 *
 * @param protocolName the protocol whose metadata should be attached
 * @return the described group member with member metadata populated
 */
public DescribeGroupsResponseData.DescribedGroupMember describe(String protocolName) {
return describeNoMetadata().setMemberMetadata(metadata(protocolName));
}
|
// Verifies that describe(protocol) fills in all identity fields plus the
// metadata bytes registered for the named protocol ("range" here).
@Test
public void testDescribe() {
JoinGroupRequestProtocolCollection protocols = new JoinGroupRequestProtocolCollection(Collections.singletonList(
new JoinGroupRequestProtocol()
.setName("range")
.setMetadata(new byte[]{0})
).iterator());
ClassicGroupMember member = new ClassicGroupMember(
"member",
Optional.of("group-instance-id"),
"client-id",
"client-host",
10,
4500,
"generic",
protocols,
new byte[0]
);
DescribeGroupsResponseData.DescribedGroupMember expectedDescribedGroupMember =
new DescribeGroupsResponseData.DescribedGroupMember()
.setMemberId("member")
.setGroupInstanceId("group-instance-id")
.setClientId("client-id")
.setClientHost("client-host")
.setMemberAssignment(new byte[0])
.setMemberMetadata(member.metadata("range"));
DescribeGroupsResponseData.DescribedGroupMember describedGroupMember = member.describe("range");
assertEquals(expectedDescribedGroupMember, describedGroupMember);
}
|
/**
 * Fast, allocation-free formatting of a float as decimal ASCII text.
 *
 * @param value             the value to format
 * @param maxFractionDigits maximum number of fraction digits to emit
 * @param asciiBuffer       destination buffer; assumed large enough for the
 *                          formatted output — TODO confirm callers guarantee this
 * @return the number of bytes written, or -1 when the value is NaN/infinite,
 *         outside the representable long range, or maxFractionDigits exceeds
 *         MAX_FRACTION_DIGITS
 */
public static int formatFloatFast(float value, int maxFractionDigits, byte[] asciiBuffer)
{
if (Float.isNaN(value) ||
Float.isInfinite(value) ||
value > Long.MAX_VALUE ||
value <= Long.MIN_VALUE ||
// NOTE(review): bounds are asymmetric (> MAX but <= MIN) — presumably to keep
// the (long) cast below safe; confirm this matches the intended domain.
maxFractionDigits > MAX_FRACTION_DIGITS)
{
return -1;
}
int offset = 0;
long integerPart = (long) value;
//handle sign
if (value < 0)
{
asciiBuffer[offset++] = '-';
integerPart = -integerPart;
}
//extract fraction part (rounded half-up via the +0.5d)
long fractionPart = (long) ((Math.abs((double)value) - integerPart) * POWER_OF_TENS[maxFractionDigits] + 0.5d);
//Check for rounding to next integer
if (fractionPart >= POWER_OF_TENS[maxFractionDigits]) {
integerPart++;
fractionPart -= POWER_OF_TENS[maxFractionDigits];
}
//format integer part
offset = formatPositiveNumber(integerPart, getExponent(integerPart), false, asciiBuffer, offset);
// Emit the '.' and fraction only when there is a non-zero fraction to print.
if (fractionPart > 0 && maxFractionDigits > 0)
{
asciiBuffer[offset++] = '.';
offset = formatPositiveNumber(fractionPart, maxFractionDigits - 1, true, asciiBuffer, offset);
}
return offset;
}
|
// Exercises formatFloatFast for positive/negative fractional values, including
// fractions with leading zeros (0.003), asserting both the byte count returned
// and the exact ASCII bytes produced.
@Test
void testFormatOfRealValues()
{
assertEquals(3, NumberFormatUtil.formatFloatFast(0.7f, 5, buffer));
assertArrayEquals(new byte[]{'0', '.', '7'}, Arrays.copyOfRange(buffer, 0, 3));
assertEquals(4, NumberFormatUtil.formatFloatFast(-0.7f, 5, buffer));
assertArrayEquals(new byte[]{'-', '0', '.', '7'}, Arrays.copyOfRange(buffer, 0, 4));
assertEquals(5, NumberFormatUtil.formatFloatFast(0.003f, 5, buffer));
assertArrayEquals(new byte[]{'0', '.', '0', '0', '3'}, Arrays.copyOfRange(buffer, 0, 5));
assertEquals(6, NumberFormatUtil.formatFloatFast(-0.003f, 5, buffer));
assertArrayEquals(new byte[]{'-', '0', '.', '0', '0', '3'},
Arrays.copyOfRange(buffer, 0, 6));
}
|
/**
 * Builds a {@link NotControllerException} whose message names the active
 * controller node when one is known.
 *
 * @param controllerId the id of the active controller, if any
 * @return the exception describing which node (if any) is the controller
 */
public static NotControllerException newWrongControllerException(OptionalInt controllerId) {
    final String message = controllerId.isPresent()
        ? "The active controller appears to be node " + controllerId.getAsInt() + "."
        : "No controller appears to be active.";
    return new NotControllerException(message);
}
|
// When a controller id is present, the exception message must name that node.
@Test
public void testNewWrongControllerExceptionWithActiveController() {
assertExceptionsMatch(new NotControllerException("The active controller appears to be node 1."),
newWrongControllerException(OptionalInt.of(1)));
}
|
/**
 * Returns the scopes that are not flagged as restricted.
 *
 * <p>NOTE(review): Guava's {@code Sets.filter} returns a live filtered view
 * over {@code getAll()}, not a snapshot — callers should not assume a copy.
 */
@Override
public Set<SystemScope> getUnrestricted() {
return Sets.filter(getAll(), Predicates.not(isRestricted));
}
|
// Default dynamic scopes and the unrestricted dynamic scope should all be
// returned; restricted scopes must be filtered out.
@Test
public void getUnrestricted() {
Set<SystemScope> unrestricted = Sets.newHashSet(defaultDynScope1, defaultDynScope2, dynScope1);
assertThat(service.getUnrestricted(), equalTo(unrestricted));
}
|
/**
 * Constant-folds {@code smallint << bigint}, truncating the result back to a
 * 16-bit value exactly as the engine's runtime shift would.
 */
@ConstantFunction(name = "bitShiftLeft", argTypes = {SMALLINT, BIGINT}, returnType = SMALLINT)
public static ConstantOperator bitShiftLeftSmallInt(ConstantOperator first, ConstantOperator second) {
    // The shift is performed in int width and then narrowed to short.
    final short shifted = (short) (first.getSmallint() << second.getBigint());
    return ConstantOperator.createSmallInt(shifted);
}
|
// 10 << 3 == 80, which fits in a short without truncation.
@Test
public void bitShiftLeftSmallInt() {
assertEquals(80, ScalarOperatorFunctions.bitShiftLeftSmallInt(O_SI_10, O_BI_3).getSmallint());
}
|
/**
 * Reads the OpenAPI definition from the given class, using the resolved
 * application path and default (empty) parent context.
 *
 * @param cls the annotated resource class to scan
 * @return the resulting OpenAPI model
 */
public OpenAPI read(Class<?> cls) {
return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
}
|
// Verifies that response schemas are resolved from the method return type:
// explicit @ApiResponse codes (200/201/204) on one endpoint and the implicit
// "default response" on the other, both referencing the TestDTO schema.
@Test(description = "Responses schema resolved from return type")
public void testResponseReturnType() {
Reader reader = new Reader(new OpenAPI());
OpenAPI openAPI = reader.read(ResponseReturnTypeResource.class);
String yaml = "openapi: 3.0.1\n" +
"paths:\n" +
"  /sample/{id}:\n" +
"    get:\n" +
"      summary: Find by id\n" +
"      description: Find by id operation\n" +
"      operationId: find\n" +
"      parameters:\n" +
"      - name: id\n" +
"        in: path\n" +
"        description: ID\n" +
"        required: true\n" +
"        schema:\n" +
"          type: integer\n" +
"          format: int32\n" +
"      responses:\n" +
"        \"200\":\n" +
"          description: Ok\n" +
"          content:\n" +
"            application/json:\n" +
"              schema:\n" +
"                $ref: '#/components/schemas/TestDTO'\n" +
"        \"201\":\n" +
"          description: \"201\"\n" +
"          content:\n" +
"            application/json:\n" +
"              schema:\n" +
"                $ref: '#/components/schemas/TestDTO'\n" +
"        \"204\":\n" +
"          description: No Content\n" +
"          content:\n" +
"            application/json: {}\n" +
"  /sample/{id}/default:\n" +
"    get:\n" +
"      summary: Find by id (default)\n" +
"      description: Find by id operation (default)\n" +
"      operationId: findDefault\n" +
"      parameters:\n" +
"      - name: id\n" +
"        in: path\n" +
"        description: ID\n" +
"        required: true\n" +
"        schema:\n" +
"          type: integer\n" +
"          format: int32\n" +
"      responses:\n" +
"        default:\n" +
"          description: default response\n" +
"          content:\n" +
"            application/json:\n" +
"              schema:\n" +
"                $ref: '#/components/schemas/TestDTO'\n" +
"components:\n" +
"  schemas:\n" +
"    TestDTO:\n" +
"      type: object\n" +
"      properties:\n" +
"        foo:\n" +
"          type: string";
SerializationMatchers.assertEqualsToYaml(openAPI, yaml);
}
|
/**
 * Deletes files, in-progress multipart uploads and buckets.
 *
 * <p>Processing order: plain objects are grouped by bucket and deleted in
 * batches first; buckets themselves are deleted last (after their keys), since
 * a bucket can only be removed once emptied — TODO confirm the service enforces this.
 *
 * @param files    paths to delete mapped to their transfer status
 * @param prompt   callback for credentials if re-authentication is required
 * @param callback invoked once per path before its deletion is attempted
 * @throws BackgroundException on service errors deleting keys or buckets
 */
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
final Map<Path, List<ObjectKeyAndVersion>> map = new HashMap<>();
final List<Path> containers = new ArrayList<>();
for(Path file : files.keySet()) {
if(containerService.isContainer(file)) {
// Defer bucket deletion until all contained keys are processed below.
containers.add(file);
continue;
}
callback.delete(file);
final Path bucket = containerService.getContainer(file);
if(file.getType().contains(Path.Type.upload)) {
// In-progress multipart upload
try {
multipartService.delete(new MultipartUpload(file.attributes().getVersionId(),
bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file)));
}
catch(NotfoundException ignored) {
// Upload may have completed or been aborted concurrently; best-effort cleanup.
log.warn(String.format("Ignore failure deleting multipart upload %s", file));
}
}
else {
final List<ObjectKeyAndVersion> keys = new ArrayList<>();
// Always returning 204 even if the key does not exist. Does not return 404 for non-existing keys
keys.add(new ObjectKeyAndVersion(containerService.getKey(file), file.attributes().getVersionId()));
if(map.containsKey(bucket)) {
map.get(bucket).addAll(keys);
}
else {
map.put(bucket, keys);
}
}
}
// Iterate over all containers and delete list of keys
for(Map.Entry<Path, List<ObjectKeyAndVersion>> entry : map.entrySet()) {
final Path container = entry.getKey();
final List<ObjectKeyAndVersion> keys = entry.getValue();
this.delete(container, keys, prompt);
}
for(Path file : containers) {
callback.delete(file);
// Finally delete bucket itself
try {
final String bucket = containerService.getContainer(file).getName();
session.getClient().deleteBucket(bucket);
// Drop the cached region mapping for the now-deleted bucket.
session.getClient().getRegionEndpointCache().removeRegionForBucketName(bucket);
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Cannot delete {0}", e, file);
}
}
}
|
// Creates and deletes the same directory placeholder twice in a versioned
// bucket, then asserts no listing entry remains for the placeholder path.
@Test
public void testDeleteVersionedPlaceholder() throws Exception {
final Path container = new Path("versioning-test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final String name = new AlphanumericRandomStringService().random();
final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
{
final Path test = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(
new Path(container, name, EnumSet.of(Path.Type.directory)), new TransferStatus());
assertTrue(new S3FindFeature(session, acl).find(test));
assertTrue(new DefaultFindFeature(session).find(test));
new S3MultipleDeleteFeature(session, acl).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse(new S3FindFeature(session, acl).find(test));
}
{
final Path test = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(
new Path(container, name, EnumSet.of(Path.Type.directory)), new TransferStatus());
assertTrue(new S3FindFeature(session, acl).find(test));
assertTrue(new DefaultFindFeature(session).find(test));
new S3MultipleDeleteFeature(session, acl).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse(new S3FindFeature(session, acl).find(test));
}
assertFalse(new S3VersionedObjectListService(session, acl).list(container, new DisabledListProgressListener()).contains(
new Path(container, name, EnumSet.of(Path.Type.directory))));
}
|
/**
 * Dispatches a small set of supported built-in calls.
 *
 * <p>Supported: zero-arg {@code currentTimeMillis} (delegates to
 * {@code DateTimeUtils}) and single-arg {@code asList}, which is a no-op
 * passthrough to support {@code Arrays.asList}. Anything else throws.
 *
 * @throws UnsupportedOperationException for any unsupported method/arity
 */
@Override
public SelType call(String methodName, SelType[] args) {
    if ("currentTimeMillis".equals(methodName) && args.length == 0) {
        return SelLong.of(DateTimeUtils.currentTimeMillis());
    }
    if ("asList".equals(methodName) && args.length == 1) {
        return args[0];
    }
    throw new UnsupportedOperationException(
        String.format(
            "%s DO NOT support calling method: %s with args: %s",
            type(), methodName, Arrays.toString(args)));
}
|
// Any method name outside the supported set must be rejected.
@Test(expected = UnsupportedOperationException.class)
public void testCallOtherMethods() {
SelMiscFunc.INSTANCE.call("nonExisting", new SelType[0]);
}
|
/**
 * Builds the header row string by joining the format's configured header
 * column names with its delimiter character.
 */
static String headerLine(CSVFormat csvFormat) {
    final String delimiter = String.valueOf(csvFormat.getDelimiter());
    return String.join(delimiter, csvFormat.getHeader());
}
|
// With ignoreSurroundingSpaces disabled, leading/trailing spaces inside cells
// must be preserved verbatim in the parsed records.
@Test
public void givenNotIgnoreSurroundingSpaces_keepsSpaces() {
CSVFormat csvFormat = csvFormat().withIgnoreSurroundingSpaces(false);
PCollection<String> input =
pipeline.apply(
Create.of(
headerLine(csvFormat),
" a ,1,1.1",
"b, 2 ,2.2",
"c,3, 3.3 "));
CsvIOStringToCsvRecord underTest = new CsvIOStringToCsvRecord(csvFormat);
CsvIOParseResult<List<String>> result = input.apply(underTest);
PAssert.that(result.getOutput())
.containsInAnyOrder(
Arrays.asList(
Arrays.asList(" a ", "1", "1.1"),
Arrays.asList("b", " 2 ", "2.2"),
Arrays.asList("c", "3", " 3.3 ")));
PAssert.that(result.getErrors()).empty();
pipeline.run();
}
|
/**
 * Initializes the operator: resolves the executable stage from its payload,
 * sets up user state, the stage bundle factory, state/progress/finalization/
 * checkpoint handlers, and bundle callbacks, then delegates to super.open().
 *
 * <p>NOTE(review): initialization order matters — super.open() is called last
 * because it invokes createWrappingDoFnRunner, which needs the fields set here.
 */
@Override
public void open() throws Exception {
executableStage = ExecutableStage.fromPayload(payload);
hasSdfProcessFn = hasSDF(executableStage);
initializeUserState(executableStage, getKeyedStateBackend(), pipelineOptions);
// TODO: Wire this into the distributed cache and make it pluggable.
// TODO: Do we really want this layer of indirection when accessing the stage bundle factory?
// It's a little strange because this operator is responsible for the lifetime of the stage
// bundle "factory" (manager?) but not the job or Flink bundle factories. How do we make
// ownership of the higher level "factories" explicit? Do we care?
stageContext = contextFactory.get(jobInfo);
stageBundleFactory = stageContext.getStageBundleFactory(executableStage);
stateRequestHandler = getStateRequestHandler(executableStage);
// Forwards SDK-harness monitoring info into the Flink metric container, when present.
progressHandler =
new BundleProgressHandler() {
@Override
public void onProgress(ProcessBundleProgressResponse progress) {
if (flinkMetricContainer != null) {
flinkMetricContainer.updateMetrics(stepName, progress.getMonitoringInfosList());
}
}
@Override
public void onCompleted(ProcessBundleResponse response) {
if (flinkMetricContainer != null) {
flinkMetricContainer.updateMetrics(stepName, response.getMonitoringInfosList());
}
}
};
finalizationHandler =
BundleFinalizationHandlers.inMemoryFinalizer(
stageBundleFactory.getInstructionRequestHandler());
checkpointHandler = getBundleCheckpointHandler(hasSdfProcessFn);
// No event-time timer has fired yet in the current/last bundle.
minEventTimeTimerTimestampInCurrentBundle = Long.MAX_VALUE;
minEventTimeTimerTimestampInLastBundle = Long.MAX_VALUE;
super.setPreBundleCallback(this::preBundleStartCallback);
super.setBundleFinishedCallback(this::finishBundleCallback);
// This will call {@code createWrappingDoFnRunner} which needs the above dependencies.
super.open();
}
|
// Verifies that opening the operator with keyed input installs a
// StatefulDoFnRunner (responsible for state cleanup) wrapped in a
// DoFnRunnerWithMetricsUpdate.
@Test
@SuppressWarnings("unchecked")
public void testEnsureStateCleanupWithKeyedInput() throws Exception {
TupleTag<Integer> mainOutput = new TupleTag<>("main-output");
DoFnOperator.MultiOutputOutputManagerFactory<Integer> outputManagerFactory =
new DoFnOperator.MultiOutputOutputManagerFactory(
mainOutput,
VarIntCoder.of(),
new SerializablePipelineOptions(FlinkPipelineOptions.defaults()));
VarIntCoder keyCoder = VarIntCoder.of();
ExecutableStageDoFnOperator<Integer, Integer> operator =
getOperator(
mainOutput,
Collections.emptyList(),
outputManagerFactory,
WindowingStrategy.globalDefault(),
keyCoder,
WindowedValue.getFullCoder(keyCoder, GlobalWindow.Coder.INSTANCE));
KeyedOneInputStreamOperatorTestHarness<Integer, WindowedValue<Integer>, WindowedValue<Integer>>
testHarness =
new KeyedOneInputStreamOperatorTestHarness(
operator,
val -> val,
new CoderTypeInformation<>(keyCoder, FlinkPipelineOptions.defaults()));
RemoteBundle bundle = Mockito.mock(RemoteBundle.class);
when(bundle.getInputReceivers())
.thenReturn(
ImmutableMap.<String, FnDataReceiver<WindowedValue>>builder()
.put("input", Mockito.mock(FnDataReceiver.class))
.build());
when(stageBundleFactory.getBundle(any(), any(), any(), any(), any(), any())).thenReturn(bundle);
testHarness.open();
Object doFnRunner = Whitebox.getInternalState(operator, "doFnRunner");
assertThat(doFnRunner, instanceOf(DoFnRunnerWithMetricsUpdate.class));
// There should be a StatefulDoFnRunner installed which takes care of clearing state
Object statefulDoFnRunner = Whitebox.getInternalState(doFnRunner, "delegate");
assertThat(statefulDoFnRunner, instanceOf(StatefulDoFnRunner.class));
}
|
/**
 * Returns a windowed view of the materialized table.
 *
 * <p>Session windows use the session-store table; hopping and tumbling windows
 * share the window-store table (which additionally needs the window size).
 *
 * @throws UnsupportedOperationException if the key is not windowed or the
 *         window type is unrecognized
 */
@SuppressWarnings("OptionalGetWithoutIsPresent") // Enforced by type
@Override
public StreamsMaterializedWindowedTable windowed() {
if (!windowInfo.isPresent()) {
throw new UnsupportedOperationException("Table has non-windowed key");
}
final WindowInfo wndInfo = windowInfo.get();
final WindowType wndType = wndInfo.getType();
switch (wndType) {
case SESSION:
return new KsMaterializedSessionTable(stateStore,
SessionStoreCacheBypass::fetch, SessionStoreCacheBypass::fetchRange);
case HOPPING:
case TUMBLING:
return new KsMaterializedWindowTable(stateStore, wndInfo.getSize().get(),
WindowStoreCacheBypass::fetch,
WindowStoreCacheBypass::fetchAll,
WindowStoreCacheBypass::fetchRange);
default:
throw new UnsupportedOperationException("Unknown window type: " + wndInfo);
}
}
|
// Hopping windows must be served by the window-store backed table.
@Test
public void shouldReturnWindowedForHopping() {
// Given:
givenWindowType(Optional.of(WindowType.HOPPING));
// When:
final StreamsMaterializedWindowedTable table = materialization.windowed();
// Then:
assertThat(table, is(instanceOf(KsMaterializedWindowTable.class)));
}
|
/**
 * Reads a one-byte text-encoding marker followed by a string in that encoding.
 *
 * @param max maximum total number of bytes to consume, including the encoding byte
 * @return the decoded string
 * @throws IOException if reading from the underlying stream fails
 */
protected String readEncodingAndString(int max) throws IOException {
byte encoding = readByte();
return readEncodedString(encoding, max - 1);
}
|
// An ISO-encoded, null-terminated string should be decoded without the
// terminator or the leading encoding byte.
@Test
public void testReadString() throws IOException {
byte[] data = {
ID3Reader.ENCODING_ISO,
'T', 'e', 's', 't',
0 // Null-terminated
};
CountingInputStream inputStream = new CountingInputStream(new ByteArrayInputStream(data));
String string = new ID3Reader(inputStream).readEncodingAndString(1000);
assertEquals("Test", string);
}
|
/**
 * Acquires a Dropbox file lock via the lock-file-batch API.
 *
 * <p>Only files inside shared folders can be locked; other paths fail fast
 * with {@link UnsupportedException}.
 *
 * @param file the file to lock
 * @return the string "true" as an opaque lock token on success, or null if the
 *         batch response contained neither a success nor a failure entry
 * @throws BackgroundException on API failure or a failure entry in the response
 */
@Override
public String lock(final Path file) throws BackgroundException {
if(!containerService.getContainer(file).getType().contains(Path.Type.shared)) {
log.warn(String.format("Skip attempting to lock file %s not in shared folder", file));
throw new UnsupportedException();
}
try {
for(LockFileResultEntry result : new DbxUserFilesRequests(session.getClient(file)).lockFileBatch(Collections.singletonList(
new LockFileArg(containerService.getKey(file)))).getEntries()) {
if(result.isFailure()) {
throw this.failure(result);
}
if(result.isSuccess()) {
if(log.isDebugEnabled()) {
log.debug(String.format("Locked file %s with result %s", file, result.getSuccessValue()));
}
return String.valueOf(true);
}
}
return null;
}
catch(DbxException e) {
throw new DropboxExceptionMappingService().map("Failure to write attributes of {0}", e, file);
}
}
|
// Exercises lock/unlock in a shared folder. The test is currently expected to
// fail with InteroperabilityException — presumably an account/permission
// limitation in the test environment; confirm if this becomes a passing flow.
@Test(expected = InteroperabilityException.class)
public void testLock() throws Exception {
final DropboxTouchFeature touch = new DropboxTouchFeature(session);
final Path file = touch.touch(new Path(new Path(new DefaultHomeFinderService(session).find(), "Projects", EnumSet.of(Path.Type.directory, Path.Type.volume, Path.Type.shared)).withAttributes(new PathAttributes().withFileId("7581509952")),
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
final DropboxLockFeature f = new DropboxLockFeature(session);
final String lock = f.lock(file);
assertNotNull(lock);
assertEquals(lock, new DropboxAttributesFinderFeature(session).find(file).getLockId());
f.unlock(file, lock);
new DropboxDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Consumes the next time-ordered point: pairs it with all previously seen
 * points within DISTANCE_THRESHOLD, then indexes it for future pairing.
 *
 * <p>Input must arrive in non-decreasing time order; this is enforced by
 * updateTimeAndConfirmOrdering.
 */
@Override
public void accept(Point newPoint) {
updateTimeAndConfirmOrdering(newPoint.time());
List<SearchResult<Point, Object>> pointsWithinRange = mTree.getAllWithinRange(
newPoint,
DISTANCE_THRESHOLD
);
//add after search so the "newPoint" isn't in the pointsWithinRange data
mTree.put(newPoint, null);
makeAndPublishPairs(pointsWithinRange, newPoint);
// Evict stale points and track the peak index size for diagnostics.
periodicallyPerformCleanUp(newPoint.time());
sizeHighWaterMark = Math.max(sizeHighWaterMark, this.size());
}
|
// Feeding a point whose timestamp precedes the previous one must be rejected,
// since the pair finder requires time-sorted input.
@Test
public void testUnorderedPoints() {
DistanceMetric<Point> metric = new PointDistanceMetric(1.0, 1.0);
double DISTANCE_THRESHOLD = 1250.0;
TestSink sink = new TestSink();
PointPairFinder pairer = new PointPairFinder(Duration.ofSeconds(13), metric, DISTANCE_THRESHOLD, sink);
Instant time1 = Instant.EPOCH;
Instant time2 = Instant.EPOCH.minusSeconds(1);
Point p1 = (new PointBuilder())
.time(time1).latLong(0.0, 0.0).altitude(Distance.ofFeet(0.0)).build();
Point p2 = (new PointBuilder())
.time(time2).latLong(0.0, 0.0).altitude(Distance.ofFeet(0.0)).build();
pairer.accept(p1);
assertThrows(
IllegalArgumentException.class,
() -> pairer.accept(p2),
"Fail because input data is not sorted by time"
);
}
|
/**
 * Executes the scenario: runs setup once, then steps in order, collecting a
 * StepResult per step. Failures are converted into a fake step result rather
 * than propagated, so afterRun() and latch/cleanup logic always execute.
 */
@Override
public void run() {
try { // make sure we call afterRun() even on crashes
// and operate countdown latches, else we may hang the parallel runner
if (steps == null) {
beforeRun();
}
if (skipped) {
return;
}
int count = steps.size();
int index = 0;
// nextStepIndex() drives iteration so debug step-back can rewind the cursor.
while ((index = nextStepIndex()) < count) {
currentStep = steps.get(index);
execute(currentStep);
if (currentStepResult != null) { // can be null if debug step-back or hook skip
result.addStepResult(currentStepResult);
}
}
} catch (Exception e) {
// Record the partial result of the failing step before the fake failure entry.
if (currentStepResult != null) {
result.addStepResult(currentStepResult);
}
logError("scenario [run] failed\n" + StringUtils.throwableToString(e));
currentStepResult = result.addFakeStepResult("scenario [run] failed", e);
} finally {
if (!skipped) {
afterRun();
// Optionally abort the whole suite on first failure.
if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) {
featureRuntime.suite.abort();
}
}
if (caller.isNone()) {
logAppender.close(); // reclaim memory
}
}
}
|
// A table assigned to an invalid variable name ("table1" after the reserved
// word pattern used here) is expected to fail the scenario; the `fail` flag
// tells the runner harness that failure is the expected outcome.
@Test
void testTableWithInvalidVariableName() {
fail = true;
run(
"table table1 =",
"| col |",
"| foo |"
);
}
|
/**
 * Encodes the GIF to a file.
 *
 * <p>If frames were transformed (i.e. the frame transformation is anything
 * other than {@code UnitTransformation}) and the ENCODE_TRANSFORMATION option
 * is set, the frames are re-encoded; otherwise the original source bytes are
 * written out unchanged.
 */
@Override
public boolean encode(
    @NonNull Resource<GifDrawable> resource, @NonNull File file, @NonNull Options options) {
    final GifDrawable drawable = resource.get();
    final boolean isTransformed =
        !(drawable.getFrameTransformation() instanceof UnitTransformation);
    if (isTransformed && options.get(ENCODE_TRANSFORMATION)) {
        return encodeTransformedToFile(drawable, file);
    }
    return writeDataDirect(drawable.getBuffer(), file);
}
|
// With ENCODE_TRANSFORMATION disabled, the encoder must write the drawable's
// raw source buffer to the file untouched.
@Test
public void testEncode_withEncodeTransformationFalse_writesSourceDataToStream()
throws IOException {
options.set(ReEncodingGifResourceEncoder.ENCODE_TRANSFORMATION, false);
String expected = "testString";
byte[] data = expected.getBytes("UTF-8");
when(gifDrawable.getBuffer()).thenReturn(ByteBuffer.wrap(data));
assertTrue(encoder.encode(resource, file, options));
assertThat(getEncodedData()).isEqualTo(expected);
}
|
/**
 * Rewrites the given operator tree using the configured rewriter, operating on
 * a clone so the input operator itself is never mutated.
 *
 * @param origin the operator to rewrite; may be null
 * @return the rewritten operator, or null when {@code origin} is null
 */
public ScalarOperator rewrite(ScalarOperator origin) {
    return origin == null ? null : origin.clone().accept(rewriter, null);
}
|
// Verifies recursive rewriting: c1 -> (c2 = 1) together with c2 -> c3 should
// collapse to (c3 = 1), and a self-referencing mapping (c1 -> c1) must
// terminate and return the column itself.
@Test
public void testRecursiveWithChildren() {
    Map<ColumnRefOperator, ScalarOperator> operatorMap = Maps.newHashMap();
    ColumnRefOperator columnRef1 = createColumnRef(1);
    ColumnRefOperator columnRef2 = createColumnRef(2);
    ColumnRefOperator columnRef3 = createColumnRef(3);
    BinaryPredicateOperator binary = new BinaryPredicateOperator(BinaryType.EQ, columnRef2,
            ConstantOperator.createInt(1));
    operatorMap.put(columnRef1, binary);
    operatorMap.put(columnRef2, columnRef3);
    ReplaceColumnRefRewriter rewriter = new ReplaceColumnRefRewriter(operatorMap, true);
    ColumnRefOperator source = createColumnRef(1);
    ScalarOperator target = rewriter.rewrite(source);
    Assert.assertTrue(target instanceof BinaryPredicateOperator);
    BinaryPredicateOperator rewritten = (BinaryPredicateOperator) target;
    BinaryPredicateOperator result = new BinaryPredicateOperator(BinaryType.EQ, columnRef3,
            ConstantOperator.createInt(1));
    Assert.assertEquals(result, rewritten);
    Map<ColumnRefOperator, ScalarOperator> operatorMap2 = Maps.newHashMap();
    // BUG FIX: this entry was previously added to operatorMap instead of
    // operatorMap2, leaving operatorMap2 empty and the self-reference case
    // untested (the rewriter was exercised with an empty mapping).
    operatorMap2.put(columnRef1, columnRef1);
    ReplaceColumnRefRewriter rewriter2 = new ReplaceColumnRefRewriter(operatorMap2, true);
    ScalarOperator result2 = rewriter2.rewrite(columnRef1);
    Assert.assertEquals(columnRef1, result2);
}
|
/**
 * Loads every row of the given table as a list of column-name to value maps.
 *
 * <p>NOTE(review): the table name is interpolated directly into the SQL
 * string; callers are expected to pass trusted identifiers.
 *
 * @param tableName name of the table to read
 * @return all rows of the table
 */
@Override
@SuppressWarnings("nullness")
public List<Map<String, Object>> readTable(String tableName) {
    LOG.info("Reading all rows from {}.{}", databaseName, tableName);
    final String query = String.format("SELECT * FROM %s", tableName);
    final List<Map<String, Object>> rows = runSQLQuery(query);
    LOG.info("Successfully loaded rows from {}.{}", databaseName, tableName);
    return rows;
}
|
// A SQLException from the JDBC statement must be wrapped in a
// JDBCResourceManagerException rather than leaking out raw.
@Test
public void testReadTableShouldThrowErrorWhenJDBCFailsToExecuteSQL() throws SQLException {
when(container.getHost()).thenReturn(HOST);
when(container.getMappedPort(JDBC_PORT)).thenReturn(MAPPED_PORT);
Statement statement = driver.getConnection(any(), any(), any()).createStatement();
doThrow(SQLException.class).when(statement).executeQuery(anyString());
assertThrows(JDBCResourceManagerException.class, () -> testManager.readTable(TABLE_NAME));
}
|
/** Delegates a TRACE-level message to the wrapped logger. */
@Override
public void trace(String msg) {
logger.trace(msg);
}
|
// The marker+message+exception trace overload must pass all three arguments
// through to the underlying SLF4J logger unchanged.
@Test
void testMarkerTraceWithException() {
Exception exception = new Exception();
jobRunrDashboardLogger.trace(marker, "trace", exception);
verify(slfLogger).trace(marker, "trace", exception);
}
|
/**
 * Runs {@code runnable} decorated with the given observation
 * (see {@code decorateRunnable} for what the decoration records).
 */
static void executeRunnable(Observation observation, Runnable runnable) {
decorateRunnable(observation, runnable).run();
}
|
// The decorated runnable must run exactly once, with the observation started
// and finished without recording errors.
@Test
public void shouldExecuteRunnable() throws Throwable {
Observations.executeRunnable(observation, helloWorldService::sayHelloWorld);
assertThatObservationWasStartedAndFinishedWithoutErrors();
then(helloWorldService).should(times(1)).sayHelloWorld();
}
|
/**
 * Generates an {@link InsertColumnsToken} after each insert column that has
 * encrypt-derived columns, so the derived column names are injected into the
 * rewritten INSERT column list.
 *
 * @param insertStatementContext the insert statement being rewritten
 * @return tokens for every column with non-empty derived column names
 */
@Override
public Collection<SQLToken> generateSQLTokens(final InsertStatementContext insertStatementContext) {
    final String tableName = insertStatementContext.getSqlStatement().getTable().getTableName().getIdentifier().getValue();
    final EncryptTable encryptTable = encryptRule.getEncryptTable(tableName);
    final Collection<SQLToken> result = new LinkedList<>();
    for (ColumnSegment column : insertStatementContext.getSqlStatement().getColumns()) {
        List<String> derivedColumnNames = getDerivedColumnNames(encryptTable, column);
        if (!derivedColumnNames.isEmpty()) {
            result.add(new InsertColumnsToken(column.getStopIndex() + 1, derivedColumnNames));
        }
    }
    return result;
}
|
// An insert statement with no columns must yield no tokens.
@Test
void assertGenerateSQLTokensNotContainColumns() {
EncryptInsertDerivedColumnsTokenGenerator tokenGenerator = new EncryptInsertDerivedColumnsTokenGenerator(mockEncryptRule());
InsertStatementContext insertStatementContext = mock(InsertStatementContext.class, RETURNS_DEEP_STUBS);
when(insertStatementContext.getSqlStatement().getTable().getTableName().getIdentifier().getValue()).thenReturn("foo_tbl");
assertTrue(tokenGenerator.generateSQLTokens(insertStatementContext).isEmpty());
}
|
/**
 * Moves a file by copy-then-delete: the source is first copied to the target
 * via the proxy copy feature, then the source is deleted.
 *
 * @return the path of the copied (target) file
 */
@Override
public Path move(final Path source, final Path target, final TransferStatus status, final Delete.Callback delete, final ConnectionCallback callback) throws BackgroundException {
final Path copy = proxy.copy(source, target, status.withLength(source.attributes().getSize()), callback, new DisabledStreamListener());
new B2DeleteFeature(session, fileid).delete(Collections.singletonList(new Path(source)), callback, delete);
return copy;
}
|
@Test
public void testMove() throws Exception {
    // Create a file, move it, and verify: source is gone, target exists with a
    // different version id, and the attributes finder agrees with the move result.
    final B2VersionIdProvider fileid = new B2VersionIdProvider(session);
    final Path container = new Path("test-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final String name = new AlphanumericRandomStringService().random();
    final Path test = new B2TouchFeature(session, fileid).touch(new Path(container, name, EnumSet.of(Path.Type.file)), new TransferStatus());
    assertTrue(new B2FindFeature(session, fileid).find(test));
    final Path target = new B2MoveFeature(session, fileid).move(test,
            new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
    assertNotEquals(test.attributes().getVersionId(), target.attributes().getVersionId());
    assertFalse(new B2FindFeature(session, fileid).find(new Path(container, name, EnumSet.of(Path.Type.file))));
    assertTrue(new B2FindFeature(session, fileid).find(target));
    final PathAttributes targetAttr = new B2AttributesFinderFeature(session, fileid).find(target);
    assertEquals(Comparison.equal, session.getHost().getProtocol().getFeature(ComparisonService.class).compare(Path.Type.file, target.attributes(), targetAttr));
    new B2DeleteFeature(session, fileid).delete(Collections.<Path>singletonList(target), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@Override
public Collection<Integer> getOutboundPorts(EndpointQualifier endpointQualifier) {
    // With advanced networking enabled, resolve ports from the endpoint-specific
    // config; otherwise fall back to the legacy single-socket network config.
    final AdvancedNetworkConfig advancedNetworkConfig = node.getConfig().getAdvancedNetworkConfig();
    if (!advancedNetworkConfig.isEnabled()) {
        final NetworkConfig networkConfig = node.getConfig().getNetworkConfig();
        return AddressUtil.getOutboundPorts(networkConfig.getOutboundPorts(),
                networkConfig.getOutboundPortDefinitions());
    }
    final EndpointConfig endpointConfig = advancedNetworkConfig.getEndpointConfigs().get(endpointQualifier);
    if (endpointConfig == null) {
        // No config registered for this endpoint: no explicit ports or definitions.
        return AddressUtil.getOutboundPorts(Collections.emptyList(), Collections.emptyList());
    }
    return AddressUtil.getOutboundPorts(endpointConfig.getOutboundPorts(), endpointConfig.getOutboundPortDefinitions());
}
|
@Test
public void testGetOutboundPorts_acceptsZero() {
    // A "0" outbound port definition means "any ephemeral port" and must
    // resolve to an empty explicit port set.
    networkConfig.addOutboundPortDefinition("0");
    Collection<Integer> outboundPorts = serverContext.getOutboundPorts(MEMBER);
    assertEquals(0, outboundPorts.size());
}
|
// Creates a RedissonClient connected to a single local Redis node
// on the default address 127.0.0.1:6379.
public static RedissonClient create() {
    Config defaultConfig = new Config();
    defaultConfig.useSingleServer().setAddress("redis://127.0.0.1:6379");
    return create(defaultConfig);
}
|
@Test
public void testMasterSlaveConnectionFail() {
    // Connecting to unreachable master/slave addresses must fail with
    // RedisConnectionException thrown from Redisson.create().
    Assertions.assertThrows(RedisConnectionException.class, () -> {
        Config config = new Config();
        config.useMasterSlaveServers()
                .setMasterAddress("redis://127.99.0.1:1111")
                .addSlaveAddress("redis://127.99.0.2:1111");
        // Redisson.create() throws here; the former Thread.sleep(1500) after it
        // could never run (dead code) and has been removed.
        Redisson.create(config);
    });
}
|
// Convenience overload: fills a bean of the given class with the supplied
// parameters, using the default empty-error message.
public static <T> T fillBean(String className, Map<List<String>, Object> params, ClassLoader classLoader) {
    return fillBean(errorEmptyMessage(), className, params, classLoader);
}
|
@Test(expected = ScenarioException.class)
public void fillBeanFailTest() {
    // Attempting to set a field that does not exist on Dispute must raise a ScenarioException.
    Map<List<String>, Object> paramsToSet = new HashMap<>();
    paramsToSet.put(List.of("fakeField"), null);
    ScenarioBeanUtil.fillBean(errorEmptyMessage(), Dispute.class.getCanonicalName(), paramsToSet, classLoader);
}
|
@Override
@Transactional(rollbackFor = Exception.class) // transactional: any exception rolls back the whole import
public UserImportRespVO importUserList(List<UserImportExcelVO> importUsers, boolean isUpdateSupport) {
    // 1.1 Argument validation: the import list must not be empty.
    if (CollUtil.isEmpty(importUsers)) {
        throw exception(USER_IMPORT_LIST_IS_EMPTY);
    }
    // 1.2 The configured initial password must not be empty.
    String initPassword = configApi.getConfigValueByKey(USER_INIT_PASSWORD_KEY).getCheckedData();
    if (StrUtil.isEmpty(initPassword)) {
        throw exception(USER_IMPORT_INIT_PASSWORD);
    }
    // 2. Iterate the rows, creating or updating each user in turn; per-row
    // failures are collected instead of aborting the whole import.
    UserImportRespVO respVO = UserImportRespVO.builder().createUsernames(new ArrayList<>())
            .updateUsernames(new ArrayList<>()).failureUsernames(new LinkedHashMap<>()).build();
    importUsers.forEach(importUser -> {
        // 2.1.1 Bean-level constraint validation of the row's fields.
        try {
            ValidationUtils.validate(BeanUtils.toBean(importUser, UserSaveReqVO.class).setPassword(initPassword));
        } catch (ConstraintViolationException ex){
            respVO.getFailureUsernames().put(importUser.getUsername(), ex.getMessage());
            return;
        }
        // 2.1.2 Business validation (mobile / email / department checks).
        try {
            validateUserForCreateOrUpdate(null, null, importUser.getMobile(), importUser.getEmail(),
                    importUser.getDeptId(), null);
        } catch (ServiceException ex) {
            respVO.getFailureUsernames().put(importUser.getUsername(), ex.getMessage());
            return;
        }
        // 2.2.1 Username not present yet: insert a new user.
        AdminUserDO existUser = userMapper.selectByUsername(importUser.getUsername());
        if (existUser == null) {
            userMapper.insert(BeanUtils.toBean(importUser, AdminUserDO.class)
                    .setPassword(encodePassword(initPassword)).setPostIds(new HashSet<>())); // default password and empty post-id set
            respVO.getCreateUsernames().add(importUser.getUsername());
            return;
        }
        // 2.2.2 Username exists: only update when updates are explicitly enabled.
        if (!isUpdateSupport) {
            respVO.getFailureUsernames().put(importUser.getUsername(), USER_USERNAME_EXISTS.getMsg());
            return;
        }
        AdminUserDO updateUser = BeanUtils.toBean(importUser, AdminUserDO.class);
        updateUser.setId(existUser.getId());
        userMapper.updateById(updateUser);
        respVO.getUpdateUsernames().add(importUser.getUsername());
    });
    return respVO;
}
|
@Test
public void testImportUserList_04() {
    // Scenario: username already exists and updates are enabled -> the row is updated.
    // mock data
    AdminUserDO dbUser = randomAdminUserDO();
    userMapper.insert(dbUser);
    // prepare parameters
    UserImportExcelVO importUser = randomPojo(UserImportExcelVO.class, o -> {
        o.setStatus(randomEle(CommonStatusEnum.values()).getStatus()); // keep status within the valid range
        o.setSex(randomEle(SexEnum.values()).getSex()); // keep sex within the valid range
        o.setUsername(dbUser.getUsername());
        o.setEmail(randomEmail());
        o.setMobile(randomMobile());
    });
    // mock deptService behavior
    DeptDO dept = randomPojo(DeptDO.class, o -> {
        o.setId(importUser.getDeptId());
        o.setStatus(CommonStatusEnum.ENABLE.getStatus());
    });
    when(deptService.getDept(eq(dept.getId()))).thenReturn(dept);
    // invoke
    UserImportRespVO respVO = userService.importUserList(newArrayList(importUser), true);
    // assert: exactly one update, no creations, no failures
    assertEquals(0, respVO.getCreateUsernames().size());
    assertEquals(1, respVO.getUpdateUsernames().size());
    AdminUserDO user = userMapper.selectByUsername(respVO.getUpdateUsernames().get(0));
    assertPojoEquals(importUser, user);
    assertEquals(0, respVO.getFailureUsernames().size());
}
|
@Override
public void process(Object elem) throws Exception {
    // Forward the element to the first downstream receiver, inside the
    // operation's process scope (used for execution accounting).
    try (Closeable scope = context.enterProcess()) {
        checkStarted();
        Receiver receiver = receivers[0];
        if (receiver != null) {
            receiver.process(elem);
        }
    }
}
|
@Test
public void testRunFlattenOperation() throws Exception {
    // Push four strings (14 bytes total incl. the empty one) through a
    // FlattenOperation and verify both outputs and emitted counters.
    TestOutputReceiver receiver =
        new TestOutputReceiver(
            counterSet, NameContext.create("test", "receiver", "receiver", "receiver"));
    OperationContext context = TestOperationContext.create(counterSet, nameContext);
    FlattenOperation flattenOperation = new FlattenOperation(receiver, context);
    flattenOperation.start();
    flattenOperation.process("hi");
    flattenOperation.process("there");
    flattenOperation.process("");
    flattenOperation.process("bob");
    flattenOperation.finish();
    assertThat(receiver.outputElems, CoreMatchers.<Object>hasItems("hi", "there", "", "bob"));
    CounterUpdateExtractor<?> updateExtractor = Mockito.mock(CounterUpdateExtractor.class);
    counterSet.extractUpdates(false, updateExtractor);
    // Element count is 4; mean byte size counter saw 14 bytes over 4 elements.
    verify(updateExtractor).longSum(getObjectCounterName("test_receiver_out"), false, 4L);
    verify(updateExtractor)
        .longMean(
            getMeanByteCounterName("test_receiver_out"),
            false,
            LongCounterMean.ZERO.addValue(14L, 4));
    verifyNoMoreInteractions(updateExtractor);
}
|
// Sets the creation timestamp, in epoch milliseconds.
public void setCreateTimeMills(Long createTimeMills) {
    this.createTimeMills = createTimeMills;
}
|
@Test
public void testSetCreateTimeMills() {
    // The setter must overwrite the previously stored creation time.
    long newCreateTimeMills = System.currentTimeMillis() + 2000;
    accAndTimeStamp.setCreateTimeMills(newCreateTimeMills);
    assertEquals("Create time should be set to new value", newCreateTimeMills, accAndTimeStamp.getCreateTimeMills().longValue());
}
|
@Override
public KStream<K, V> merge(final KStream<K, V> stream) {
    // Delegate to the named variant with an empty name, keeping a single code path.
    return merge(stream, NamedInternal.empty());
}
|
@Test
public void shouldNotAllowNullNamedOnMerge() {
    // Passing a null Named argument to merge must be rejected with an NPE
    // carrying the expected message.
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.merge(testStream, null));
    assertThat(exception.getMessage(), equalTo("named can't be null"));
}
|
/**
 * Extracts the value rows from the INSERT statement's VALUES clauses.
 * Each SQL expression is mapped to either its literal value or a placeholder
 * object describing its kind (null, JDBC parameter, method call, DEFAULT,
 * sequence). A non-recognized expression in a primary-key position is
 * rejected via {@code wrapSQLParsingException}.
 */
@Override
public List<List<Object>> getInsertRows(Collection<Integer> primaryKeyIndex) {
    if (ast.getTop() != null) {
        //deal with top sql
        dealTop(ast);
    }
    List<SQLInsertStatement.ValuesClause> valuesClauses = ast.getValuesList();
    List<List<Object>> rows = new ArrayList<>(valuesClauses.size());
    for (SQLInsertStatement.ValuesClause valuesClause : valuesClauses) {
        List<SQLExpr> exprList = valuesClause.getValues();
        List<Object> row = new ArrayList<>(exprList.size());
        rows.add(row);
        for (int i = 0, len = exprList.size(); i < len; i++) {
            SQLExpr expr = exprList.get(i);
            if (expr instanceof SQLNullExpr) {
                // NULL literal.
                row.add(Null.get());
            } else if (expr instanceof SQLValuableExpr) {
                // Plain literal (number, string, ...): use its value directly.
                row.add(((SQLValuableExpr) expr).getValue());
            } else if (expr instanceof SQLVariantRefExpr) {
                //add '?'
                row.add(((SQLVariantRefExpr) expr).getName());
            } else if (expr instanceof SQLMethodInvokeExpr) {
                // Function call such as now(): represented by a marker object.
                row.add(SqlMethodExpr.get());
            } else if (expr instanceof SQLDefaultExpr) {
                // DEFAULT keyword: represented by a marker object.
                row.add(SqlDefaultExpr.get());
            } else if (expr instanceof SQLSequenceExpr) {
                //Supported only since 2012 version of SQL Server,use next value for
                SQLSequenceExpr sequenceExpr = (SQLSequenceExpr) expr;
                String sequence = sequenceExpr.getSequence().getSimpleName();
                String function = sequenceExpr.getFunction().name;
                row.add(new SqlSequenceExpr(sequence, function));
            } else {
                // Unknown expression kind: only acceptable outside primary-key columns.
                if (primaryKeyIndex.contains(i)) {
                    wrapSQLParsingException(expr);
                }
                row.add(NotPlaceholderExpr.get());
            }
        }
    }
    return rows;
}
|
@Test
public void testGetInsertRows() {
    // Covers: literal/null/parameter/function values, sequence expressions,
    // the unsupported TOP clause, and an invalid primary-key expression.
    //test for null value
    String sql = "insert into t(id, no, name, age, time) values (default, null, 'a', ?, now())";
    SQLStatement ast = getSQLStatement(sql);
    SqlServerInsertRecognizer recognizer = new SqlServerInsertRecognizer(sql, ast);
    List<List<Object>> insertRows = recognizer.getInsertRows(Collections.singletonList(pkIndex));
    Assertions.assertEquals(1, insertRows.size());
    //test for sequence
    sql = "insert into t(id) values(next value for t1.id)";
    ast = getSQLStatement(sql);
    recognizer = new SqlServerInsertRecognizer(sql, ast);
    insertRows = recognizer.getInsertRows(Collections.singletonList(pkIndex));
    Assertions.assertEquals(1, insertRows.size());
    //test for top
    Assertions.assertThrows(NotSupportYetException.class, () -> {
        String s = "insert top(1) into t(id) values(id1)";
        SQLStatement sqlStatement = getSQLStatement(s);
        SqlServerInsertRecognizer sqlServerInsertRecognizer = new SqlServerInsertRecognizer(s, sqlStatement);
        sqlServerInsertRecognizer.getInsertRows(Collections.singletonList(pkIndex));
    });
    //test for exception
    Assertions.assertThrows(SQLParsingException.class, () -> {
        String s = "insert into t(a) values (?)";
        SQLStatement sqlStatement = getSQLStatement(s);
        SQLInsertStatement sqlInsertStatement = (SQLInsertStatement) sqlStatement;
        sqlInsertStatement.getValuesList().get(0).getValues().set(pkIndex, new MySqlOrderingExpr());
        SqlServerInsertRecognizer sqlServerInsertRecognizer = new SqlServerInsertRecognizer(s, sqlInsertStatement);
        sqlServerInsertRecognizer.getInsertRows(Collections.singletonList(pkIndex));
    });
}
|
// Returns this instance's NodeMgr.
public NodeMgr getNodeMgr() {
    return nodeMgr;
}
|
@Test
public void testReplayUpdateFrontend() throws Exception {
    // Replaying an updated frontend entry must persist the new host and edit-log port.
    GlobalStateMgr globalStateMgr = mockGlobalStateMgr();
    List<Frontend> frontends = globalStateMgr.getNodeMgr().getFrontends(null);
    Frontend fe = frontends.get(0);
    fe.updateHostAndEditLogPort("testHost", 1000);
    globalStateMgr.getNodeMgr().replayUpdateFrontend(fe);
    List<Frontend> updatedFrontends = globalStateMgr.getNodeMgr().getFrontends(null);
    Frontend updatedfFe = updatedFrontends.get(0);
    Assert.assertEquals("testHost", updatedfFe.getHost());
    Assert.assertTrue(updatedfFe.getEditLogPort() == 1000);
}
|
/**
 * Resolves a method for the given call expression, trying to coerce empty
 * collection arguments (e.g. an empty list literal vs. an empty map literal)
 * between their collection types until a matching method is found.
 *
 * First tries the arguments as-is; on failure, iterates over combinations of
 * flipped collection types at the given indexes. When a match is found, the
 * caller's {@code arguments} list is updated to the coerced types.
 *
 * Returns a pair of (resolved method if any, scope expression).
 */
public static Pair<Optional<Method>, Optional<TypedExpression>> resolveMethodWithEmptyCollectionArguments(
        final MethodCallExpr methodExpression,
        final MvelCompilerContext mvelCompilerContext,
        final Optional<TypedExpression> scope,
        List<TypedExpression> arguments,
        List<Integer> emptyCollectionArgumentsIndexes) {
    Objects.requireNonNull(methodExpression, "MethodExpression parameter cannot be null as the method searches methods based on this expression!");
    Objects.requireNonNull(mvelCompilerContext, "MvelCompilerContext parameter cannot be null!");
    Objects.requireNonNull(arguments, "Arguments parameter cannot be null! Use an empty list instance if needed instead.");
    Objects.requireNonNull(emptyCollectionArgumentsIndexes, "EmptyListArgumentIndexes parameter cannot be null! Use an empty list instance if needed instead.");
    if (emptyCollectionArgumentsIndexes.size() > arguments.size()) {
        throw new IllegalArgumentException("There cannot be more empty collection arguments than all arguments! emptyCollectionArgumentsIndexes parameter has more items than arguments parameter. "
                + "(" + emptyCollectionArgumentsIndexes.size() + " > " + arguments.size() + ")");
    } else {
        // Work on a copy so the caller's arguments are only modified on success.
        final List<TypedExpression> coercedArgumentsTypesList = new ArrayList<>(arguments);
        Pair<Optional<Method>, Optional<TypedExpression>> resolveMethodResult =
                MethodResolutionUtils.resolveMethod(methodExpression, mvelCompilerContext, scope, coercedArgumentsTypesList);
        if (resolveMethodResult.a.isPresent()) {
            return resolveMethodResult;
        } else {
            // Rather work only with the argumentsType and when a method is resolved, flip the arguments list based on it.
            // This needs to go through all possible combinations.
            final int indexesListSize = emptyCollectionArgumentsIndexes.size();
            for (int numberOfProcessedIndexes = 0; numberOfProcessedIndexes < indexesListSize; numberOfProcessedIndexes++) {
                for (int indexOfEmptyListIndex = numberOfProcessedIndexes; indexOfEmptyListIndex < indexesListSize; indexOfEmptyListIndex++) {
                    // Flip the collection type at this index and retry resolution.
                    switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
                    resolveMethodResult =
                            MethodResolutionUtils.resolveMethod(methodExpression, mvelCompilerContext, scope, coercedArgumentsTypesList);
                    if (resolveMethodResult.a.isPresent()) {
                        // Success: propagate the coerced collection types back to the caller's list.
                        modifyArgumentsBasedOnCoercedCollectionArguments(arguments, coercedArgumentsTypesList);
                        return resolveMethodResult;
                    }
                    // Flip back before trying the next index.
                    switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
                }
                // Keep this index permanently flipped for the next round of combinations.
                switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(numberOfProcessedIndexes));
            }
            // No method found, return empty.
            return new Pair<>(Optional.empty(), scope);
        }
    }
}
|
@Test
public void resolveMethodWithEmptyCollectionArgumentsCoerceListAndMap() {
    // Arguments are given as (Map, List); resolution must find the method and
    // flip both empty-collection arguments to the expected (List, Map) order.
    final MethodCallExpr methodExpression = new MethodCallExpr("setAddressesAndItems", new MapCreationLiteralExpression(null, NodeList.nodeList()));
    final List<TypedExpression> arguments = new ArrayList<>();
    arguments.add(new MapExprT(new MapCreationLiteralExpression(null, NodeList.nodeList())));
    arguments.add(new ListExprT(new ListCreationLiteralExpression(null, NodeList.nodeList())));
    final TypedExpression scope = new ObjectCreationExpressionT(Collections.emptyList(), Person.class);
    final Pair<Optional<Method>, Optional<TypedExpression>> resolvedMethodResult =
        MethodResolutionUtils.resolveMethodWithEmptyCollectionArguments(
            methodExpression,
            new MvelCompilerContext(null),
            Optional.of(scope),
            arguments,
            List.of(0, 1));
    Assertions.assertThat(resolvedMethodResult.a).isPresent();
    Assertions.assertThat(getTypedExpressionsClasses(arguments))
        .containsExactlyElementsOf(List.of(ListExprT.class, MapExprT.class));
}
|
/**
 * Describes, for each requested replica, the log directory currently hosting
 * it and (if present) the future replica directory, by grouping the replicas
 * per broker and issuing one DescribeLogDirs request per broker.
 *
 * Each requested replica gets its own future. Replicas the broker does not
 * report back are completed with a default (empty) ReplicaLogDirInfo.
 */
@Override
public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas, DescribeReplicaLogDirsOptions options) {
    final Map<TopicPartitionReplica, KafkaFutureImpl<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>> futures = new HashMap<>(replicas.size());
    for (TopicPartitionReplica replica : replicas) {
        futures.put(replica, new KafkaFutureImpl<>());
    }
    // Build one DescribeLogDirsRequestData per broker, accumulating the
    // partitions of each topic hosted on that broker.
    Map<Integer, DescribeLogDirsRequestData> partitionsByBroker = new HashMap<>();
    for (TopicPartitionReplica replica: replicas) {
        DescribeLogDirsRequestData requestData = partitionsByBroker.computeIfAbsent(replica.brokerId(),
            brokerId -> new DescribeLogDirsRequestData());
        DescribableLogDirTopic describableLogDirTopic = requestData.topics().find(replica.topic());
        if (describableLogDirTopic == null) {
            List<Integer> partitions = new ArrayList<>();
            partitions.add(replica.partition());
            describableLogDirTopic = new DescribableLogDirTopic().setTopic(replica.topic())
                .setPartitions(partitions);
            requestData.topics().add(describableLogDirTopic);
        } else {
            describableLogDirTopic.partitions().add(replica.partition());
        }
    }
    final long now = time.milliseconds();
    for (Map.Entry<Integer, DescribeLogDirsRequestData> entry: partitionsByBroker.entrySet()) {
        final int brokerId = entry.getKey();
        final DescribeLogDirsRequestData topicPartitions = entry.getValue();
        // Pre-populate with empty infos so un-reported partitions still complete.
        final Map<TopicPartition, ReplicaLogDirInfo> replicaDirInfoByPartition = new HashMap<>();
        for (DescribableLogDirTopic topicPartition: topicPartitions.topics()) {
            for (Integer partitionId : topicPartition.partitions()) {
                replicaDirInfoByPartition.put(new TopicPartition(topicPartition.topic(), partitionId), new ReplicaLogDirInfo());
            }
        }
        runnable.call(new Call("describeReplicaLogDirs", calcDeadlineMs(now, options.timeoutMs()),
            new ConstantNodeIdProvider(brokerId)) {
            @Override
            public DescribeLogDirsRequest.Builder createRequest(int timeoutMs) {
                // Query selected partitions in all log directories
                return new DescribeLogDirsRequest.Builder(topicPartitions);
            }
            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
                for (Map.Entry<String, LogDirDescription> responseEntry: logDirDescriptions(response).entrySet()) {
                    String logDir = responseEntry.getKey();
                    LogDirDescription logDirInfo = responseEntry.getValue();
                    // No replica info will be provided if the log directory is offline
                    if (logDirInfo.error() instanceof KafkaStorageException)
                        continue;
                    if (logDirInfo.error() != null)
                        handleFailure(new IllegalStateException(
                            "The error " + logDirInfo.error().getClass().getName() + " for log directory " + logDir + " in the response from broker " + brokerId + " is illegal"));
                    // Merge current vs. future replica info for each partition in this log dir.
                    for (Map.Entry<TopicPartition, ReplicaInfo> replicaInfoEntry: logDirInfo.replicaInfos().entrySet()) {
                        TopicPartition tp = replicaInfoEntry.getKey();
                        ReplicaInfo replicaInfo = replicaInfoEntry.getValue();
                        ReplicaLogDirInfo replicaLogDirInfo = replicaDirInfoByPartition.get(tp);
                        if (replicaLogDirInfo == null) {
                            log.warn("Server response from broker {} mentioned unknown partition {}", brokerId, tp);
                        } else if (replicaInfo.isFuture()) {
                            // Future replica: keep current dir/lag, record the future dir/lag.
                            replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(replicaLogDirInfo.getCurrentReplicaLogDir(),
                                replicaLogDirInfo.getCurrentReplicaOffsetLag(),
                                logDir,
                                replicaInfo.offsetLag()));
                        } else {
                            // Current replica: record current dir/lag, keep any future info.
                            replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(logDir,
                                replicaInfo.offsetLag(),
                                replicaLogDirInfo.getFutureReplicaLogDir(),
                                replicaLogDirInfo.getFutureReplicaOffsetLag()));
                        }
                    }
                }
                // Complete the future of every partition requested from this broker.
                for (Map.Entry<TopicPartition, ReplicaLogDirInfo> entry: replicaDirInfoByPartition.entrySet()) {
                    TopicPartition tp = entry.getKey();
                    KafkaFutureImpl<ReplicaLogDirInfo> future = futures.get(new TopicPartitionReplica(tp.topic(), tp.partition(), brokerId));
                    future.complete(entry.getValue());
                }
            }
            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(futures.values(), throwable);
            }
        }, now);
    }
    return new DescribeReplicaLogDirsResult(new HashMap<>(futures));
}
|
@Test
public void testDescribeReplicaLogDirsWithNonExistReplica() throws Exception {
    // tpr1 is reported by the broker; tpr2 is not. tpr2's future must still
    // complete, with null log dirs and the default offset lag (-1).
    int brokerId = 0;
    TopicPartitionReplica tpr1 = new TopicPartitionReplica("topic1", 12, brokerId);
    TopicPartitionReplica tpr2 = new TopicPartitionReplica("topic2", 12, brokerId);
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        String logDir = "/var/data/kafka0";
        int offsetLag = 1;
        int defaultOffsetLag = -1;
        env.kafkaClient().prepareResponseFrom(
            new DescribeLogDirsResponse(
                new DescribeLogDirsResponseData().setResults(singletonList(
                    prepareDescribeLogDirsResult(tpr1, logDir, 123456, offsetLag, false)))),
            env.cluster().nodeById(brokerId));
        DescribeReplicaLogDirsResult result = env.adminClient().describeReplicaLogDirs(asList(tpr1, tpr2));
        Map<TopicPartitionReplica, KafkaFuture<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>> values = result.values();
        assertEquals(logDir, values.get(tpr1).get().getCurrentReplicaLogDir());
        assertNull(values.get(tpr1).get().getFutureReplicaLogDir());
        assertEquals(offsetLag, values.get(tpr1).get().getCurrentReplicaOffsetLag());
        assertEquals(defaultOffsetLag, values.get(tpr1).get().getFutureReplicaOffsetLag());
        assertNull(values.get(tpr2).get().getCurrentReplicaLogDir());
        assertNull(values.get(tpr2).get().getFutureReplicaLogDir());
        assertEquals(defaultOffsetLag, values.get(tpr2).get().getCurrentReplicaOffsetLag());
        assertEquals(defaultOffsetLag, values.get(tpr2).get().getFutureReplicaOffsetLag());
    }
}
|
@Override
public GroupCoordinatorMetricsShard newMetricsShard(SnapshotRegistry snapshotRegistry, TopicPartition tp) {
    // Each shard records into the shared global sensors for the given partition.
    return new GroupCoordinatorMetricsShard(snapshotRegistry, globalSensors, tp);
}
|
@Test
public void testGlobalSensors() {
    // Recording on a shard must update the coordinator-wide (global) sensors:
    // each count metric reflects the recorded total, each rate is total / window.
    MetricsRegistry registry = new MetricsRegistry();
    Time time = new MockTime();
    Metrics metrics = new Metrics(time);
    GroupCoordinatorMetrics coordinatorMetrics = new GroupCoordinatorMetrics(registry, metrics);
    GroupCoordinatorMetricsShard shard = coordinatorMetrics.newMetricsShard(
        new SnapshotRegistry(new LogContext()), new TopicPartition("__consumer_offsets", 0)
    );
    shard.record(CLASSIC_GROUP_COMPLETED_REBALANCES_SENSOR_NAME, 10);
    assertMetricValue(metrics, metrics.metricName("group-completed-rebalance-rate", GroupCoordinatorMetrics.METRICS_GROUP), 1.0 / 3.0);
    assertMetricValue(metrics, metrics.metricName("group-completed-rebalance-count", GroupCoordinatorMetrics.METRICS_GROUP), 10);
    shard.record(OFFSET_COMMITS_SENSOR_NAME, 20);
    assertMetricValue(metrics, metrics.metricName("offset-commit-rate", GroupCoordinatorMetrics.METRICS_GROUP), 2.0 / 3.0);
    assertMetricValue(metrics, metrics.metricName("offset-commit-count", GroupCoordinatorMetrics.METRICS_GROUP), 20);
    shard.record(OFFSET_EXPIRED_SENSOR_NAME, 30);
    assertMetricValue(metrics, metrics.metricName("offset-expiration-rate", GroupCoordinatorMetrics.METRICS_GROUP), 1.0);
    assertMetricValue(metrics, metrics.metricName("offset-expiration-count", GroupCoordinatorMetrics.METRICS_GROUP), 30);
    shard.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME, 50);
    assertMetricValue(metrics, metrics.metricName("consumer-group-rebalance-rate", GroupCoordinatorMetrics.METRICS_GROUP), 5.0 / 3.0);
    assertMetricValue(metrics, metrics.metricName("consumer-group-rebalance-count", GroupCoordinatorMetrics.METRICS_GROUP), 50);
}
|
// Handles an incoming Af01 message by removing the stored Afnemersbericht.
// NOTE(review): the af01 payload itself is not inspected here — confirm that
// deleting the bericht is the only processing this message type requires.
public void processAf01(Af01 af01, Afnemersbericht afnemersbericht){
    afnemersberichtRepository.delete(afnemersbericht);
    logger.info("Finished processing Af01 message");
}
|
@Test
public void testProcessAf01(){
    // Processing an Af01 message must delete the corresponding afnemersbericht exactly once.
    String testBsn = "SSSSSSSSS";
    Af01 testAf01 = TestDglMessagesUtil.createTestAf01(testBsn);
    classUnderTest.processAf01(testAf01, afnemersbericht);
    verify(afnemersberichtRepository, times(1)).delete(afnemersbericht);
}
|
// Validates that the given index name is a legal DB identifier within the
// index-name size limit (via checkDbIdentifier) and returns it unchanged.
public static String validateIndexName(@Nullable String indexName) {
    checkDbIdentifier(indexName, "Index name", INDEX_NAME_MAX_SIZE);
    return indexName;
}
|
@Test
public void validateIndexName_throws_IAE_when_index_name_contains_invalid_characters() {
    // Characters outside [a-z0-9_] must be rejected with a descriptive IAE.
    assertThatThrownBy(() -> validateIndexName("(not/valid)"))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Index name must be lower case and contain only alphanumeric chars or '_', got '(not/valid)'");
}
|
@Override
public ExecuteResult execute(final ServiceContext serviceContext, final ConfiguredKsqlPlan plan,
    final boolean restoreInProgress) {
    // Execute the configured plan; plain KsqlExceptions are re-wrapped so the
    // failing statement text travels with the error.
    try {
        return EngineExecutor
            .create(primaryContext, serviceContext, plan.getConfig())
            .execute(plan.getPlan(), restoreInProgress);
    } catch (final KsqlStatementException e) {
        // Already carries its statement text - rethrow untouched.
        throw e;
    } catch (final KsqlException e) {
        // add the statement text to the KsqlException
        throw new KsqlStatementException(
            e.getMessage(),
            e.getMessage(),
            plan.getPlan().getStatementText(),
            e.getCause()
        );
    }
}
|
@Test
public void shouldFailDropStreamWhenAnInsertQueryIsWritingTheStream() {
    // Dropping a stream that an INSERT INTO query writes to must fail with a
    // message identifying the writing query.
    // Given:
    setupKsqlEngineWithSharedRuntimeEnabled();
    KsqlEngineTestUtil.execute(
        serviceContext,
        ksqlEngine,
        "create stream bar as select * from test1;"
            + "insert into bar select * from test1;",
        ksqlConfig,
        Collections.emptyMap()
    );
    // When:
    final KsqlStatementException e = assertThrows(
        KsqlStatementException.class,
        () -> KsqlEngineTestUtil.execute(
            serviceContext,
            ksqlEngine,
            "drop stream bar;",
            ksqlConfig,
            Collections.emptyMap()
        )
    );
    // Then:
    assertThat(e, rawMessage(is(
        "Cannot drop BAR.\n"
            + "The following queries read from this source: [].\n"
            + "The following queries write into this source: [INSERTQUERY_1].\n"
            + "You need to terminate them before dropping BAR.")));
    assertThat(e, statementText(is("drop stream bar;")));
}
|
@Override
public String getName() {
    // Display name identifying the AWS CodeBuild CI provider.
    return "AwsCodeBuild";
}
|
@Test
public void getName() {
    // The provider must report its fixed display name.
    assertThat(underTest.getName()).isEqualTo("AwsCodeBuild");
}
|
// Computes the network address of a reverse DNS zone: starting from baseIp,
// step `range` addresses forward, `index` times. Index is validated before
// range, matching the original check order.
protected static String getReverseZoneNetworkAddress(String baseIp, int range,
    int index) throws UnknownHostException {
    checkNonNegative(index,
        "Invalid index provided, must be positive: %d");
    checkNonNegative(range,
        "Invalid range provided, cannot be negative: %d");
    return calculateIp(baseIp, range, index);
}

// Throws IllegalArgumentException with the given format applied to the value
// when the value is negative.
private static void checkNonNegative(int value, String messageFormat) {
    if (value < 0) {
        throw new IllegalArgumentException(String.format(messageFormat, value));
    }
}
|
@Test
public void testVariousRangeAndIndexValues() throws Exception {
    // Given the base address of 172.17.4.0, step 256 IP addresses, 5 times.
    assertEquals("172.17.9.0",
        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 256, 5));
    assertEquals("172.17.4.128",
        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 128, 1));
    assertEquals("172.18.0.0",
        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 256, 252));
    assertEquals("172.17.12.0",
        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 1024, 2));
    // Zero range or zero index leaves the base address unchanged.
    assertEquals("172.17.4.0",
        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 0, 1));
    assertEquals("172.17.4.0",
        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 1, 0));
    assertEquals("172.17.4.1",
        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 1, 1));
}
|
// Updates only the runtime overview and timeline of a workflow instance
// (no status change, zero timeout). Returns error details on failure.
public Optional<Details> updateRuntimeOverview(
    WorkflowSummary summary, WorkflowRuntimeOverview overview, Timeline timeline) {
    return updateWorkflowInstance(summary, overview, timeline, null, 0);
}
|
@Test
public void testUpdateRuntimeOverview() {
    // After updating, the latest run must carry the new overview and timeline
    // while keeping the execution id set at execution time.
    WorkflowSummary summary = new WorkflowSummary();
    summary.setWorkflowId(wfi.getWorkflowId());
    summary.setWorkflowInstanceId(wfi.getWorkflowInstanceId());
    summary.setWorkflowRunId(wfi.getWorkflowRunId());
    instanceDao.executeWorkflowInstance(summary, "test_execution_id");
    WorkflowRuntimeOverview overview =
        WorkflowRuntimeOverview.of(
            4,
            singletonEnumMap(StepInstance.Status.RUNNING, WorkflowStepStatusSummary.of(4L)),
            null);
    Timeline timeline =
        new Timeline(Collections.singletonList(TimelineLogEvent.info("hello world")));
    Optional<Details> result = instanceDao.updateRuntimeOverview(summary, overview, timeline);
    assertFalse(result.isPresent());
    WorkflowInstance latestRun =
        instanceDao.getLatestWorkflowInstanceRun(wfi.getWorkflowId(), wfi.getWorkflowInstanceId());
    assertEquals(overview, latestRun.getRuntimeOverview());
    assertEquals(timeline, latestRun.getTimeline());
    assertEquals("test_execution_id", latestRun.getExecutionId());
}
|
// Reads lines from the terminal, handling CLI-specific commands in place,
// and returns the first line that is NOT a CLI-specific command.
public String nextNonCliCommand() {
    while (true) {
        final String line = terminal.readLine();
        if (!maybeHandleCliSpecificCommands(line)) {
            return line;
        }
    }
}
|
@Test
public void shouldSupportCmdBeingTerminatedWithSemiColon() {
    // A CLI command terminated with ';' must still be recognized, and the
    // trailing semicolon stripped from its argument.
    // Given:
    when(lineSupplier.get())
        .thenReturn(CLI_CMD_NAME + WHITE_SPACE + "Arg0;")
        .thenReturn("not a CLI command;");
    // When:
    console.nextNonCliCommand();
    // Then:
    verify(cliCommand).execute(eq(ImmutableList.of("Arg0")), any());
}
|
/**
 * Merges another ProvisionResponse into this one and returns this instance.
 * Precedence: UNDER_PROVISIONED dominates everything; RIGHT_SIZED dominates
 * OVER_PROVISIONED and UNDECIDED; UNDECIDED changes nothing.
 */
public ProvisionResponse aggregate(ProvisionResponse other) {
    if (_status == ProvisionStatus.UNDER_PROVISIONED) {
        // Already under-provisioned: only merge recommendations from another
        // under-provisioned response; no other status can change this state.
        if (other.status() == ProvisionStatus.UNDER_PROVISIONED) {
            aggregateRecommendations(other);
        }
    } else {
        switch (other.status()) {
            case UNDER_PROVISIONED:
                // Under-provisioned wins: drop prior recommendations and adopt other's.
                _status = ProvisionStatus.UNDER_PROVISIONED;
                clearRecommendation();
                aggregateRecommendations(other);
                break;
            case RIGHT_SIZED:
                // Right-sized clears any over-provisioned recommendation.
                _status = ProvisionStatus.RIGHT_SIZED;
                clearRecommendation();
                break;
            case OVER_PROVISIONED:
                if (_status == ProvisionStatus.OVER_PROVISIONED || _status == ProvisionStatus.UNDECIDED) {
                    _status = ProvisionStatus.OVER_PROVISIONED;
                    aggregateRecommendations(other);
                    break;
                }
                // Keep the status as right-sized if it was right-sized before.
                break;
            case UNDECIDED:
                // Nothing to do.
                break;
            default:
                throw new IllegalArgumentException("Unsupported provision status " + other + " is provided.");
        }
    }
    return this;
}
|
@Test
public void testAggregate() {
// Verify validity of input while creating a ProvisionResponse using an invalid recommender and recommendation.
assertThrows(IllegalArgumentException.class, () -> new ProvisionResponse(ProvisionStatus.RIGHT_SIZED, OVER_PROV_REC, RECOMMENDER_UP));
assertThrows(IllegalArgumentException.class, () -> new ProvisionResponse(ProvisionStatus.UNDECIDED, OVER_PROV_REC, RECOMMENDER_UP));
// Verify validity of input while creating a ProvisionResponse using a valid recommendation, but an invalid (i.e. null) recommender.
assertThrows(IllegalArgumentException.class, () -> new ProvisionResponse(ProvisionStatus.OVER_PROVISIONED, OVER_PROV_REC, null));
assertThrows(IllegalArgumentException.class, () -> new ProvisionResponse(ProvisionStatus.UNDER_PROVISIONED, UNDER_PROV_REC, null));
// Verify validity of aggregation (1) state, (2) recommendation, and (3) provision recommendations.
// Case-1: Aggregating any provision status with {@link ProvisionStatus#UNDER_PROVISIONED} is {@link ProvisionStatus#UNDER_PROVISIONED}.
String recommender = "Case1";
for (ProvisionStatus status : ProvisionStatus.cachedValues()) {
ProvisionResponse underProvisioned = new ProvisionResponse(ProvisionStatus.UNDER_PROVISIONED, UNDER_PROV_REC, recommender);
underProvisioned.aggregate(generateProvisionResponse(status));
assertEquals(ProvisionStatus.UNDER_PROVISIONED, underProvisioned.status());
assertEquals(status == ProvisionStatus.UNDER_PROVISIONED
? String.format("[%s] %s [%s] %s", recommender, UNDER_PROV_REC_STR, RECOMMENDER_UP, UNDER_PROV_REC_STR)
: String.format("[%s] %s", recommender, UNDER_PROV_REC_STR), underProvisioned.recommendation());
assertEquals(status == ProvisionStatus.UNDER_PROVISIONED ? 2 : 1, underProvisioned.recommendationByRecommender().size());
assertEquals(NUM_BROKERS_UP, underProvisioned.recommendationByRecommender().get(recommender).numBrokers());
assertEquals(TYPICAL_BROKER_ID_UP, underProvisioned.recommendationByRecommender().get(recommender).typicalBrokerId());
assertEquals(TYPICAL_BROKER_CAPACITY_UP, underProvisioned.recommendationByRecommender().get(recommender).typicalBrokerCapacity(), DELTA);
if (status == ProvisionStatus.UNDER_PROVISIONED) {
assertEquals(NUM_BROKERS_UP, underProvisioned.recommendationByRecommender().get(RECOMMENDER_UP).numBrokers());
assertEquals(TYPICAL_BROKER_ID_UP, underProvisioned.recommendationByRecommender().get(RECOMMENDER_UP).typicalBrokerId());
assertEquals(TYPICAL_BROKER_CAPACITY_UP, underProvisioned.recommendationByRecommender().get(RECOMMENDER_UP).typicalBrokerCapacity(), DELTA);
}
}
// Case-2: Aggregating a provision status {@code P} with {@link ProvisionStatus#UNDECIDED} is {@code P}
for (ProvisionStatus status : ProvisionStatus.cachedValues()) {
ProvisionResponse undecided = new ProvisionResponse(ProvisionStatus.UNDECIDED);
ProvisionResponse other = generateProvisionResponse(status);
String recommendationBefore = other.recommendation();
Map<String, ProvisionRecommendation> recommendationByRecommenderBefore = other.recommendationByRecommender();
undecided.aggregate(other);
assertEquals(status, undecided.status());
assertEquals(recommendationBefore, undecided.recommendation());
assertEquals(recommendationByRecommenderBefore, undecided.recommendationByRecommender());
}
// Case-3.1: Aggregating {@link ProvisionStatus#RIGHT_SIZED} with {@link ProvisionStatus#RIGHT_SIZED} or
// {@link ProvisionStatus#OVER_PROVISIONED} is {@link ProvisionStatus#RIGHT_SIZED}
ProvisionResponse rightSized = new ProvisionResponse(ProvisionStatus.RIGHT_SIZED);
rightSized.aggregate(generateProvisionResponse(ProvisionStatus.RIGHT_SIZED));
assertEquals(ProvisionStatus.RIGHT_SIZED, rightSized.status());
assertTrue(rightSized.recommendationByRecommender().isEmpty());
rightSized.aggregate(generateProvisionResponse(ProvisionStatus.OVER_PROVISIONED));
assertEquals(ProvisionStatus.RIGHT_SIZED, rightSized.status());
assertTrue(rightSized.recommendation().isEmpty());
assertTrue(rightSized.recommendationByRecommender().isEmpty());
// Case-3.2: Aggregating {@link ProvisionStatus#OVER_PROVISIONED} with {@link ProvisionStatus#RIGHT_SIZED} clears the recommendation
recommender = "Case3.2";
ProvisionResponse overProvisioned = new ProvisionResponse(ProvisionStatus.OVER_PROVISIONED, OVER_PROV_REC, recommender);
assertFalse(overProvisioned.recommendation().isEmpty());
assertFalse(overProvisioned.recommendationByRecommender().isEmpty());
overProvisioned.aggregate(generateProvisionResponse(ProvisionStatus.RIGHT_SIZED));
assertTrue(overProvisioned.recommendation().isEmpty());
assertTrue(overProvisioned.recommendationByRecommender().isEmpty());
// Case-4: Aggregating {@link ProvisionStatus#OVER_PROVISIONED} with {@link ProvisionStatus#OVER_PROVISIONED} yields itself
recommender = "Case4";
overProvisioned = new ProvisionResponse(ProvisionStatus.OVER_PROVISIONED, OVER_PROV_REC, recommender);
assertEquals(String.format("[%s] %s", recommender, OVER_PROV_REC_STR), overProvisioned.recommendation());
assertEquals(1, overProvisioned.recommendationByRecommender().size());
assertEquals(NUM_BROKERS_OP, overProvisioned.recommendationByRecommender().get(recommender).numBrokers());
assertEquals(TYPICAL_BROKER_ID_OP, overProvisioned.recommendationByRecommender().get(recommender).typicalBrokerId());
assertEquals(TYPICAL_BROKER_CAPACITY_OP, overProvisioned.recommendationByRecommender().get(recommender).typicalBrokerCapacity(), DELTA);
overProvisioned.aggregate(generateProvisionResponse(ProvisionStatus.OVER_PROVISIONED));
assertEquals(ProvisionStatus.OVER_PROVISIONED, overProvisioned.status());
assertEquals(String.format("[%s] %s [%s] %s", recommender, OVER_PROV_REC_STR, RECOMMENDER_OP, OVER_PROV_REC_STR),
overProvisioned.recommendation());
assertEquals(2, overProvisioned.recommendationByRecommender().size());
assertEquals(NUM_BROKERS_OP, overProvisioned.recommendationByRecommender().get(RECOMMENDER_OP).numBrokers());
assertEquals(TYPICAL_BROKER_ID_OP, overProvisioned.recommendationByRecommender().get(RECOMMENDER_OP).typicalBrokerId());
assertEquals(TYPICAL_BROKER_CAPACITY_OP, overProvisioned.recommendationByRecommender().get(RECOMMENDER_OP).typicalBrokerCapacity(), DELTA);
}
|
/**
 * Executes a Telegram Bot API request synchronously.
 *
 * @param request the request to send
 * @return the API response matching the request type
 */
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
    // Delegate to the underlying API client and hand the response straight back.
    final R response = api.send(request);
    return response;
}
|
@Test
public void getChatMenuButton() {
    // Switch the chat's menu button to the commands menu, then read it back.
    MenuButton commandsMenu = new MenuButtonCommands();
    BaseResponse setResponse = bot.execute(new SetChatMenuButton().chatId(chatId)
            .menuButton(commandsMenu));
    assertTrue(setResponse.isOk());
    GetChatMenuButtonResponse getResponse = bot.execute(new GetChatMenuButton().chatId(chatId));
    assertTrue(getResponse.isOk());
    // The stored button type must match what was just configured.
    assertEquals(commandsMenu.type(), getResponse.result().type());
}
|
/**
 * Resolves the SQL type of {@code expression} with no lambda arguments in scope.
 * <p>Convenience overload that delegates to the two-argument variant with an
 * empty type mapping.
 *
 * @param expression the expression to type-check
 * @return the resolved SQL type
 */
public SqlType getExpressionSqlType(final Expression expression) {
    return getExpressionSqlType(expression, Collections.emptyMap());
}
|
@Test
public void shouldEvaluateTypeForCreateMapExpression() {
    // Given: a map literal keyed by COL3 whose value references COL0.
    final Expression createMap = new CreateMapExpression(
            ImmutableMap.of(COL3, new UnqualifiedColumnReferenceExp(COL0))
    );

    // When: the type manager resolves the expression's SQL type.
    final SqlType resolvedType = expressionTypeManager.getExpressionSqlType(createMap);

    // Then: key/value types are inferred from the columns → MAP<DOUBLE, BIGINT>.
    assertThat(resolvedType, is(SqlTypes.map(SqlTypes.DOUBLE, SqlTypes.BIGINT)));
}
|
/**
 * Merges per-shard query results for a SELECT statement.
 * <p>A single result set that needs no aggregation rewrite is streamed through
 * untouched; otherwise a merged result is built and decorated (e.g. for
 * pagination).
 *
 * @param queryResults        results returned by each shard
 * @param sqlStatementContext the parsed SELECT statement context
 * @param database            the logical database metadata
 * @param connectionContext   the current connection context
 * @return the merged result
 * @throws SQLException if reading result metadata fails
 */
@Override
public MergedResult merge(final List<QueryResult> queryResults, final SQLStatementContext sqlStatementContext,
                          final ShardingSphereDatabase database, final ConnectionContext connectionContext) throws SQLException {
    // Fast path: one shard and no aggregation rewrite means nothing to merge.
    final boolean singlePlainResult = 1 == queryResults.size() && !isNeedAggregateRewrite(sqlStatementContext);
    if (singlePlainResult) {
        return new IteratorStreamMergedResult(queryResults);
    }
    final SelectStatementContext selectStatementContext = (SelectStatementContext) sqlStatementContext;
    // Column labels are identical across shards; index them from the first result.
    final Map<String, Integer> labelIndexes = getColumnLabelIndexMap(queryResults.get(0));
    selectStatementContext.setIndexes(labelIndexes);
    final MergedResult merged = build(queryResults, selectStatementContext, labelIndexes, database);
    return decorate(queryResults, selectStatementContext, merged);
}
|
/**
 * When a MySQL SELECT combines GROUP BY and ORDER BY on the same index column
 * with a LIMIT clause, the merger must produce a group-by stream result wrapped
 * in a limit decorator.
 */
@Test
void assertBuildGroupByStreamMergedResultWithMySQLLimit() throws SQLException {
    final ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "MySQL"));
    ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
    when(database.getSchema(DefaultDatabase.LOGIC_NAME)).thenReturn(mock(ShardingSphereSchema.class));
    // GROUP BY and ORDER BY target the same projection index (1), which enables
    // the streaming (rather than memory) group-by merge path.
    MySQLSelectStatement selectStatement = (MySQLSelectStatement) buildSelectStatement(new MySQLSelectStatement());
    selectStatement.setGroupBy(new GroupBySegment(0, 0, Collections.singletonList(new IndexOrderByItemSegment(0, 0, 1, OrderDirection.DESC, NullsOrderType.FIRST))));
    selectStatement.setOrderBy(new OrderBySegment(0, 0, Collections.singletonList(new IndexOrderByItemSegment(0, 0, 1, OrderDirection.DESC, NullsOrderType.FIRST))));
    selectStatement.setProjections(new ProjectionsSegment(0, 0));
    // LIMIT 1 forces the decorator layer on top of the merged result.
    selectStatement.setLimit(new LimitSegment(0, 0, new NumberLiteralLimitValueSegment(0, 0, 1L), null));
    SelectStatementContext selectStatementContext = new SelectStatementContext(createShardingSphereMetaData(database), Collections.emptyList(),
            selectStatement, DefaultDatabase.LOGIC_NAME, Collections.emptyList());
    MergedResult actual = resultMerger.merge(createQueryResults(), selectStatementContext, createDatabase(), mock(ConnectionContext.class));
    // Outer layer applies LIMIT; inner layer is the streaming group-by merge.
    assertThat(actual, instanceOf(LimitDecoratorMergedResult.class));
    assertThat(((LimitDecoratorMergedResult) actual).getMergedResult(), instanceOf(GroupByStreamMergedResult.class));
}
|
/**
 * Returns the service that manages server-side iterators over replicated map
 * entries.
 *
 * @return the replicated map iteration service
 */
public ReplicatedMapIterationService getIterationService() {
    return iterationService;
}
|
@Test
public void testCleanups_StaleIterators() {
    // Populate a replicated map so iterators over it hold real data.
    final String mapName = randomMapName();
    for (int key = 0; key < 10000; key++) {
        hazelcastInstance.getReplicatedMap(mapName).put(key, key);
    }
    final ReplicatedMapService service = getNodeEngineImpl(hazelcastInstance).getService(ReplicatedMapService.SERVICE_NAME);
    // Create an iterator and never advance it, making it stale.
    service.getIterationService().createIterator(mapName, 0, UuidUtil.newUnsecureUUID());
    assertThat(service.getIterationService().getIteratorManager().getKeySet()).hasSize(1);
    // The background cleanup must eventually drop the abandoned iterator.
    assertTrueEventually(() -> assertThat(service.getIterationService().getIteratorManager().getKeySet()).isEmpty());
}
|
/**
 * Returns the servable path of the analytics JavaScript, if any.
 * <p>Only relative paths are accepted: absolute paths, parent-directory
 * traversals and full URLs are rejected. An accepted path is returned with a
 * leading slash prepended.
 *
 * @return the normalized path, or empty when unset or unsafe
 */
@Override
public Optional<String> getUrlPathToJs() {
    if (analytics == null) {
        return Optional.empty();
    }
    String path = analytics.getUrlPathToJs();
    if (path == null) {
        return Optional.empty();
    }
    // Reject absolute paths, traversals and URLs; serve relative paths only.
    boolean safeRelativePath = !path.startsWith("/") && !path.contains("..") && !path.contains("://");
    return safeRelativePath ? Optional.of("/" + path) : Optional.empty();
}
|
@Test
public void return_empty_if_path_is_an_url() {
    // A fully qualified URL must be rejected; only relative paths are served.
    WebAnalytics urlAnalytics = newWebAnalytics("http://foo");
    WebAnalyticsLoaderImpl loader = new WebAnalyticsLoaderImpl(new WebAnalytics[] {urlAnalytics});
    assertThat(loader.getUrlPathToJs()).isEmpty();
}
|
/**
 * Builds a dish from a recipe by scaling each ingredient's percentage
 * proportion to grams of the recipe's base portion.
 *
 * @param recipe the recipe providing ingredient proportions and base portion size
 * @return a dish holding the per-product gram amounts and the source recipe
 */
public static Dish createDish(Recipe recipe) {
    Map<Product, BigDecimal> gramsPerProduct = new HashMap<>();
    BigDecimal basePortion = recipe.getBasePortionInGrams();
    recipe.getIngredientsProportion().forEach((product, proportion) -> {
        // proportion is a percentage; FLOOR at 2 decimals matches the original rounding.
        BigDecimal fraction = proportion.divide(BigDecimal.valueOf(100), 2, RoundingMode.FLOOR);
        gramsPerProduct.put(product, basePortion.multiply(fraction));
    });
    return new Dish(gramsPerProduct, recipe);
}
|
@Test
void calculateNutrients_positiveValue() {
    // Builds a dish from the shared recipe fixture using the two-argument
    // overload; presumably 1300 is the target portion in grams — TODO confirm.
    Dish dish = Dish.createDish(recipe, BigDecimal.valueOf(1300));
    // Expected totals come from the fixture's per-portion values scaled up.
    assertAll("Should double the values",
        () -> assertEquals(new BigDecimal("1300"), dish.getNutrients().getCalories().getTotalCalories()),
        () -> assertEquals(new BigDecimal("40"), dish.getNutrients().getCarbohydrates().getTotalCarbohydrates()),
        () -> assertEquals(new BigDecimal("60"), dish.getNutrients().getProteins().getTotalProteins()),
        () -> assertEquals(new BigDecimal("100"), dish.getNutrients().getFats().getTotalFats()));
}
|
/**
 * Routes one incoming record into the window manager.
 * <p>Always returns {@code null}: windowed output is emitted by the window
 * lifecycle callbacks, not by this per-record call.
 *
 * @param input   the deserialized record value
 * @param context the Pulsar Function context for the current record
 * @return always {@code null}
 * @throws Exception if initialization or record routing fails
 */
@Override
public X process(T input, Context context) throws Exception {
    // Lazily initialize window state from the context on the first record.
    if (!this.initialized) {
        initialize(context);
    }
    // record must be PulsarFunctionRecord.
    Record<T> record = (Record<T>) context.getCurrentRecord();
    // windows function processing semantics requires separate processing
    // ATMOST_ONCE acks immediately, before any window handling can fail.
    if (windowConfig.getProcessingGuarantees() == WindowConfig.ProcessingGuarantees.ATMOST_ONCE) {
        record.ack();
    }
    if (isEventTime()) {
        // Event-time mode: timestamp comes from the configured extractor.
        long ts = this.timestampExtractor.extractTimestamp(record.getValue());
        // track() returns false when the event is behind the watermark (late data).
        if (this.waterMarkEventGenerator.track(record.getTopicName().get(), ts)) {
            this.windowManager.add(record, ts, record);
        } else {
            if (this.windowConfig.getLateDataTopic() != null) {
                // Route late records to the configured side topic instead of dropping them.
                context.newOutputMessage(this.windowConfig.getLateDataTopic(), null).value(input).sendAsync();
            } else {
                log.info(String.format(
                        "Received a late tuple %s with ts %d. This will not be " + "processed"
                                + ".", input, ts));
            }
        }
    } else {
        // Processing-time mode: window records by arrival time.
        this.windowManager.add(record, System.currentTimeMillis(), record);
    }
    return null;
}
|
/**
 * Configuring a late-data topic without a timestamp extractor is invalid:
 * lateness can only be determined in event-time mode, so initialization must
 * fail with a descriptive {@link IllegalArgumentException}.
 */
@Test
public void testPrepareLateTupleStreamWithoutTs() throws Exception {
    context = mock(Context.class);
    doReturn("test-function").when(context).getFunctionName();
    doReturn("test-namespace").when(context).getNamespace();
    doReturn("test-tenant").when(context).getTenant();
    doReturn(Collections.singleton("test-source-topic")).when(context).getInputTopics();
    doReturn("test-sink-topic").when(context).getOutputTopic();
    // Late-data topic set, but no TimestampExtractorClassName — the invalid combination.
    WindowConfig windowConfig = new WindowConfig();
    windowConfig.setWindowLengthDurationMs(20L);
    windowConfig.setSlidingIntervalDurationMs(10L);
    windowConfig.setLateDataTopic("$late");
    windowConfig.setMaxLagMs(5L);
    windowConfig.setWatermarkEmitIntervalMs(10L);
    windowConfig.setActualWindowFunctionClassName(TestFunction.class.getName());
    // Round-trip through Gson to mimic how user config arrives as a Map.
    doReturn(Optional.of(new Gson().fromJson(new Gson().toJson(windowConfig), Map.class)))
            .when(context).getUserConfigValue(WindowConfig.WINDOW_CONFIG_KEY);
    try {
        testWindowedPulsarFunction.process(10L, context);
        fail();
    } catch (IllegalArgumentException e) {
        assertEquals(e.getMessage(), "Late data topic can be defined only when specifying a "
                + "timestamp extractor class");
    }
}
|
/**
 * Fetches partition-level metadata for a Hive table.
 * <p>Non-empty partition values address a concrete partition of a partitioned
 * table; an empty list means the table is unpartitioned, so table-level
 * storage descriptor and parameters are used instead.
 *
 * @param dbName          the Hive database name
 * @param tblName         the Hive table name
 * @param partitionValues partition key values, or an empty list for an
 *                        unpartitioned table
 * @return the converted partition metadata
 */
@Override
public Partition getPartition(String dbName, String tblName, List<String> partitionValues) {
    StorageDescriptor sd;
    Map<String, String> params;
    if (!partitionValues.isEmpty()) {
        org.apache.hadoop.hive.metastore.api.Partition partition =
                client.getPartition(dbName, tblName, partitionValues);
        sd = partition.getSd();
        params = partition.getParameters();
    } else {
        // Unpartitioned table: fall back to the table's own descriptor/params.
        org.apache.hadoop.hive.metastore.api.Table table = client.getTable(dbName, tblName);
        sd = table.getSd();
        params = table.getParameters();
    }
    return HiveMetastoreApiConverter.toPartition(sd, params);
}
|
@Test
public void testGetPartition() {
    HiveMetaClient mockedClient = new MockedHiveMetaClient();
    HiveMetastore hiveMetastore = new HiveMetastore(mockedClient, "hive_catalog", MetastoreType.HMS);
    // Partitioned lookup: concrete values resolve partition-level metadata.
    com.starrocks.connector.hive.Partition partition =
            hiveMetastore.getPartition("db1", "tbl1", Lists.newArrayList("par1"));
    Assert.assertEquals(ORC, partition.getFileFormat());
    Assert.assertEquals("100", partition.getParameters().get(TOTAL_SIZE));
    // Unpartitioned lookup: empty values fall back to table-level metadata.
    partition = hiveMetastore.getPartition("db1", "tbl1", Lists.newArrayList());
    Assert.assertEquals("100", partition.getParameters().get(TOTAL_SIZE));
}
|
/**
 * Returns all recorded attempt reports for the requested application.
 *
 * @param request carries the application id to look up
 * @return a response wrapping the application's attempt reports
 * @throws YarnException if the history store rejects the lookup
 * @throws IOException   if reading the history store fails
 */
@Override
public GetApplicationAttemptsResponse getApplicationAttempts(
    GetApplicationAttemptsRequest request) throws YarnException, IOException {
  // Copy the stored attempt reports into a list for the response payload.
  ArrayList<ApplicationAttemptReport> attemptReports =
      new ArrayList<ApplicationAttemptReport>(
          history.getApplicationAttempts(request.getApplicationId()).values());
  return GetApplicationAttemptsResponse.newInstance(attemptReports);
}
|
@Test
void testApplicationAttempts() throws IOException, YarnException {
    // Two attempts of the same application are expected in the history store.
    ApplicationId applicationId = ApplicationId.newInstance(0, 1);
    ApplicationAttemptId firstAttemptId =
        ApplicationAttemptId.newInstance(applicationId, 1);
    ApplicationAttemptId secondAttemptId =
        ApplicationAttemptId.newInstance(applicationId, 2);
    GetApplicationAttemptsRequest attemptsRequest =
        GetApplicationAttemptsRequest.newInstance(applicationId);
    GetApplicationAttemptsResponse attemptsResponse =
        clientService.getApplicationAttempts(attemptsRequest);
    List<ApplicationAttemptReport> reports =
        attemptsResponse.getApplicationAttemptList();
    assertNotNull(reports);
    // Reports come back in attempt order.
    assertEquals(firstAttemptId, reports.get(0).getApplicationAttemptId());
    assertEquals(secondAttemptId, reports.get(1).getApplicationAttemptId());
}
|
@Subscribe
public void onPostMenuSort(PostMenuSort postMenuSort)
{
	// The menu is not rebuilt while open, so swapping now would reapply the
	// swap repeatedly; bail out until it closes.
	if (client.isMenuOpen())
	{
		return;
	}

	final MenuEntry[] menuEntries = client.getMenuEntries();

	// Index each entry's plain-text option so lookups during swapping are O(1).
	optionIndexes.clear();
	for (int i = 0; i < menuEntries.length; i++)
	{
		optionIndexes.put(Text.removeTags(menuEntries[i].getOption()).toLowerCase(), i);
	}

	// Apply the configured swaps entry by entry.
	for (int i = 0; i < menuEntries.length; i++)
	{
		swapMenuEntry(null, menuEntries, i, menuEntries[i]);
	}

	if (config.removeDeadNpcMenus())
	{
		removeDeadNpcs();
	}
}
|
/**
 * With the "swap Pay" option enabled, the Pay entry must be moved into the
 * default (Talk-to) slot. When multiple Pay variants exist — "Pay (south)" and
 * "Pay (north)" here — only the topmost variant is swapped; the other keeps its
 * relative position.
 */
@Test
public void testContains()
{
	when(config.swapPay()).thenReturn(true);

	// Menu entries are listed bottom-up: the last element is the default action.
	entries = new MenuEntry[]{
		menu("Cancel", "", MenuAction.CANCEL),
		menu("Examine", "Kragen", MenuAction.EXAMINE_NPC),
		menu("Walk here", "", MenuAction.WALK),
		menu("Pay (south)", "Kragen", MenuAction.NPC_FOURTH_OPTION),
		menu("Pay (north)", "Kragen", MenuAction.NPC_THIRD_OPTION),
		menu("Talk-to", "Kragen", MenuAction.NPC_FIRST_OPTION),
	};

	menuEntrySwapperPlugin.onPostMenuSort(new PostMenuSort());

	// Capture the rewritten menu and verify "Pay (north)" now occupies the
	// default slot, swapped with "Talk-to"; "Pay (south)" is untouched.
	ArgumentCaptor<MenuEntry[]> argumentCaptor = ArgumentCaptor.forClass(MenuEntry[].class);
	verify(client).setMenuEntries(argumentCaptor.capture());
	assertArrayEquals(new MenuEntry[]{
		menu("Cancel", "", MenuAction.CANCEL),
		menu("Examine", "Kragen", MenuAction.EXAMINE_NPC),
		menu("Walk here", "", MenuAction.WALK),
		menu("Pay (south)", "Kragen", MenuAction.NPC_FOURTH_OPTION),
		menu("Talk-to", "Kragen", MenuAction.NPC_FIRST_OPTION),
		menu("Pay (north)", "Kragen", MenuAction.NPC_THIRD_OPTION),
	}, argumentCaptor.getValue());
}
|
/**
 * Creates a fixed-size thread pool with {@code corePoolSize} threads and an
 * unbounded work queue, matching {@link java.util.concurrent.Executors#newFixedThreadPool(int)}
 * semantics.
 * <p>Previously this used a {@code SynchronousQueue}, which has zero capacity:
 * once all threads were busy, new tasks were rejected instead of queued —
 * contradicting fixed-pool semantics. An unbounded {@code LinkedBlockingQueue}
 * queues excess tasks instead.
 *
 * @param corePoolSize the fixed number of pool threads
 * @return the configured executor
 */
public static ThreadPoolExecutor newFixedThreadPool(int corePoolSize) {
    return new ThreadPoolExecutor(corePoolSize,
        corePoolSize,
        0,
        TimeUnit.MILLISECONDS,
        new java.util.concurrent.LinkedBlockingQueue<Runnable>());
}
|
@Test
public void newFixedThreadPool2() throws Exception {
    // Build a fixed pool with explicit queue and factory, then check that
    // every constructor argument is reflected by the executor.
    BlockingQueue<Runnable> workQueue = new SynchronousQueue<Runnable>();
    ThreadFactory namedFactory = new NamedThreadFactory("xxx");
    ThreadPoolExecutor pool = ThreadPoolUtils.newFixedThreadPool(10, workQueue, namedFactory);
    Assert.assertEquals(pool.getCorePoolSize(), 10);
    Assert.assertEquals(pool.getMaximumPoolSize(), 10);
    Assert.assertEquals(pool.getQueue(), workQueue);
    Assert.assertEquals(pool.getThreadFactory(), namedFactory);
}
|
/**
 * Handles the AUTH_GET_USER admin command: looks up a user by name and
 * returns it serialized as JSON in the response body.
 * <p>The async lookup is joined so the fully populated response can be
 * returned synchronously.
 *
 * @param ctx     the channel context of the calling client
 * @param request the remoting request carrying a {@link GetUserRequestHeader}
 * @return SUCCESS with a JSON body when the user exists (empty body when it
 *         does not), or SYSTEM_ERROR when the username is blank or lookup fails
 * @throws RemotingCommandException if the request header cannot be decoded
 */
private RemotingCommand getUser(ChannelHandlerContext ctx,
    RemotingCommand request) throws RemotingCommandException {
    final RemotingCommand response = RemotingCommand.createResponseCommand(null);
    GetUserRequestHeader requestHeader = request.decodeCommandCustomHeader(GetUserRequestHeader.class);
    // Reject blank usernames up front without hitting the metadata manager.
    if (StringUtils.isBlank(requestHeader.getUsername())) {
        response.setCode(ResponseCode.SYSTEM_ERROR);
        response.setRemark("The username is blank");
        return response;
    }
    // The callbacks below mutate the shared response; join() guarantees they
    // have run before the response is returned.
    this.brokerController.getAuthenticationMetadataManager().getUser(requestHeader.getUsername())
        .thenAccept(user -> {
            response.setCode(ResponseCode.SUCCESS);
            // A missing user is still SUCCESS, just with no body.
            if (user != null) {
                UserInfo userInfo = UserConverter.convertUser(user);
                response.setBody(JSON.toJSONString(userInfo).getBytes(StandardCharsets.UTF_8));
            }
        })
        .exceptionally(ex -> {
            LOGGER.error("get user {} error", requestHeader.getUsername(), ex);
            return handleAuthException(response, ex);
        })
        .join();
    return response;
}
|
@Test
public void testGetUser() throws RemotingCommandException {
    // The metadata manager resolves "abc" to a normal user.
    when(authenticationMetadataManager.getUser(eq("abc")))
            .thenReturn(CompletableFuture.completedFuture(User.of("abc", "123", UserType.NORMAL)));
    GetUserRequestHeader header = new GetUserRequestHeader();
    header.setUsername("abc");
    RemotingCommand getUserRequest = RemotingCommand.createRequestCommand(RequestCode.AUTH_GET_USER, header);
    getUserRequest.setVersion(441);
    getUserRequest.addExtField("AccessKey", "rocketmq");
    getUserRequest.makeCustomHeaderToNet();
    RemotingCommand getUserResponse = adminBrokerProcessor.processRequest(handlerContext, getUserRequest);
    assertThat(getUserResponse.getCode()).isEqualTo(ResponseCode.SUCCESS);
    // The response body carries the user serialized as JSON.
    UserInfo returnedUser = JSON.parseObject(new String(getUserResponse.getBody()), UserInfo.class);
    assertThat(returnedUser.getUsername()).isEqualTo("abc");
    assertThat(returnedUser.getPassword()).isEqualTo("123");
    assertThat(returnedUser.getUserType()).isEqualTo("Normal");
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.