| focal_method | test_case |
|---|---|
@Override
public AccessResource parse(RemotingCommand request, String remoteAddr) {
return PlainAccessResource.parse(request, remoteAddr);
}
|
@Test
public void validateForAdminCommandWithOutAclRPCHook() {
RemotingCommand consumerOffsetAdminRequest = RemotingCommand.createRequestCommand(RequestCode.GET_ALL_CONSUMER_OFFSET, null);
plainAccessValidator.parse(consumerOffsetAdminRequest, "192.168.0.1:9876");
RemotingCommand subscriptionGroupAdminRequest = RemotingCommand.createRequestCommand(RequestCode.GET_ALL_SUBSCRIPTIONGROUP_CONFIG, null);
plainAccessValidator.parse(subscriptionGroupAdminRequest, "192.168.0.1:9876");
RemotingCommand delayOffsetAdminRequest = RemotingCommand.createRequestCommand(RequestCode.GET_ALL_DELAY_OFFSET, null);
plainAccessValidator.parse(delayOffsetAdminRequest, "192.168.0.1:9876");
RemotingCommand allTopicConfigAdminRequest = RemotingCommand.createRequestCommand(RequestCode.GET_ALL_TOPIC_CONFIG, null);
plainAccessValidator.parse(allTopicConfigAdminRequest, "192.168.0.1:9876");
}
|
@Bean
public ShenyuContextDecorator motanShenyuContextDecorator() {
return new MotanShenyuContextDecorator();
}
|
@Test
public void testMotanShenyuContextDecorator() {
applicationContextRunner.run(context -> {
ShenyuContextDecorator subscriber = context.getBean("motanShenyuContextDecorator", ShenyuContextDecorator.class);
assertNotNull(subscriber);
}
);
}
|
@Override
public HttpResponse validateAndCreate(@JsonBody JSONObject request) {
String accessToken = (String) request.get("accessToken");
if(accessToken == null){
throw new ServiceException.BadRequestException("accessToken is required");
}
accessToken = accessToken.trim();
try {
User authenticatedUser = getAuthenticatedUser();
checkPermission();
HttpURLConnection connection = connect(String.format("%s/%s", getUri(), "user"), accessToken);
validateAccessTokenScopes(connection);
String data = IOUtils.toString(HttpRequest.getInputStream(connection), Charset.defaultCharset());
GHUser user = GithubScm.getMappingObjectReader().forType(GHUser.class).readValue(data);
if(user.getEmail() != null){
Mailer.UserProperty p = authenticatedUser.getProperty(Mailer.UserProperty.class);
// XXX: If there is already an email address for this user, should we update it with
// the one from GitHub?
if (p==null){
authenticatedUser.addProperty(new Mailer.UserProperty(user.getEmail()));
}
}
// Now we know the token is valid. Let's find the credential
String credentialId = createCredentialId(getUri());
StandardUsernamePasswordCredentials githubCredential =
CredentialsUtils.findCredential(credentialId, StandardUsernamePasswordCredentials.class, new BlueOceanDomainRequirement());
final StandardUsernamePasswordCredentials credential =
new UsernamePasswordCredentialsImpl(CredentialsScope.USER, credentialId, getCredentialDescription(),
authenticatedUser.getId(), accessToken);
if(githubCredential == null) {
CredentialsUtils.createCredentialsInUserStore(
credential, authenticatedUser, getCredentialDomainName(),
Collections.singletonList(new BlueOceanDomainSpecification()));
}else{
CredentialsUtils.updateCredentialsInUserStore(
githubCredential, credential, authenticatedUser, getCredentialDomainName(),
Collections.singletonList(new BlueOceanDomainSpecification()));
}
return createResponse(credential.getId());
} catch (IOException e) {
if (e instanceof MalformedURLException || e instanceof UnknownHostException) {
throw new ServiceException.BadRequestException(
new ErrorMessage(400, "Invalid apiUrl").add(
new ErrorMessage.Error("apiUrl", ErrorMessage.Error.ErrorCodes.INVALID.toString(), e.getMessage())
)
);
}
throw new ServiceException.UnexpectedErrorException(e.getMessage());
}
}
|
@Test
public void validateAndCreatePaddedToken() throws Exception {
validateAndCreate(" 12345 ");
}
|
public int computeThreshold(StreamConfig streamConfig, CommittingSegmentDescriptor committingSegmentDescriptor,
@Nullable SegmentZKMetadata committingSegmentZKMetadata, String newSegmentName) {
long desiredSegmentSizeBytes = streamConfig.getFlushThresholdSegmentSizeBytes();
if (desiredSegmentSizeBytes <= 0) {
desiredSegmentSizeBytes = StreamConfig.DEFAULT_FLUSH_THRESHOLD_SEGMENT_SIZE_BYTES;
}
long optimalSegmentSizeBytesMin = desiredSegmentSizeBytes / 2;
double optimalSegmentSizeBytesMax = desiredSegmentSizeBytes * 1.5;
if (committingSegmentZKMetadata == null) { // first segment of the partition, hence committing segment is null
if (_latestSegmentRowsToSizeRatio > 0) { // new partition group added case
long targetSegmentNumRows = (long) (desiredSegmentSizeBytes * _latestSegmentRowsToSizeRatio);
targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
"Committing segment zk metadata is not available, using prev ratio {}, setting rows threshold for {} as {}",
_latestSegmentRowsToSizeRatio, newSegmentName, targetSegmentNumRows);
return (int) targetSegmentNumRows;
} else {
final int autotuneInitialRows = streamConfig.getFlushAutotuneInitialRows();
SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
"Committing segment zk metadata is not available, setting threshold for {} as {}", newSegmentName,
autotuneInitialRows);
return autotuneInitialRows;
}
}
final long committingSegmentSizeBytes = committingSegmentDescriptor.getSegmentSizeBytes();
if (committingSegmentSizeBytes <= 0 // repair segment case
|| SegmentCompletionProtocol.REASON_FORCE_COMMIT_MESSAGE_RECEIVED.equals(
committingSegmentDescriptor.getStopReason())) {
String reason = committingSegmentSizeBytes <= 0 //
? "Committing segment size is not available" //
: "Committing segment is due to force-commit";
final int targetNumRows = committingSegmentZKMetadata.getSizeThresholdToFlushSegment();
SegmentSizeBasedFlushThresholdUpdater.LOGGER.info("{}, setting thresholds from previous segment for {} as {}",
reason, newSegmentName, targetNumRows);
return targetNumRows;
}
final long timeConsumed = _clock.millis() - committingSegmentZKMetadata.getCreationTime();
final long numRowsConsumed = committingSegmentZKMetadata.getTotalDocs();
final int numRowsThreshold = committingSegmentZKMetadata.getSizeThresholdToFlushSegment();
SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
"{}: Data from committing segment: Time {} numRows {} threshold {} segmentSize(bytes) {}",
newSegmentName, TimeUtils.convertMillisToPeriod(timeConsumed), numRowsConsumed, numRowsThreshold,
committingSegmentSizeBytes);
double currentRatio = (double) numRowsConsumed / committingSegmentSizeBytes;
if (_latestSegmentRowsToSizeRatio > 0) {
_latestSegmentRowsToSizeRatio =
CURRENT_SEGMENT_RATIO_WEIGHT * currentRatio + PREVIOUS_SEGMENT_RATIO_WEIGHT * _latestSegmentRowsToSizeRatio;
} else {
_latestSegmentRowsToSizeRatio = currentRatio;
}
// If the number of rows consumed is less than what we set as target in metadata, then the segment hit time limit.
// We can set the new target to be slightly higher than the actual number of rows consumed so that we can aim
// to hit the row limit next time around.
//
// If the size of the committing segment is higher than the desired segment size, then the administrator has
// set a lower segment size threshold. We should treat this case as if we have hit the row limit and not the time
// limit.
//
// TODO: add feature to adjust time threshold as well
// If we set the new threshold to numRowsConsumed, we might keep oscillating back and forth between the doubling
// limit and the time threshold being hit. If we set the new threshold to
// committingSegmentZKMetadata.getSizeThresholdToFlushSegment(), we might end up using a lot more memory than
// required for the segment. So, until we add a feature to adjust the time threshold, we use a minor bump
// strategy and only slightly bump the threshold based on numRowsConsumed.
if (numRowsConsumed < numRowsThreshold && committingSegmentSizeBytes < desiredSegmentSizeBytes) {
final long timeThresholdMillis = streamConfig.getFlushThresholdTimeMillis();
long currentNumRows = numRowsConsumed;
StringBuilder logStringBuilder = new StringBuilder().append("Time threshold reached. ");
if (timeThresholdMillis < timeConsumed) {
// The administrator has reduced the time threshold. Adjust the
// number of rows to match the average consumption rate on the partition.
currentNumRows = timeThresholdMillis * numRowsConsumed / timeConsumed;
logStringBuilder.append(" Detected lower time threshold, adjusting numRowsConsumed to ").append(currentNumRows)
.append(". ");
}
long targetSegmentNumRows = (long) (currentNumRows * ROWS_MULTIPLIER_WHEN_TIME_THRESHOLD_HIT);
targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
logStringBuilder.append("Setting segment size for {} as {}");
SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(logStringBuilder.toString(),
newSegmentName, targetSegmentNumRows);
return (int) targetSegmentNumRows;
}
long targetSegmentNumRows;
if (committingSegmentSizeBytes < optimalSegmentSizeBytesMin) {
targetSegmentNumRows = numRowsConsumed + numRowsConsumed / 2;
} else if (committingSegmentSizeBytes > optimalSegmentSizeBytesMax) {
targetSegmentNumRows = numRowsConsumed / 2;
} else {
if (_latestSegmentRowsToSizeRatio > 0) {
targetSegmentNumRows = (long) (desiredSegmentSizeBytes * _latestSegmentRowsToSizeRatio);
} else {
targetSegmentNumRows = (long) (desiredSegmentSizeBytes * currentRatio);
}
}
targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
"Committing segment size {}, current ratio {}, setting threshold for {} as {}",
committingSegmentSizeBytes, _latestSegmentRowsToSizeRatio, newSegmentName, targetSegmentNumRows);
return (int) targetSegmentNumRows;
}
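// Illustrative walkthrough (not part of the original source; numbers are hypothetical):
// with a desired segment size of 100MB, optimalSegmentSizeBytesMin is 50MB. If the
// committing segment hit its row threshold at numRowsConsumed = 1,000,000 rows but only
// reached 40MB, then currentRatio = 1,000,000 / 40MB = 0.025 rows/byte and, since
// 40MB < 50MB, the next threshold is bumped to numRowsConsumed + numRowsConsumed / 2
// = 1,500,000 rows.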
|
@Test
public void testUseLastSegmentSizeTimesRatioIfFirstSegmentInPartitionAndNewPartitionGroupMinimumSize10000Rows() {
double segmentRowsToSizeRatio = 1.5;
long segmentSizeBytes = 2000L;
SegmentFlushThresholdComputer computer =
new SegmentFlushThresholdComputer(Clock.systemUTC(), segmentRowsToSizeRatio);
StreamConfig streamConfig = mock(StreamConfig.class);
when(streamConfig.getFlushThresholdSegmentSizeBytes()).thenReturn(segmentSizeBytes);
CommittingSegmentDescriptor committingSegmentDescriptor = mock(CommittingSegmentDescriptor.class);
int threshold = computer.computeThreshold(streamConfig, committingSegmentDescriptor, null, "newSegmentName");
assertEquals(threshold, 10000);
}
|
@Override
public Optional<P> authenticate(C credentials) throws AuthenticationException {
try (Timer.Context context = gets.time()) {
return cache.get(credentials);
} catch (CompletionException e) {
final Throwable cause = e.getCause();
if (cause instanceof InvalidCredentialsException) {
return Optional.empty();
}
if (cause instanceof AuthenticationException) {
throw (AuthenticationException) cause;
}
if (cause == null) {
throw new AuthenticationException(e);
}
throw new AuthenticationException(cause);
}
}
|
@Test
void cachesTheFirstReturnedPrincipal() throws Exception {
assertThat(cached.authenticate("credentials")).isEqualTo(Optional.<Principal>of(new PrincipalImpl("principal")));
assertThat(cached.authenticate("credentials")).isEqualTo(Optional.<Principal>of(new PrincipalImpl("principal")));
verify(underlying, times(1)).authenticate("credentials");
}
|
public FluentBackoff withMaxBackoff(Duration maxBackoff) {
checkArgument(
maxBackoff.getMillis() > 0, "maxBackoff %s must be at least 1 millisecond", maxBackoff);
return new FluentBackoff(
exponent,
initialBackoff,
maxBackoff,
maxCumulativeBackoff,
maxRetries,
throttledTimeCounter);
}
|
@Test
public void testInvalidMaxBackoff() {
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage("maxBackoff PT0S must be at least 1 millisecond");
defaultBackoff.withMaxBackoff(Duration.ZERO);
}
|
public static void validateValue(Schema schema, Object value) {
validateValue(null, schema, value);
}
|
@Test
public void testValidateFieldWithInvalidValueMismatchTimestamp() {
long longValue = 1000L;
String fieldName = "field";
ConnectSchema.validateValue(fieldName, Schema.INT64_SCHEMA, longValue);
assertInvalidValueForSchema(fieldName, Timestamp.SCHEMA, longValue,
"Invalid Java object for schema \"org.apache.kafka.connect.data.Timestamp\" " +
"with type INT64: class java.lang.Long for field: \"field\"");
}
|
@Override
public void beginShutdown(String source) {
lock.lock();
try {
if (shuttingDown) {
log.debug("{}: Event queue is already shutting down.", source);
return;
}
log.info("{}: shutting down event queue.", source);
shuttingDown = true;
eventHandler.cond.signal();
} finally {
lock.unlock();
}
}
|
@Test
public void testShutdownBeforeDeferred() throws Exception {
try (KafkaEventQueue queue = new KafkaEventQueue(Time.SYSTEM, logContext, "testShutdownBeforeDeferred")) {
final AtomicInteger count = new AtomicInteger(0);
CompletableFuture<Integer> future = new CompletableFuture<>();
queue.scheduleDeferred("myDeferred",
__ -> OptionalLong.of(Time.SYSTEM.nanoseconds() + HOURS.toNanos(1)),
new FutureEvent<>(future, () -> count.getAndAdd(1)));
queue.beginShutdown("testShutdownBeforeDeferred");
assertEquals(RejectedExecutionException.class, assertThrows(ExecutionException.class, () -> future.get()).getCause().getClass());
assertEquals(0, count.get());
}
}
|
@Transactional
public void deleteUsedMemberCoupons(final Long buyerId, final List<Long> usedCoupons) {
memberCouponRepository.deleteByMemberIdAndCouponIdIn(buyerId, usedCoupons);
}
|
@Test
void 멤버가_사용한_쿠폰을_모두_제거한다() { // removes all of the coupons used by a member
// given
Coupon coupon = couponRepository.save(쿠픈_생성_독자_사용_할인율_10_퍼센트()); // fixture: reader-only coupon with 10% discount
MemberCoupon memberCoupon = memberCouponRepository.save(멤버_쿠폰_생성()); // fixture: member coupon
// when
couponService.deleteUsedMemberCoupons(memberCoupon.getMemberId(), List.of(coupon.getId()));
// then
List<MemberCoupon> result = memberCouponRepository.findAllByMemberId(memberCoupon.getMemberId());
assertThat(result).isEmpty();
}
|
public int getHeaderLen() {
return header_len;
}
|
@Test
public void getHeaderLen() {
assertEquals(TestParameters.VP_ITSF_HEADER_LENGTH, chmItsfHeader.getHeaderLen());
}
|
@Deprecated
public static DockerCmdExecFactory getDefaultDockerCmdExecFactory() {
return new JerseyDockerCmdExecFactory();
}
|
@Test
public void testConcurrentClientBuilding() throws Exception {
// we use it to check instance uniqueness
final Set<DockerCmdExecFactory> instances = Collections.synchronizedSet(new HashSet<>());
Runnable runnable = () -> {
DockerCmdExecFactory factory = DockerClientBuilder.getDefaultDockerCmdExecFactory();
// factory created
assertNotNull(factory);
// and is unique
assertFalse(instances.contains(factory));
instances.add(factory);
};
parallel(AMOUNT, runnable);
// set contains all required unique instances
assertEquals(AMOUNT, instances.size());
}
|
@Override
public void write(final MySQLPacketPayload payload, final Object value) {
payload.getByteBuf().writeFloatLE(Float.parseFloat(value.toString()));
}
|
@Test
void assertWrite() {
new MySQLFloatBinaryProtocolValue().write(new MySQLPacketPayload(byteBuf, StandardCharsets.UTF_8), 1.0F);
verify(byteBuf).writeFloatLE(1.0F);
}
|
@Override
public DataSink createDataSink(Context context) {
FactoryHelper.createFactoryHelper(this, context)
.validateExcept(PREFIX_TABLE_PROPERTIES, PREFIX_CATALOG_PROPERTIES);
Map<String, String> allOptions = context.getFactoryConfiguration().toMap();
Map<String, String> catalogOptions = new HashMap<>();
Map<String, String> tableOptions = new HashMap<>();
allOptions.forEach(
(key, value) -> {
if (key.startsWith(PREFIX_TABLE_PROPERTIES)) {
tableOptions.put(key.substring(PREFIX_TABLE_PROPERTIES.length()), value);
} else if (key.startsWith(PaimonDataSinkOptions.PREFIX_CATALOG_PROPERTIES)) {
catalogOptions.put(
key.substring(
PaimonDataSinkOptions.PREFIX_CATALOG_PROPERTIES.length()),
value);
}
});
Options options = Options.fromMap(catalogOptions);
try (Catalog catalog = FlinkCatalogFactory.createPaimonCatalog(options)) {
Preconditions.checkNotNull(
catalog.listDatabases(), "catalog option of Paimon is invalid.");
} catch (Exception e) {
throw new RuntimeException("failed to create or use paimon catalog", e);
}
ZoneId zoneId = ZoneId.systemDefault();
if (!Objects.equals(
context.getPipelineConfiguration().get(PipelineOptions.PIPELINE_LOCAL_TIME_ZONE),
PipelineOptions.PIPELINE_LOCAL_TIME_ZONE.defaultValue())) {
zoneId =
ZoneId.of(
context.getPipelineConfiguration()
.get(PipelineOptions.PIPELINE_LOCAL_TIME_ZONE));
}
String commitUser =
context.getFactoryConfiguration().get(PaimonDataSinkOptions.COMMIT_USER);
String partitionKey =
context.getFactoryConfiguration().get(PaimonDataSinkOptions.PARTITION_KEY);
Map<TableId, List<String>> partitionMaps = new HashMap<>();
if (!partitionKey.isEmpty()) {
for (String tables : partitionKey.split(";")) {
String[] splits = tables.split(":");
if (splits.length == 2) {
TableId tableId = TableId.parse(splits[0]);
List<String> partitions = Arrays.asList(splits[1].split(","));
partitionMaps.put(tableId, partitions);
} else {
throw new IllegalArgumentException(
PaimonDataSinkOptions.PARTITION_KEY.key()
+ " is malformed, please refer to the documents");
}
}
}
PaimonRecordSerializer<Event> serializer = new PaimonRecordEventSerializer(zoneId);
String schemaOperatorUid =
context.getPipelineConfiguration()
.get(PipelineOptions.PIPELINE_SCHEMA_OPERATOR_UID);
return new PaimonDataSink(
options,
tableOptions,
commitUser,
partitionMaps,
serializer,
zoneId,
schemaOperatorUid);
}
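// Hypothetical configuration sketch for this factory (option keys are inferred from the
// constants used above and may differ from the real PaimonDataSinkOptions definitions):
//
//   metastore: filesystem
//   warehouse: /tmp/paimon-warehouse
//   catalog.properties.uri: thrift://localhost:9083     <- forwarded to the Paimon catalog
//   table.properties.bucket: 4                          <- forwarded to created tables
//   partition.key: db.table1:col1,col2;db.table2:col3
//
// partition.key is split on ';' into per-table entries of the form "tableId:col,col,...";
// any entry without exactly one ':' fails with the IllegalArgumentException above.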
|
@Test
void testCreateDataSink() {
DataSinkFactory sinkFactory =
FactoryDiscoveryUtils.getFactoryByIdentifier("paimon", DataSinkFactory.class);
Assertions.assertThat(sinkFactory).isInstanceOf(PaimonDataSinkFactory.class);
Configuration conf =
Configuration.fromMap(
ImmutableMap.<String, String>builder()
.put(PaimonDataSinkOptions.METASTORE.key(), "filesystem")
.put(
PaimonDataSinkOptions.WAREHOUSE.key(),
new File(
temporaryFolder.toFile(),
UUID.randomUUID().toString())
.toString())
.build());
DataSink dataSink =
sinkFactory.createDataSink(
new FactoryHelper.DefaultContext(
conf, conf, Thread.currentThread().getContextClassLoader()));
Assertions.assertThat(dataSink).isInstanceOf(PaimonDataSink.class);
}
|
public CatCommand(Logger console, long defaultNumRecords) {
super(console);
this.numRecords = defaultNumRecords;
}
|
@Test
public void testCatCommand() throws IOException {
File file = parquetFile();
CatCommand command = new CatCommand(createLogger(), 0);
command.sourceFiles = Arrays.asList(file.getAbsolutePath());
command.setConf(new Configuration());
Assert.assertEquals(0, command.run());
}
|
public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) {
Objects.requireNonNull(storeName, "storeName cannot be null");
if (topologyMetadata.hasNamedTopologies()) {
throw new IllegalArgumentException("Cannot invoke the getAllMetadataForStore(storeName) method when"
+ "using named topologies, please use the overload that accepts"
+ "a topologyName parameter to identify the correct store");
}
if (!isInitialized()) {
return Collections.emptyList();
}
if (globalStores.contains(storeName)) {
return allMetadata;
}
final Collection<String> sourceTopics = topologyMetadata.sourceTopicsForStore(storeName, null);
if (sourceTopics.isEmpty()) {
return Collections.emptyList();
}
final ArrayList<StreamsMetadata> results = new ArrayList<>();
for (final StreamsMetadata metadata : allMetadata) {
if (metadata.stateStoreNames().contains(storeName) || metadata.standbyStateStoreNames().contains(storeName)) {
results.add(metadata);
}
}
return results;
}
|
@Test
public void shouldThrowIfStoreNameIsNullOnGetAllInstancesWithStore() {
assertThrows(NullPointerException.class, () -> metadataState.getAllMetadataForStore(null));
}
|
public static String getRuleName(final String ruleRow) {
String testVal = ruleRow.toLowerCase();
final int left = testVal.indexOf( DefaultRuleSheetListener.RULE_TABLE_TAG );
return ruleRow.substring( left + DefaultRuleSheetListener.RULE_TABLE_TAG.length() ).trim();
}
|
@Ignore
@Test
public void testInvalidRuleName() {
final String row = "RuleTable This is my rule name (type class)";
assertThatIllegalArgumentException().isThrownBy(() -> getRuleName(row));
}
|
public static String toStr(Object value, String defaultValue) {
return convertQuietly(String.class, value, defaultValue);
}
|
@Test
public void toStrTest() {
final int a = 1;
final long[] b = {1, 2, 3, 4, 5};
assertEquals("[1, 2, 3, 4, 5]", Convert.convert(String.class, b));
final String aStr = Convert.toStr(a);
assertEquals("1", aStr);
final String bStr = Convert.toStr(b);
assertEquals("[1, 2, 3, 4, 5]", Convert.toStr(bStr));
}
|
@Override
public boolean isWrapperFor(final Class<?> iface) {
return false;
}
|
@Test
void assertIsWrapperFor() {
assertFalse(metaData.isWrapperFor(null));
}
|
public void processScheduledTask(String name) throws SharedServiceClientException {
if(RE_CHECK_DOCUMENTS.equals(name)) {
var daysAgo = sharedServiceClient.getSSConfigInt("interval_lost_stolen_check");
for (IdCheckDocument document : idCheckDocumentRepository.findAllWithCreationDateTimeBefore(ZonedDateTime.now().minusDays(daysAgo))) {
var response = dwsClient.checkBvBsn(document.getDocumentType(), document.getDocumentNumber());
if (response.get(STATUS).equals(NOK)) {
digidClient.remoteLog("1577", Map.of(
"document_type", document.getDocumentType(),
"document_number", document.getDocumentNumber(),
"user_app", document.getUserAppId(),
"account_id", document.getAccountId(),
"created_at", document.getCreatedAt().format(DateTimeFormatter.ISO_LOCAL_DATE),
HIDDEN, true
));
}
idCheckDocumentRepository.delete(document);
}
}
}
|
@Test
void processesScheduledTaskStolenDocument() throws SharedServiceClientException {
when(dwsClient.checkBvBsn("document_type", "document_number")).thenReturn(Map.of("status", "NOK"));
when(sharedServiceClient.getSSConfigInt("interval_lost_stolen_check")).thenReturn(7);
service.processScheduledTask("re_check_documents");
verify(digidClient, times(1)).remoteLog("1577", Map.of("document_type", "document_type", "document_number", "document_number","user_app", "user_app_id", "account_id", 1L, "created_at", "2022-10-07", "hidden", true));
}
|
public static void e(String tag, String message, Object... args) {
sLogger.e(tag, message, args);
}
|
@Test
public void error() {
String tag = "TestTag";
String message = "Test message";
LogManager.e(tag, message);
verify(logger).e(tag, message);
}
|
@Udf(description = "Converts a TIMESTAMP value into the"
+ " string representation of the timestamp in the given format. Single quotes in the"
+ " timestamp format can be escaped with '', for example: 'yyyy-MM-dd''T''HH:mm:ssX'"
+ " The system default time zone is used when no time zone is explicitly provided."
+ " The format pattern should be in the format expected"
+ " by java.time.format.DateTimeFormatter")
public String formatTimestamp(
@UdfParameter(
description = "TIMESTAMP value.") final Timestamp timestamp,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.time.format.DateTimeFormatter.") final String formatPattern) {
return formatTimestamp(timestamp, formatPattern, ZoneId.of("GMT").getId());
}
|
@Test
public void testPSTTimeZone() {
// When:
final String result = udf.formatTimestamp(new Timestamp(1534353043000L),
"yyyy-MM-dd HH:mm:ss", "America/Los_Angeles");
// Then:
assertThat(result, is("2018-08-15 10:10:43"));
}
|
public BigDecimal getAmount() {
return BigDecimal.valueOf(cent, currency.getDefaultFractionDigits());
}
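// Worked example (illustrative): for a currency with 2 default fraction digits (e.g. CNY),
// a cent value of 123456 yields BigDecimal.valueOf(123456, 2) = 1234.56, which is the
// cent-to-yuan conversion the test below also asserts via MathUtil.centToYuan(123456).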
|
@Test
public void centToYuanTest() {
final Money money = new Money(1234, 56);
assertEquals(1234.56D, money.getAmount().doubleValue(), 0);
assertEquals(1234.56D, MathUtil.centToYuan(123456), 0);
}
|
RequestQueue<ReadRequest> getReadRequestQueue(FileIOChannel.ID channelID) {
return this.readers[channelID.getThreadNum()].requestQueue;
}
|
@Test
void testExceptionInCallbackRead() throws Exception {
final AtomicBoolean handlerCalled = new AtomicBoolean();
ReadRequest regularRequest =
new ReadRequest() {
@Override
public void requestDone(IOException ioex) {
synchronized (handlerCalled) {
handlerCalled.set(true);
handlerCalled.notifyAll();
}
}
@Override
public void read() {}
};
ReadRequest exceptionThrower =
new ReadRequest() {
@Override
public void requestDone(IOException ioex) {
throw new RuntimeException();
}
@Override
public void read() {}
};
RequestQueue<ReadRequest> rq = ioManager.getReadRequestQueue(ioManager.createChannel());
// queue first an exception thrower, then a regular request.
// we check that the regular request gets successfully handled
rq.add(exceptionThrower);
rq.add(regularRequest);
synchronized (handlerCalled) {
while (!handlerCalled.get()) {
handlerCalled.wait();
}
}
}
|
@Override
public void createApiAccessLog(ApiAccessLogCreateReqDTO createDTO) {
ApiAccessLogDO apiAccessLog = BeanUtils.toBean(createDTO, ApiAccessLogDO.class);
apiAccessLog.setRequestParams(StrUtil.maxLength(apiAccessLog.getRequestParams(), REQUEST_PARAMS_MAX_LENGTH));
apiAccessLog.setResultMsg(StrUtil.maxLength(apiAccessLog.getResultMsg(), RESULT_MSG_MAX_LENGTH));
if (TenantContextHolder.getTenantId() != null) {
apiAccessLogMapper.insert(apiAccessLog);
} else {
// In the extreme case where the context has no tenant, ignore the tenant context to avoid the insert failing!
TenantUtils.executeIgnore(() -> apiAccessLogMapper.insert(apiAccessLog));
}
}
|
@Test
public void testCreateApiAccessLog() {
// Prepare parameters
ApiAccessLogCreateReqDTO createDTO = randomPojo(ApiAccessLogCreateReqDTO.class);
// Invoke
apiAccessLogService.createApiAccessLog(createDTO);
// Assert
ApiAccessLogDO apiAccessLogDO = apiAccessLogMapper.selectOne(null);
assertPojoEquals(createDTO, apiAccessLogDO);
}
|
public static String toUnicodeHex(int value) {
final StringBuilder builder = new StringBuilder(6);
builder.append("\\u");
String hex = toHex(value);
int len = hex.length();
if (len < 4) {
builder.append("0000", 0, 4 - len);// 不足4位补0
}
builder.append(hex);
return builder.toString();
}
|
@Test
public void toUnicodeHexTest() {
String unicodeHex = HexUtil.toUnicodeHex('\u2001');
assertEquals("\\u2001", unicodeHex);
unicodeHex = HexUtil.toUnicodeHex('你');
assertEquals("\\u4f60", unicodeHex);
}
|
void startup(@Observes StartupEvent event) {
if (storageProviderMetricsBinderInstance.isResolvable()) {
storageProviderMetricsBinderInstance.get();
LOGGER.debug("JobRunr StorageProvider MicroMeter Metrics enabled");
}
if (backgroundJobServerMetricsBinderInstance.isResolvable()) {
backgroundJobServerMetricsBinderInstance.get();
LOGGER.debug("JobRunr BackgroundJobServer MicroMeter Metrics enabled");
}
}
|
@Test
void metricsStarterStartsBackgroundJobServerMetricsBinderIfAvailable() {
when(backgroundJobServerMetricsBinderInstance.isResolvable()).thenReturn(true);
jobRunrMetricsStarter.startup(new StartupEvent());
verify(backgroundJobServerMetricsBinderInstance).get();
}
|
public static long readUint32(ByteBuffer buf) throws BufferUnderflowException {
return Integer.toUnsignedLong(buf.order(ByteOrder.LITTLE_ENDIAN).getInt());
}
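// Worked example (illustrative): a buffer holding bytes {0x78, 0x56, 0x34, 0x12} is read
// little-endian as int 0x12345678 = 305419896. Integer.toUnsignedLong keeps values with the
// high bit set non-negative: bytes {0xFF, 0xFF, 0xFF, 0xFF} yield 4294967295L, not -1.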
|
@Test(expected = ArrayIndexOutOfBoundsException.class)
public void testReadUint32ThrowsException2() {
ByteUtils.readUint32(new byte[]{1, 2, 3, 4, 5}, 2);
}
|
public static String normalizeUri(String uri) throws URISyntaxException {
// try to parse using the simpler and faster Camel URI parser
String[] parts = CamelURIParser.fastParseUri(uri);
if (parts != null) {
// we optimized specially if an empty array is returned
if (parts == URI_ALREADY_NORMALIZED) {
return uri;
}
// use the faster and more simple normalizer
return doFastNormalizeUri(parts);
} else {
// use the legacy normalizer as the uri is complex and may have unsafe URL characters
return doComplexNormalizeUri(uri);
}
}
|
@Test
public void testNormalizeUriWhereParameterIsFaulty() throws Exception {
String out = URISupport.normalizeUri("stream:uri?file:///d:/temp/data/log/quickfix.log&scanStream=true");
assertNotNull(out);
}
|
@Override
@SuppressWarnings("assignment")
public ComputeMessageStatsResponse computeMessageStats(Offset offset) throws ApiException {
try {
return client
.computeMessageStats(topicPath, partition, offset, Offset.of(Long.MAX_VALUE))
.get(1, MINUTES);
} catch (Throwable t) {
throw toCanonical(t).underlying;
}
}
|
@Test
public void computeMessageStats_validResponseCached() {
Timestamp minEventTime = Timestamp.newBuilder().setSeconds(1000).setNanos(10).build();
Timestamp minPublishTime = Timestamp.newBuilder().setSeconds(1001).setNanos(11).build();
ComputeMessageStatsResponse response =
ComputeMessageStatsResponse.newBuilder()
.setMessageCount(10)
.setMessageBytes(100)
.setMinimumEventTime(minEventTime.toBuilder().setSeconds(1002).build())
.setMinimumPublishTime(minPublishTime)
.build();
when(mockClient.computeMessageStats(
example(TopicPath.class),
example(Partition.class),
example(Offset.class),
Offset.of(Long.MAX_VALUE)))
.thenReturn(ApiFutures.immediateFuture(response));
assertEquals(reader.computeMessageStats(example(Offset.class)), response);
}
|
public static <T> Partition<T> of(
int numPartitions,
PartitionWithSideInputsFn<? super T> partitionFn,
Requirements requirements) {
Contextful ctfFn =
Contextful.fn(
(T element, Contextful.Fn.Context c) ->
partitionFn.partitionFor(element, numPartitions, c),
requirements);
return new Partition<>(new PartitionDoFn<T>(numPartitions, ctfFn, partitionFn));
}
|
@Test
public void testZeroNumPartitions() {
PCollection<Integer> input = pipeline.apply(Create.of(591));
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage("numPartitions must be > 0");
input.apply(Partition.of(0, new IdentityFn()));
}
|
@Override
public JdbcRecordIterator getRecordIterator(Configuration conf, String partitionColumn, String lowerBound,
String upperBound, int limit, int offset) throws HiveJdbcDatabaseAccessException {
Connection conn = null;
PreparedStatement ps = null;
ResultSet rs = null;
try {
initializeDatabaseConnection(conf);
String tableName = getQualifiedTableName(conf);
// Always use JDBC_QUERY if available both for correctness and performance. JDBC_QUERY can be set by the user
// or the CBO including pushdown optimizations. SELECT all query should be used only when JDBC_QUERY is null.
String sql = firstNonNull(conf.get(Constants.JDBC_QUERY), selectAllFromTable(tableName));
String partitionQuery;
if (partitionColumn != null) {
partitionQuery = addBoundaryToQuery(tableName, sql, partitionColumn, lowerBound, upperBound);
} else {
partitionQuery = addLimitAndOffsetToQuery(sql, limit, offset);
}
LOGGER.info("Query to execute is [{}]", partitionQuery);
conn = dbcpDataSource.getConnection();
ps = conn.prepareStatement(partitionQuery, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
ps.setFetchSize(getFetchSize(conf));
rs = ps.executeQuery();
return new JdbcRecordIterator(this, conn, ps, rs, conf);
}
catch (Exception e) {
LOGGER.error("Caught exception while trying to execute query", e);
cleanupResources(conn, ps, rs);
throw new HiveJdbcDatabaseAccessException("Caught exception while trying to execute query: " + e.getMessage(), e);
}
}
|
@Test
public void testGetRecordIterator() throws HiveJdbcDatabaseAccessException {
Configuration conf = buildConfiguration();
DatabaseAccessor accessor = DatabaseAccessorFactory.getAccessor(conf);
JdbcRecordIterator iterator = accessor.getRecordIterator(conf, null, null, null, 2, 0);
assertThat(iterator, is(notNullValue()));
int count = 0;
while (iterator.hasNext()) {
Map<String, Object> record = iterator.next();
count++;
assertThat(record, is(notNullValue()));
assertThat(record.size(), is(equalTo(7)));
assertThat(record.get("strategy_id"), is(equalTo(count)));
}
assertThat(count, is(equalTo(2)));
iterator.close();
}
|
public static String toStepName(ExecutableStage executableStage) {
/*
* Look for the first/input ParDo/DoFn in this executable stage by
* matching ParDo/DoFn's input PCollection with executable stage's
* input PCollection
*/
Set<PipelineNode.PTransformNode> inputs =
executableStage.getTransforms().stream()
.filter(
transform ->
transform
.getTransform()
.getInputsMap()
.containsValue(executableStage.getInputPCollection().getId()))
.collect(Collectors.toSet());
Set<String> outputIds =
executableStage.getOutputPCollections().stream()
.map(PipelineNode.PCollectionNode::getId)
.collect(Collectors.toSet());
/*
* Look for the last/output ParDo/DoFn in this executable stage by
* matching ParDo/DoFn's output PCollection(s) with executable stage's
* out PCollection(s)
*/
Set<PipelineNode.PTransformNode> outputs =
executableStage.getTransforms().stream()
.filter(
transform ->
CollectionUtils.containsAny(
transform.getTransform().getOutputsMap().values(), outputIds))
.collect(Collectors.toSet());
return String.format("[%s-%s]", toStepName(inputs), toStepName(outputs));
}
|
@Test
public void testExecutableStageWithOutput() {
pipeline
.apply("MyCreateOf", Create.of(KV.of(1L, "1")))
.apply("MyFilterBy", Filter.by(Objects::nonNull))
.apply(GroupByKey.create());
assertEquals("[MyCreateOf-MyFilterBy]", DoFnUtils.toStepName(getOnlyExecutableStage(pipeline)));
}
|
@Override
public Long stringSize(String path) {
return get(stringSizeAsync(path));
}
|
@Test
public void testStringSize() {
RJsonBucket<TestType> al = redisson.getJsonBucket("test", new JacksonCodec<>(TestType.class));
Long s3 = al.stringSize("name");
assertThat(s3).isNull();
TestType t = new TestType();
t.setName("name1");
NestedType nt = new NestedType();
nt.setValue(123);
nt.setValues(Arrays.asList("t1", "t2"));
t.setType(nt);
al.set(t);
long s1 = al.stringSize("name");
assertThat(s1).isEqualTo(5);
List<Long> s2 = al.stringSizeMulti("$.name");
assertThat(s2).containsExactly(5L);
}
|
public void setInitialAttributes(File file, FileAttribute<?>... attrs) {
// default values should already be sanitized by their providers
for (int i = 0; i < defaultValues.size(); i++) {
FileAttribute<?> attribute = defaultValues.get(i);
int separatorIndex = attribute.name().indexOf(':');
String view = attribute.name().substring(0, separatorIndex);
String attr = attribute.name().substring(separatorIndex + 1);
file.setAttribute(view, attr, attribute.value());
}
for (FileAttribute<?> attr : attrs) {
setAttribute(file, attr.name(), attr.value(), true);
}
}
|
@Test
public void testSetInitialAttributes() {
File file = createFile();
service.setInitialAttributes(file);
assertThat(file.getAttributeNames("test")).containsExactly("bar", "baz");
assertThat(file.getAttributeNames("owner")).containsExactly("owner");
assertThat(service.getAttribute(file, "basic:lastModifiedTime")).isInstanceOf(FileTime.class);
assertThat(file.getAttribute("test", "bar")).isEqualTo(0L);
assertThat(file.getAttribute("test", "baz")).isEqualTo(1);
}
|
public static File unzip(String zipFilePath) throws UtilException {
return unzip(zipFilePath, DEFAULT_CHARSET);
}
|
@Test
@Disabled
public void unzipTest() {
final File unzip = ZipUtil.unzip("d:/test/hutool.zip", "d:\\test", CharsetUtil.CHARSET_GBK);
Console.log(unzip);
}
|
@Override public synchronized <T>
T register(String name, String desc, T source) {
MetricsSourceBuilder sb = MetricsAnnotations.newSourceBuilder(source);
final MetricsSource s = sb.build();
MetricsInfo si = sb.info();
String name2 = name == null ? si.name() : name;
final String finalDesc = desc == null ? si.description() : desc;
final String finalName = // be friendly to non-metrics tests
DefaultMetricsSystem.sourceName(name2, !monitoring);
allSources.put(finalName, s);
LOG.debug(finalName +", "+ finalDesc);
if (monitoring) {
registerSource(finalName, finalDesc, s);
}
// We want to re-register the source to pick up new config when the
// metrics system restarts.
register(finalName, new AbstractCallback() {
@Override public void postStart() {
registerSource(finalName, finalDesc, s);
}
});
return source;
}
|
@Test(expected=MetricsException.class) public void testRegisterDupError() {
MetricsSystem ms = new MetricsSystemImpl("test");
TestSource ts = new TestSource("ts");
ms.register(ts);
ms.register(ts);
}
|
@Override
public ConsumerBuilder<T> topic(String... topicNames) {
checkArgument(topicNames != null && topicNames.length > 0,
"Passed in topicNames should not be null or empty.");
return topics(Arrays.stream(topicNames).collect(Collectors.toList()));
}
|
@Test(expectedExceptions = IllegalArgumentException.class)
public void testConsumerBuilderImplWhenTopicNamesVarargsHasNullTopic() {
consumerBuilderImpl.topic("my-topic", null);
}
|
public void mergeUpdateCount() {
updateCount = 0L;
for (int each : updateCounts) {
updateCount += each;
}
}
|
@Test
void assertGetUpdateCountWhenExecuteResultIsNotEmpty() {
UpdateResponseHeader actual = new UpdateResponseHeader(mock(SQLStatement.class), createExecuteUpdateResults());
assertThat(actual.getUpdateCount(), is(0L));
actual.mergeUpdateCount();
assertThat(actual.getUpdateCount(), is(4L));
}
|
@Override
public String toString() {
return toStringHelper(getClass())
.add("securityParamIndex", Integer.toString(securityParamIndex))
.add("sequence", Integer.toString(sequence))
.toString();
}
|
@Test
public void testToStringESP() throws Exception {
EncapSecurityPayload esp = deserializer.deserialize(bytePacket, 0, bytePacket.length);
String str = esp.toString();
assertTrue(StringUtils.contains(str, "securityParamIndex=" + 0x13572468));
assertTrue(StringUtils.contains(str, "sequence=" + 0xffff00));
}
|
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
if(file.isRoot()) {
return PathAttributes.EMPTY;
}
if(containerService.isContainer(file)) {
final PathAttributes attributes = new PathAttributes();
if(log.isDebugEnabled()) {
log.debug(String.format("Read location for bucket %s", file));
}
attributes.setRegion(new S3LocationFeature(session, session.getClient().getRegionEndpointCache()).getLocation(file).getIdentifier());
return attributes;
}
if(file.getType().contains(Path.Type.upload)) {
final Write.Append append = new S3MultipartUploadService(session, new S3WriteFeature(session, acl), acl).append(file, new TransferStatus());
if(append.append) {
return new PathAttributes().withSize(append.offset);
}
throw new NotfoundException(file.getAbsolute());
}
try {
PathAttributes attr;
final Path bucket = containerService.getContainer(file);
try {
attr = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getVersionedObjectDetails(
file.attributes().getVersionId(), bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file)));
}
catch(ServiceException e) {
switch(e.getResponseCode()) {
case 405:
if(log.isDebugEnabled()) {
log.debug(String.format("Mark file %s as delete marker", file));
}
// Only DELETE method is allowed for delete markers
attr = new PathAttributes();
attr.setCustom(Collections.singletonMap(KEY_DELETE_MARKER, Boolean.TRUE.toString()));
attr.setDuplicate(true);
return attr;
}
throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
if(StringUtils.isNotBlank(attr.getVersionId())) {
if(log.isDebugEnabled()) {
log.debug(String.format("Determine if %s is latest version for %s", attr.getVersionId(), file));
}
// Determine if latest version
try {
final String latest = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getObjectDetails(
bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file))).getVersionId();
if(null != latest) {
if(log.isDebugEnabled()) {
log.debug(String.format("Found later version %s for %s", latest, file));
}
// Duplicate if not latest version
attr.setDuplicate(!latest.equals(attr.getVersionId()));
}
}
catch(ServiceException e) {
final BackgroundException failure = new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
if(failure instanceof NotfoundException) {
attr.setDuplicate(true);
}
else {
throw failure;
}
}
}
return attr;
}
catch(NotfoundException e) {
if(file.isDirectory()) {
if(log.isDebugEnabled()) {
log.debug(String.format("Search for common prefix %s", file));
}
// File may be marked as placeholder but no placeholder file exists. Check for common prefix returned.
try {
new S3ObjectListService(session, acl).list(file, new CancellingListProgressListener(), String.valueOf(Path.DELIMITER), 1);
}
catch(ListCanceledException l) {
// Found common prefix
return PathAttributes.EMPTY;
}
catch(NotfoundException n) {
throw e;
}
// Found common prefix
return PathAttributes.EMPTY;
}
throw e;
}
}
|
@Test
public void testFindFileUsEast() throws Exception {
final Path container = new Path("test-us-east-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path test = new S3TouchFeature(session, new S3AccessControlListFeature(session)).touch(new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
final S3AttributesFinderFeature f = new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session));
final PathAttributes attributes = f.find(test);
assertEquals(0L, attributes.getSize());
assertEquals("d41d8cd98f00b204e9800998ecf8427e", attributes.getChecksum().hash);
assertEquals("d41d8cd98f00b204e9800998ecf8427e", Checksum.parse(attributes.getETag()).hash);
assertNotEquals(-1L, attributes.getModificationDate());
assertEquals(test.attributes(), attributes);
// Test wrong type
try {
f.find(new Path(test.getAbsolute(), EnumSet.of(Path.Type.directory, Path.Type.placeholder)));
fail();
}
catch(NotfoundException e) {
// Expected
}
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
List<MappingField> resolveAndValidateFields(List<MappingField> userFields, Map<String, ?> options) {
if (options.get(OPTION_FORMAT) == null) {
throw QueryException.error("Missing '" + OPTION_FORMAT + "' option");
}
if (options.get(OPTION_PATH) == null) {
throw QueryException.error("Missing '" + OPTION_PATH + "' option");
}
List<MappingField> fields = findMetadataResolver(options).resolveAndValidateFields(userFields, options);
if (fields.isEmpty()) {
throw QueryException.error("The resolved field list is empty");
}
return fields;
}
|
@Test
public void when_formatIsNotSupported_then_throws() {
assertThatThrownBy(() -> resolvers.resolveAndValidateFields(
emptyList(),
Map.of(OPTION_FORMAT, "some-other-format", OPTION_PATH, "/path")
)).isInstanceOf(QueryException.class)
.hasMessageContaining("Unsupported serialization format");
}
|
CreateConnectorRequest parseConnectorConfigurationFile(String filePath) throws IOException {
ObjectMapper objectMapper = new ObjectMapper();
File connectorConfigurationFile = Paths.get(filePath).toFile();
try {
Map<String, String> connectorConfigs = objectMapper.readValue(
connectorConfigurationFile,
new TypeReference<Map<String, String>>() { });
if (!connectorConfigs.containsKey(NAME_CONFIG)) {
throw new ConnectException("Connector configuration at '" + filePath + "' is missing the mandatory '" + NAME_CONFIG + "' "
+ "configuration");
}
return new CreateConnectorRequest(connectorConfigs.get(NAME_CONFIG), connectorConfigs, null);
} catch (StreamReadException | DatabindException e) {
log.debug("Could not parse connector configuration file '{}' into a Map with String keys and values", filePath);
}
try {
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
CreateConnectorRequest createConnectorRequest = objectMapper.readValue(connectorConfigurationFile,
new TypeReference<CreateConnectorRequest>() { });
if (createConnectorRequest.config().containsKey(NAME_CONFIG)) {
if (!createConnectorRequest.config().get(NAME_CONFIG).equals(createConnectorRequest.name())) {
throw new ConnectException("Connector name configuration in 'config' doesn't match the one specified in 'name' at '" + filePath
+ "'");
}
} else {
createConnectorRequest.config().put(NAME_CONFIG, createConnectorRequest.name());
}
return createConnectorRequest;
} catch (StreamReadException | DatabindException e) {
log.debug("Could not parse connector configuration file '{}' into an object of type {}",
filePath, CreateConnectorRequest.class.getSimpleName());
}
Map<String, String> connectorConfigs = Utils.propsToStringMap(Utils.loadProps(filePath));
if (!connectorConfigs.containsKey(NAME_CONFIG)) {
throw new ConnectException("Connector configuration at '" + filePath + "' is missing the mandatory '" + NAME_CONFIG + "' "
+ "configuration");
}
return new CreateConnectorRequest(connectorConfigs.get(NAME_CONFIG), connectorConfigs, null);
}
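// Illustrative examples of the three file formats the parser above tries in order
// (contents are hypothetical):
//
//   1. Flat JSON map:           {"name": "my-connector", "connector.class": "...", "tasks.max": "1"}
//   2. CreateConnectorRequest:  {"name": "my-connector", "config": {"connector.class": "..."}}
//   3. Java properties file:    name=my-connector
//                               connector.class=...
//
// Formats 1 and 3 require the 'name' key; format 2 backfills config['name'] from the
// top-level name and rejects a mismatch between the two.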
|
@Test
public void testParseJsonFileWithConnectorConfiguration() throws Exception {
try (FileWriter writer = new FileWriter(connectorConfigurationFile)) {
writer.write(new ObjectMapper().writeValueAsString(CONNECTOR_CONFIG));
}
CreateConnectorRequest request = connectStandalone.parseConnectorConfigurationFile(connectorConfigurationFile.getAbsolutePath());
assertEquals(CONNECTOR_NAME, request.name());
assertEquals(CONNECTOR_CONFIG, request.config());
assertNull(request.initialState());
}
|
public static <T> Builder<T> newBuilder(int initialCapacity) {
return new Builder<>(initialCapacity);
}
|
@Test(expected = IllegalArgumentException.class)
public void whenInitialCapacityNegative_thenThrowIllegalArgumentException() {
InflatableSet.newBuilder(-1);
}
|
public static String stripAll(final CharSequence str, final CharSequence prefixOrSuffix) {
if (equals(str, prefixOrSuffix)) {
return EMPTY;
}
return stripAll(str, prefixOrSuffix, prefixOrSuffix);
}
|
@Test
public void stripAllTest() {
final String SOURCE_STRING = "aaa_STRIPPED_bbb";
// ---------------------------- test stripAll ----------------------------
// Normal test
assertEquals("_STRIPPED_bbb", CharSequenceUtil.stripAll(SOURCE_STRING, "a"));
assertEquals(SOURCE_STRING, CharSequenceUtil.stripAll(SOURCE_STRING, ""));
// test null param
assertEquals("_STRIPPED_", CharSequenceUtil.stripAll(SOURCE_STRING, "a", "b"));
assertEquals(SOURCE_STRING, CharSequenceUtil.stripAll(SOURCE_STRING, null, null));
assertEquals(SOURCE_STRING, CharSequenceUtil.stripAll(SOURCE_STRING, "", ""));
assertEquals("aaa_STRIPPED_", CharSequenceUtil.stripAll(SOURCE_STRING, "", "b"));
assertEquals("aaa_STRIPPED_", CharSequenceUtil.stripAll(SOURCE_STRING, null, "b"));
assertEquals("_STRIPPED_bbb", CharSequenceUtil.stripAll(SOURCE_STRING, "a", ""));
assertEquals("_STRIPPED_bbb", CharSequenceUtil.stripAll(SOURCE_STRING, "a", null));
// special test
assertEquals("bbb", CharSequenceUtil.stripAll("aaaaaabbb", "aaa", null));
assertEquals("abbb", CharSequenceUtil.stripAll("aaaaaaabbb", "aa", null));
// aaaaaaaaa (nine a's) can be viewed as aaa_aaaa_aa
assertEquals("", CharSequenceUtil.stripAll("aaaaaaaaa", "aaa", "aa"));
// After the second iteration, from can become greater than to; the original code forcibly swapped them,
// which made it impossible to strip the prefix/suffix
assertEquals("", CharSequenceUtil.stripAll("a", "a", "a"));
// When the prefix and suffix overlap, the prefix is stripped first
assertEquals("a", CharSequenceUtil.stripAll("aba", "ab", "ba"));
assertEquals("a", CharSequenceUtil.stripAll("abababa", "ab", "ba"));
}
|
@Override
public boolean add(QueryableEntry obj) {
throw new UnsupportedOperationException();
}
|
@Test(expected = UnsupportedOperationException.class)
public void testIterator_addUnsupported() {
result.add(mock(QueryableEntry.class));
}
|
@Override
public LeaderAndIsrResponse getErrorResponse(int throttleTimeMs, Throwable e) {
LeaderAndIsrResponseData responseData = new LeaderAndIsrResponseData();
Errors error = Errors.forException(e);
responseData.setErrorCode(error.code());
if (version() < 5) {
List<LeaderAndIsrPartitionError> partitions = new ArrayList<>();
for (LeaderAndIsrPartitionState partition : partitionStates()) {
partitions.add(new LeaderAndIsrPartitionError()
.setTopicName(partition.topicName())
.setPartitionIndex(partition.partitionIndex())
.setErrorCode(error.code()));
}
responseData.setPartitionErrors(partitions);
} else {
for (LeaderAndIsrTopicState topicState : data.topicStates()) {
List<LeaderAndIsrPartitionError> partitions = new ArrayList<>(
topicState.partitionStates().size());
for (LeaderAndIsrPartitionState partition : topicState.partitionStates()) {
partitions.add(new LeaderAndIsrPartitionError()
.setPartitionIndex(partition.partitionIndex())
.setErrorCode(error.code()));
}
responseData.topics().add(new LeaderAndIsrTopicError()
.setTopicId(topicState.topicId())
.setPartitionErrors(partitions));
}
}
return new LeaderAndIsrResponse(responseData, version());
}
|
@Test
public void testGetErrorResponse() {
Uuid topicId = Uuid.randomUuid();
String topicName = "topic";
int partition = 0;
for (short version : LEADER_AND_ISR.allVersions()) {
LeaderAndIsrRequest request = new LeaderAndIsrRequest.Builder(version, 0, 0, 0,
Collections.singletonList(new LeaderAndIsrPartitionState()
.setTopicName(topicName)
.setPartitionIndex(partition)),
Collections.singletonMap(topicName, topicId),
Collections.emptySet()
).build(version);
LeaderAndIsrResponse response = request.getErrorResponse(0,
new ClusterAuthorizationException("Not authorized"));
assertEquals(Errors.CLUSTER_AUTHORIZATION_FAILED, response.error());
if (version < 5) {
assertEquals(
Collections.singletonList(new LeaderAndIsrPartitionError()
.setTopicName(topicName)
.setPartitionIndex(partition)
.setErrorCode(Errors.CLUSTER_AUTHORIZATION_FAILED.code())),
response.data().partitionErrors());
assertEquals(0, response.data().topics().size());
} else {
LeaderAndIsrTopicError topicState = response.topics().find(topicId);
assertEquals(topicId, topicState.topicId());
assertEquals(
Collections.singletonList(new LeaderAndIsrPartitionError()
.setPartitionIndex(partition)
.setErrorCode(Errors.CLUSTER_AUTHORIZATION_FAILED.code())),
topicState.partitionErrors());
assertEquals(0, response.data().partitionErrors().size());
}
}
}
|
@Override
public InputStream getInputStream() {
return new RedissonInputStream();
}
|
@Test
public void testSkip() throws IOException {
RBinaryStream t = redisson.getBinaryStream("test");
t.set(new byte[] {1, 2, 3, 4, 5, 6});
InputStream is = t.getInputStream();
is.skip(3);
byte[] b = new byte[6];
is.read(b);
assertThat(b).isEqualTo(new byte[] {4, 5, 6, 0, 0, 0});
}
|
public WeightedItem<T> addOrVote(T item) {
for (int i = 0; i < list.size(); i++) {
WeightedItem<T> weightedItem = list.get(i);
if (weightedItem.item.equals(item)) {
voteFor(weightedItem);
return weightedItem;
}
}
return organizeAndAdd(item);
}
|
@Test
public void testListReorganizesAfterEnoughVotes() {
WeightedEvictableList<String> list = new WeightedEvictableList<>(3, 3);
list.addOrVote("c");
list.addOrVote("b");
list.addOrVote("b");
list.addOrVote("a");
list.addOrVote("a");
list.addOrVote("a");
assertItemsInOrder(list, "a", "b", "c");
assertWeightsInOrder(list, 3, 2, 1);
}
|
public ResourcePattern toKafkaResourcePattern() {
org.apache.kafka.common.resource.ResourceType kafkaType;
String kafkaName;
PatternType kafkaPattern = PatternType.LITERAL;
switch (type) {
case TOPIC:
kafkaType = org.apache.kafka.common.resource.ResourceType.TOPIC;
kafkaName = name;
if (AclResourcePatternType.PREFIX.equals(pattern)) {
kafkaPattern = PatternType.PREFIXED;
}
break;
case GROUP:
kafkaType = org.apache.kafka.common.resource.ResourceType.GROUP;
kafkaName = name;
if (AclResourcePatternType.PREFIX.equals(pattern)) {
kafkaPattern = PatternType.PREFIXED;
}
break;
case CLUSTER:
kafkaType = org.apache.kafka.common.resource.ResourceType.CLUSTER;
kafkaName = "kafka-cluster";
break;
case TRANSACTIONAL_ID:
kafkaType = org.apache.kafka.common.resource.ResourceType.TRANSACTIONAL_ID;
kafkaName = name;
if (AclResourcePatternType.PREFIX.equals(pattern)) {
kafkaPattern = PatternType.PREFIXED;
}
break;
default:
throw new IllegalArgumentException("Invalid Acl resource type: " + type);
}
if (kafkaName == null) {
throw new IllegalArgumentException("Name is required for resource type: " + type);
}
return new ResourcePattern(kafkaType, kafkaName, kafkaPattern);
}
|
@Test
public void testToKafkaResourcePatternForGroupResource() {
// Regular group
SimpleAclRuleResource groupResourceRules = new SimpleAclRuleResource("my-group", SimpleAclRuleResourceType.GROUP, AclResourcePatternType.LITERAL);
ResourcePattern expectedKafkaResourcePattern = new ResourcePattern(ResourceType.GROUP, "my-group", PatternType.LITERAL);
assertThat(groupResourceRules.toKafkaResourcePattern(), is(expectedKafkaResourcePattern));
// Prefixed group
groupResourceRules = new SimpleAclRuleResource("my-", SimpleAclRuleResourceType.GROUP, AclResourcePatternType.PREFIX);
expectedKafkaResourcePattern = new ResourcePattern(ResourceType.GROUP, "my-", PatternType.PREFIXED);
assertThat(groupResourceRules.toKafkaResourcePattern(), is(expectedKafkaResourcePattern));
}
|
@Override
public void createAgent(OFAgent ofAgent) {
checkNotNull(ofAgent, ERR_NULL_OFAGENT);
if (ofAgent.state() == STARTED) {
log.warn(String.format(MSG_OFAGENT, ofAgent.networkId(), ERR_IN_USE));
return;
}
// TODO check if the virtual network exists
ofAgentStore.createOfAgent(ofAgent);
log.info(String.format(MSG_OFAGENT, ofAgent.networkId(), MSG_CREATED));
}
|
@Test(expected = IllegalArgumentException.class)
public void testCreateDuplicateAgent() {
target.createAgent(OFAGENT_1);
target.createAgent(OFAGENT_1);
}
|
@Override
public boolean isMarshallable(Object o) {
return o instanceof Serializable;
}
|
@Test
public void testBoxedPrimitivesAndArray() throws Exception {
JavaSerializationMarshaller marshaller = new JavaSerializationMarshaller();
isMarshallable(marshaller, Byte.MAX_VALUE);
isMarshallable(marshaller, Short.MAX_VALUE);
isMarshallable(marshaller, Integer.MAX_VALUE);
isMarshallable(marshaller, Long.MAX_VALUE);
isMarshallable(marshaller, Float.MAX_VALUE);
isMarshallable(marshaller, Double.MAX_VALUE);
isMarshallable(marshaller, 'c');
isMarshallable(marshaller, "String");
}
|
public static int byteCount(Slice slice, int offset, int length, int codePointCount)
{
requireNonNull(slice, "slice is null");
if (length < 0) {
throw new IllegalArgumentException("length must be greater than or equal to zero");
}
if (offset < 0 || offset + length > slice.length()) {
throw new IllegalArgumentException("invalid offset/length");
}
if (codePointCount < 0) {
throw new IllegalArgumentException("codePointsCount must be greater than or equal to zero");
}
if (codePointCount == 0) {
return 0;
}
// min codepoint size is 1 byte.
// if size in bytes is less than the max length
// we don't need to decode codepoints
if (codePointCount > length) {
return length;
}
// get the end index with respect to code point count
int endIndex = offsetOfCodePoint(slice, offset, codePointCount);
if (endIndex < 0) {
// end index runs over the slice's length
return length;
}
if (offset > endIndex) {
throw new AssertionError("offset cannot be greater than endIndex");
}
// end index could run over length because of large code points (e.g., 4-byte code points)
// or within length because of small code points (e.g., 1-byte code points)
return min(endIndex - offset, length);
}
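// Worked example (illustrative): for the 2-bytes-per-character string "абв" (6 bytes in
// UTF-8), byteCount(slice, 0, 4, 1) resolves offsetOfCodePoint(slice, 0, 1) == 2 (the start
// of "б") and returns min(2 - 0, 4) = 2, the byte length of "а". When codePointCount exceeds
// length, e.g. byteCount(slice, 0, 2, 5), decoding is skipped and the raw length 2 is returned.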
|
@Test
public void testByteCount()
{
// Single byte code points
assertByteCount("abc", 0, 0, 1, "");
assertByteCount("abc", 0, 1, 0, "");
assertByteCount("abc", 1, 1, 1, "b");
assertByteCount("abc", 1, 1, 2, "b");
assertByteCount("abc", 1, 2, 1, "b");
assertByteCount("abc", 1, 2, 2, "bc");
assertByteCount("abc", 1, 2, 3, "bc");
assertByteCount("abc", 0, 3, 1, "a");
assertByteCount("abc", 0, 3, 5, "abc");
assertByteCountFailure("abc", 4, 5, 1);
assertByteCountFailure("abc", 5, 0, 1);
assertByteCountFailure("abc", -1, 1, 1);
assertByteCountFailure("abc", 1, -1, 1);
assertByteCountFailure("abc", 1, 1, -1);
// 2 bytes code points
assertByteCount("абв", 0, 0, 1, "");
assertByteCount("абв", 0, 1, 0, "");
assertByteCount("абв", 0, 2, 1, "а");
assertByteCount("абв", 0, 4, 1, "а");
assertByteCount("абв", 0, 1, 1, utf8Slice("а").getBytes(0, 1));
assertByteCount("абв", 2, 2, 2, "б");
assertByteCount("абв", 2, 2, 0, "");
assertByteCount("абв", 0, 3, 5, utf8Slice("аб").getBytes(0, 3));
assertByteCountFailure("абв", 8, 5, 1);
// we do not check if the offset is in the middle of a code point
assertByteCount("абв", 1, 1, 5, utf8Slice("а").getBytes(1, 1));
assertByteCount("абв", 2, 1, 5, utf8Slice("б").getBytes(0, 1));
// 3 bytes code points
assertByteCount("\u6000\u6001\u6002\u6003", 0, 0, 2, "");
assertByteCount("\u6000\u6001\u6002\u6003", 0, 1, 1, utf8Slice("\u6000").getBytes(0, 1));
assertByteCount("\u6000\u6001\u6002\u6003", 0, 2, 1, utf8Slice("\u6000").getBytes(0, 2));
assertByteCount("\u6000\u6001\u6002\u6003", 0, 3, 1, "\u6000");
assertByteCount("\u6000\u6001\u6002\u6003", 0, 6, 1, "\u6000");
assertByteCount("\u6000\u6001\u6002\u6003", 6, 2, 4, utf8Slice("\u6002").getBytes(0, 2));
assertByteCount("\u6000\u6001\u6002\u6003", 0, 12, 6, "\u6000\u6001\u6002\u6003");
// we do not check if the offset is in the middle of a code point
assertByteCount("\u6000\u6001\u6002\u6003", 1, 6, 2, utf8Slice("\u6000\u6001\u6002").getBytes(1, 6));
assertByteCount("\u6000\u6001\u6002\u6003", 2, 6, 2, utf8Slice("\u6000\u6001\u6002").getBytes(2, 6));
assertByteCount("\u6000\u6001\u6002\u6003", 3, 6, 2, utf8Slice("\u6000\u6001\u6002").getBytes(3, 6));
assertByteCountFailure("\u6000\u6001\u6002\u6003", 21, 0, 1);
// invalid code points; always return the original lengths unless code point count is 0
assertByteCount(new byte[] {(byte) 0x81, (byte) 0x81, (byte) 0x81}, 0, 2, 0, new byte[] {});
assertByteCount(new byte[] {(byte) 0x81, (byte) 0x81, (byte) 0x81}, 0, 2, 1, new byte[] {(byte) 0x81, (byte) 0x81});
assertByteCount(new byte[] {(byte) 0x81, (byte) 0x81, (byte) 0x81}, 0, 2, 3, new byte[] {(byte) 0x81, (byte) 0x81});
}
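// --- Hedged usage sketch (not from the source) ------------------------------
// Shows byteCount capping a UTF-8 slice at a code-point budget without tearing
// a multi-byte character. "Varchars" is a hypothetical stand-in for the
// enclosing class, which is not named above; the io.airlift.slice API is
// assumed, as the utf8Slice calls in the test imply.
public class ByteCountSketch {
    public static void main(String[] args) {
        io.airlift.slice.Slice s = io.airlift.slice.Slices.utf8Slice("абв"); // 3 code points, 6 bytes
        int bytes = Varchars.byteCount(s, 0, s.length(), 2);                 // budget of 2 code points
        System.out.println(s.slice(0, bytes).toStringUtf8());                // prints "аб" (4 bytes kept)
    }
}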
|
public static void checkNotNegative(long value, String argName) {
checkArgument(value >= 0, "'%s' must not be negative.", argName);
}
|
@Test
public void testCheckNotNegative() throws Exception {
int positiveArg = 1;
int zero = 0;
int negativeArg = -1;
// Should not throw.
Validate.checkNotNegative(zero, "zeroArg");
Validate.checkNotNegative(positiveArg, "positiveArg");
// Verify it throws.
intercept(IllegalArgumentException.class,
"'negativeArg' must not be negative",
() -> Validate.checkNotNegative(negativeArg, "negativeArg"));
}
|
public void expand(String key, long value, RangeHandler rangeHandler, EdgeHandler edgeHandler) {
if (value < lowerBound || value > upperBound) {
// Value outside bounds -> expand to nothing.
return;
}
int maxLevels = value > 0 ? maxPositiveLevels : maxNegativeLevels;
int sign = value > 0 ? 1 : -1;
// Append key to feature string builder
StringBuilder builder = new StringBuilder(128);
builder.append(key).append('=');
long levelSize = arity;
long edgeInterval = (value / arity) * arity;
edgeHandler.handleEdge(createEdgeFeatureHash(builder, edgeInterval), (int) Math.abs(value - edgeInterval));
for (int i = 0; i < maxLevels; ++i) {
long start = (value / levelSize) * levelSize;
if (Math.abs(start) + levelSize - 1 < 0) { // overflow
break;
}
rangeHandler.handleRange(createRangeFeatureHash(builder, start, start + sign * (levelSize - 1)));
levelSize *= arity;
if (levelSize <= 0 && levelSize != Long.MIN_VALUE) { //overflow
break;
}
}
}
|
@Test
void requireThatSmallRangeIsExpandedInArity2() {
PredicateRangeTermExpander expander = new PredicateRangeTermExpander(2);
Iterator<String> expectedLabels = List.of(
"key=42-43",
"key=40-43",
"key=40-47",
"key=32-47",
"key=32-63",
"key=0-63",
"key=0-127",
"key=0-255",
"key=0-511",
"key=0-1023",
"key=0-2047",
"key=0-4095",
"key=0-8191",
"key=0-16383",
"key=0-32767",
"key=0-65535",
"key=0-131071",
"key=0-262143",
"key=0-524287",
"key=0-1048575",
"key=0-2097151",
"key=0-4194303",
"key=0-8388607",
"key=0-16777215",
"key=0-33554431",
"key=0-67108863",
"key=0-134217727",
"key=0-268435455",
"key=0-536870911",
"key=0-1073741823",
"key=0-2147483647",
"key=0-4294967295",
"key=0-8589934591",
"key=0-17179869183",
"key=0-34359738367",
"key=0-68719476735",
"key=0-137438953471",
"key=0-274877906943",
"key=0-549755813887",
"key=0-1099511627775",
"key=0-2199023255551",
"key=0-4398046511103",
"key=0-8796093022207",
"key=0-17592186044415",
"key=0-35184372088831",
"key=0-70368744177663",
"key=0-140737488355327",
"key=0-281474976710655",
"key=0-562949953421311",
"key=0-1125899906842623",
"key=0-2251799813685247",
"key=0-4503599627370495",
"key=0-9007199254740991",
"key=0-18014398509481983",
"key=0-36028797018963967",
"key=0-72057594037927935",
"key=0-144115188075855871",
"key=0-288230376151711743",
"key=0-576460752303423487",
"key=0-1152921504606846975",
"key=0-2305843009213693951",
"key=0-4611686018427387903",
"key=0-9223372036854775807").iterator();
expander.expand("key", 42, range -> assertEquals(PredicateHash.hash64(expectedLabels.next()), range),
(edge, value) -> {
assertEquals(PredicateHash.hash64("key=42"), edge);
assertEquals(0, value);
});
assertFalse(expectedLabels.hasNext());
}
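// --- Hedged sketch (not from the source) -------------------------------------
// Derives the range labels asserted above for value 42 at arity 2: each level
// snaps the value down to a multiple of levelSize, and the final 2^63 level
// works only because of the Long.MIN_VALUE wrap-around that expand()'s
// overflow check deliberately permits.
public class ArityTwoLevelsSketch {
    public static void main(String[] args) {
        long value = 42;
        long levelSize = 2;
        while (true) {
            long start = (value / levelSize) * levelSize;
            System.out.println("key=" + start + "-" + (start + levelSize - 1)); // wraps to Long.MAX_VALUE on the last level
            levelSize *= 2;
            if (levelSize <= 0 && levelSize != Long.MIN_VALUE) {
                break; // overflowed past the final level
            }
        }
    }
}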
|
@Description("pads a string on the left")
@ScalarFunction("lpad")
@LiteralParameters({"x", "y"})
@SqlType(StandardTypes.VARCHAR)
public static Slice leftPad(@SqlType("varchar(x)") Slice text, @SqlType(StandardTypes.BIGINT) long targetLength, @SqlType("varchar(y)") Slice padString)
{
return pad(text, targetLength, padString, 0);
}
|
@Test
public void testLeftPad()
{
assertFunction("LPAD('text', 5, 'x')", VARCHAR, "xtext");
assertFunction("LPAD('text', 4, 'x')", VARCHAR, "text");
assertFunction("LPAD('text', 6, 'xy')", VARCHAR, "xytext");
assertFunction("LPAD('text', 7, 'xy')", VARCHAR, "xyxtext");
assertFunction("LPAD('text', 9, 'xyz')", VARCHAR, "xyzxytext");
assertFunction("LPAD('\u4FE1\u5FF5 \u7231 \u5E0C\u671B ', 10, '\u671B')", VARCHAR, "\u671B\u4FE1\u5FF5 \u7231 \u5E0C\u671B ");
assertFunction("LPAD('\u4FE1\u5FF5 \u7231 \u5E0C\u671B ', 11, '\u671B')", VARCHAR, "\u671B\u671B\u4FE1\u5FF5 \u7231 \u5E0C\u671B ");
assertFunction("LPAD('\u4FE1\u5FF5 \u7231 \u5E0C\u671B ', 12, '\u5E0C\u671B')", VARCHAR, "\u5E0C\u671B\u5E0C\u4FE1\u5FF5 \u7231 \u5E0C\u671B ");
assertFunction("LPAD('\u4FE1\u5FF5 \u7231 \u5E0C\u671B ', 13, '\u5E0C\u671B')", VARCHAR, "\u5E0C\u671B\u5E0C\u671B\u4FE1\u5FF5 \u7231 \u5E0C\u671B ");
assertFunction("LPAD('', 3, 'a')", VARCHAR, "aaa");
assertFunction("LPAD('abc', 0, 'e')", VARCHAR, "");
// truncation
assertFunction("LPAD('text', 3, 'xy')", VARCHAR, "tex");
assertFunction("LPAD('\u4FE1\u5FF5 \u7231 \u5E0C\u671B ', 5, '\u671B')", VARCHAR, "\u4FE1\u5FF5 \u7231 ");
// failure modes
assertInvalidFunction("LPAD('abc', 3, '')", "Padding string must not be empty");
// invalid target lengths
long maxSize = Integer.MAX_VALUE;
assertInvalidFunction("LPAD('abc', -1, 'foo')", "Target length must be in the range [0.." + maxSize + "]");
assertInvalidFunction("LPAD('abc', " + (maxSize + 1) + ", '')", "Target length must be in the range [0.." + maxSize + "]");
}
|
@Override
public String named() {
return PluginEnum.MODIFY_RESPONSE.getName();
}
|
@Test
public void testNamed() {
assertEquals(modifyResponsePlugin.named(), PluginEnum.MODIFY_RESPONSE.getName());
}
|
public static Document readDocument(@Nonnull final InputStream stream) throws ExecutionException, InterruptedException {
return readDocumentAsync(stream).get();
}
|
@Test
public void testString() throws Exception
{
// Setup test fixture.
final String input = "<foo><bar>test</bar></foo>";
// Execute system under test.
final Document output = SAXReaderUtil.readDocument(input);
// Verify result.
assertNotNull(output);
assertEquals("foo", output.getRootElement().getName());
assertNotNull(output.getRootElement().element("bar"));
assertEquals("test", output.getRootElement().elementText("bar"));
}
|
@Override
public List<String> listPartitionNames(Connection connection, String databaseName, String tableName) {
String partitionNamesQuery =
"SELECT PARTITION_DESCRIPTION as NAME FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_SCHEMA = ? " +
"AND TABLE_NAME = ? AND PARTITION_NAME IS NOT NULL " +
"AND ( PARTITION_METHOD = 'RANGE' or PARTITION_METHOD = 'RANGE COLUMNS') ORDER BY PARTITION_DESCRIPTION";
try (PreparedStatement ps = connection.prepareStatement(partitionNamesQuery)) {
    ps.setString(1, databaseName);
    ps.setString(2, tableName);
    ImmutableList.Builder<String> list = ImmutableList.builder();
    // executeQuery never returns null per the JDBC contract, so the former
    // null check was dead code; try-with-resources also closes the ResultSet
    try (ResultSet rs = ps.executeQuery()) {
        while (rs.next()) {
            String[] partitionNames = rs.getString("NAME").replace("'", "").split(",");
            for (String partitionName : partitionNames) {
                list.add(partitionName);
            }
        }
    }
    return list.build();
} catch (SQLException | NullPointerException e) {
    // NullPointerException can still surface from a null NAME column
    throw new StarRocksConnectorException(e.getMessage(), e);
}
}
|
@Test
public void testListPartitionNames() {
try {
JDBCMetadata jdbcMetadata = new JDBCMetadata(properties, "catalog", dataSource);
List<String> partitionNames = jdbcMetadata.listPartitionNames("test", "tbl1", TableVersionRange.empty());
Assert.assertFalse(partitionNames.isEmpty());
} catch (Exception e) {
System.out.println(e.getMessage());
Assert.fail();
}
}
|
@Override
public boolean process(NacosTask task) {
MergeDataTask mergeTask = (MergeDataTask) task;
final String dataId = mergeTask.dataId;
final String group = mergeTask.groupId;
final String tenant = mergeTask.tenant;
final String tag = mergeTask.tag;
final String clientIp = mergeTask.getClientIp();
try {
List<ConfigInfoAggr> datumList = new ArrayList<>();
int rowCount = configInfoAggrPersistService.aggrConfigInfoCount(dataId, group, tenant);
int pageCount = (int) Math.ceil(rowCount * 1.0 / PAGE_SIZE);
for (int pageNo = 1; pageNo <= pageCount; pageNo++) {
Page<ConfigInfoAggr> page = configInfoAggrPersistService.findConfigInfoAggrByPage(dataId, group, tenant,
pageNo, PAGE_SIZE);
if (page != null) {
datumList.addAll(page.getPageItems());
LOGGER.info("[merge-query] {}, {}, size/total={}/{}", dataId, group, datumList.size(), rowCount);
}
}
final Timestamp time = TimeUtils.getCurrentTime();
if (datumList.size() > 0) {
// merge
ConfigInfo cf = merge(dataId, group, tenant, datumList);
configInfoPersistService.insertOrUpdate(null, null, cf, null);
LOGGER.info("[merge-ok] {}, {}, size={}, length={}, md5={}, content={}", dataId, group,
datumList.size(), cf.getContent().length(), cf.getMd5(),
ContentUtils.truncateContent(cf.getContent()));
ConfigTraceService.logPersistenceEvent(dataId, group, tenant, null, time.getTime(),
InetUtils.getSelfIP(), ConfigTraceService.PERSISTENCE_EVENT,
ConfigTraceService.PERSISTENCE_TYPE_MERGE, cf.getContent());
} else {
String eventType;
// remove
if (StringUtils.isBlank(tag)) {
eventType = ConfigTraceService.PERSISTENCE_EVENT;
configInfoPersistService.removeConfigInfo(dataId, group, tenant, clientIp, null);
} else {
eventType = ConfigTraceService.PERSISTENCE_EVENT_TAG + "-" + tag;
configInfoTagPersistService.removeConfigInfoTag(dataId, group, tenant, tag, clientIp, null);
}
LOGGER.warn(
"[merge-delete] delete config info because no datum. dataId=" + dataId + ", groupId=" + group);
ConfigTraceService.logPersistenceEvent(dataId, group, tenant, null, time.getTime(),
InetUtils.getSelfIP(), eventType, ConfigTraceService.PERSISTENCE_TYPE_REMOVE, null);
}
NotifyCenter.publishEvent(new ConfigDataChangeEvent(false, dataId, group, tenant, tag, time.getTime()));
} catch (Exception e) {
mergeService.addMergeTask(dataId, group, tenant, mergeTask.getClientIp());
LOGGER.info("[merge-error] " + dataId + ", " + group + ", " + e.toString(), e);
}
return true;
}
|
@Test
void testMergerExistAggrConfig() throws InterruptedException {
String dataId = "dataId12345";
String group = "group123";
String tenant = "tenant1234";
when(configInfoAggrPersistService.aggrConfigInfoCount(eq(dataId), eq(group), eq(tenant))).thenReturn(2);
Page<ConfigInfoAggr> datumPage = new Page<>();
ConfigInfoAggr configInfoAggr1 = new ConfigInfoAggr();
configInfoAggr1.setContent("12344");
ConfigInfoAggr configInfoAggr2 = new ConfigInfoAggr();
configInfoAggr2.setContent("12345666");
datumPage.getPageItems().add(configInfoAggr1);
datumPage.getPageItems().add(configInfoAggr2);
when(configInfoAggrPersistService.findConfigInfoAggrByPage(eq(dataId), eq(group), eq(tenant), anyInt(), anyInt())).thenReturn(
datumPage);
when(configInfoPersistService.insertOrUpdate(eq(null), eq(null), any(ConfigInfo.class), eq(null))).thenReturn(
new ConfigOperateResult());
AtomicReference<ConfigDataChangeEvent> reference = new AtomicReference<>();
NotifyCenter.registerSubscriber(new Subscriber() {
@Override
public void onEvent(Event event) {
ConfigDataChangeEvent event1 = (ConfigDataChangeEvent) event;
if (event1.dataId.equals(dataId) && event1.group.equals(group) && tenant.equals(event1.tenant)) {
reference.set((ConfigDataChangeEvent) event);
}
}
@Override
public Class<? extends Event> subscribeType() {
return ConfigDataChangeEvent.class;
}
});
MergeDataTask mergeDataTask = new MergeDataTask(dataId, group, tenant, "127.0.0.1");
mergeTaskProcessor.process(mergeDataTask);
Mockito.verify(configInfoPersistService, times(1)).insertOrUpdate(eq(null), eq(null), any(ConfigInfo.class), eq(null));
Thread.sleep(1000L);
assertTrue(reference.get() != null);
}
|
public static void deleteDirectory(File file) {
    Path pathToBeDeleted = file.toPath();
    // Files.walk returns a Stream that holds open directory handles, so close
    // it with try-with-resources; also keep the cause instead of discarding it
    try (java.util.stream.Stream<Path> paths = Files.walk(pathToBeDeleted)) {
        paths.sorted(Comparator.reverseOrder())
                .map(Path::toFile)
                .forEach(File::delete);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
|
@Test
void testDeleteDirectory() throws Exception {
new File("target/foo/bar").mkdirs();
FileUtils.writeToFile(new File("target/foo/hello.txt"), "hello world");
FileUtils.writeToFile(new File("target/foo/bar/world.txt"), "hello again");
assertTrue(new File("target/foo/hello.txt").exists());
assertTrue(new File("target/foo/bar/world.txt").exists());
FileUtils.deleteDirectory(new File("target/foo"));
assertFalse(new File("target/foo/hello.txt").exists());
assertFalse(new File("target/foo/bar/world.txt").exists());
}
|
@Override
public List<DatabaseTableRespVO> getDatabaseTableList(Long dataSourceConfigId, String name, String comment) {
List<TableInfo> tables = databaseTableService.getTableList(dataSourceConfigId, name, comment);
// Remove tables that already exist in Codegen
Set<String> existsTables = convertSet(
codegenTableMapper.selectListByDataSourceConfigId(dataSourceConfigId), CodegenTableDO::getTableName);
tables.removeIf(table -> existsTables.contains(table.getName()));
return BeanUtils.toBean(tables, DatabaseTableRespVO.class);
}
|
@Test
public void testGetDatabaseTableList() {
// Prepare parameters
Long dataSourceConfigId = randomLongId();
String name = randomString();
String comment = randomString();
// Mock methods
TableInfo tableInfo01 = mock(TableInfo.class);
when(tableInfo01.getName()).thenReturn("t_yunai");
when(tableInfo01.getComment()).thenReturn("芋艿");
TableInfo tableInfo02 = mock(TableInfo.class);
when(tableInfo02.getName()).thenReturn("t_yunai_02");
when(tableInfo02.getComment()).thenReturn("芋艿_02");
when(databaseTableService.getTableList(eq(dataSourceConfigId), eq(name), eq(comment)))
.thenReturn(ListUtil.toList(tableInfo01, tableInfo02));
// Mock data
CodegenTableDO tableDO = randomPojo(CodegenTableDO.class,
o -> o.setScene(CodegenSceneEnum.ADMIN.getScene())
.setTableName("t_yunai_02")
.setDataSourceConfigId(dataSourceConfigId));
codegenTableMapper.insert(tableDO);
// Invoke
List<DatabaseTableRespVO> result = codegenService.getDatabaseTableList(dataSourceConfigId, name, comment);
// Assert
assertEquals(1, result.size());
assertEquals("t_yunai", result.get(0).getName());
assertEquals("芋艿", result.get(0).getComment());
}
|
@Override public void onEvent(ApplicationEvent event) {
// only onRequest is used
}
|
@Test void onEvent_setsErrorWhenNotAlreadySet() {
setEventType(RequestEvent.Type.FINISHED);
setBaseUri("/");
when(request.getProperty(SpanCustomizer.class.getName())).thenReturn(span);
Exception error = new Exception();
when(requestEvent.getException()).thenReturn(error);
when(request.getProperty("error")).thenReturn(null);
listener.onEvent(requestEvent);
verify(request).setProperty("error", error);
}
|
@Override
public void removePod(String uid) {
checkArgument(!Strings.isNullOrEmpty(uid), ERR_NULL_POD_UID);
synchronized (this) {
if (isPodInUse(uid)) {
final String error = String.format(MSG_POD, uid, ERR_IN_USE);
throw new IllegalStateException(error);
}
Pod pod = k8sPodStore.removePod(uid);
if (pod != null) {
log.info(String.format(MSG_POD,
pod.getMetadata().getName(), MSG_REMOVED));
}
}
}
|
@Test(expected = IllegalArgumentException.class)
public void testRemovePodWithNull() {
target.removePod(null);
}
|
synchronized boolean groupSubscribe(Collection<String> topics) {
if (!hasAutoAssignedPartitions())
throw new IllegalStateException(SUBSCRIPTION_EXCEPTION_MESSAGE);
groupSubscription = new HashSet<>(topics);
return !subscription.containsAll(groupSubscription);
}
|
@Test
public void testGroupSubscribe() {
state.subscribe(singleton(topic1), Optional.of(rebalanceListener));
assertEquals(singleton(topic1), state.metadataTopics());
assertFalse(state.groupSubscribe(singleton(topic1)));
assertEquals(singleton(topic1), state.metadataTopics());
assertTrue(state.groupSubscribe(Utils.mkSet(topic, topic1)));
assertEquals(Utils.mkSet(topic, topic1), state.metadataTopics());
// `groupSubscribe` does not accumulate
assertFalse(state.groupSubscribe(singleton(topic1)));
assertEquals(singleton(topic1), state.metadataTopics());
state.subscribe(singleton("anotherTopic"), Optional.of(rebalanceListener));
assertEquals(Utils.mkSet(topic1, "anotherTopic"), state.metadataTopics());
assertFalse(state.groupSubscribe(singleton("anotherTopic")));
assertEquals(singleton("anotherTopic"), state.metadataTopics());
}
|
static void registerMethod(MetricsRegistry metricsRegistry, Object osBean, String methodName, String name) {
if (OperatingSystemMXBeanSupport.GET_FREE_PHYSICAL_MEMORY_SIZE_DISABLED
&& methodName.equals("getFreePhysicalMemorySize")) {
metricsRegistry.registerStaticProbe(osBean, name, MANDATORY, (LongProbeFunction<Object>) source -> -1);
} else {
registerMethod(metricsRegistry, osBean, methodName, name, 1);
}
}
|
@Test
public void registerMethod_whenDouble() {
FakeOperatingSystemBean fakeOperatingSystemBean = new FakeOperatingSystemBean();
registerMethod(metricsRegistry, fakeOperatingSystemBean, "doubleMethod", "doubleMethod");
DoubleGauge gauge = metricsRegistry.newDoubleGauge("doubleMethod");
assertEquals(fakeOperatingSystemBean.doubleMethod(), gauge.read(), 0.1);
}
|
public static <T extends Comparable<? super T>> T max(Collection<T> coll) {
return isEmpty(coll) ? null : Collections.max(coll);
}
|
@SuppressWarnings({"rawtypes", "unchecked"})
@Test
public void maxEmptyTest() {
final List<? extends Comparable> emptyList = Collections.emptyList();
assertNull(CollUtil.max(emptyList));
}
|
public static void destroyServer(ServerConfig serverConfig) {
try {
Server server = serverConfig.getServer();
if (server != null) {
serverConfig.setServer(null);
SERVER_MAP.remove(Integer.toString(serverConfig.getPort()));
server.destroy();
}
} catch (Exception e) {
LOGGER.error(LogCodes.getLog(LogCodes.ERROR_DESTROY_SERVER, serverConfig.getPort()), e);
}
}
|
@Test
public void destroyServer() {
ServerConfig serverConfig = new ServerConfig().setProtocol("test").setPort(1234);
Server server = serverConfig.buildIfAbsent();
Assert.assertNotNull(server);
Assert.assertEquals(1, ServerFactory.getServers().size());
serverConfig.destroy();
Assert.assertEquals(0, ServerFactory.getServers().size());
Assert.assertNull(serverConfig.getServer());
}
|
@GET
@Path("/apps/{appid}/containers/{containerid}")
@Produces(MediaType.APPLICATION_JSON)
public TimelineEntity getContainer(@Context HttpServletRequest req,
@Context HttpServletResponse res, @PathParam("appid") String appId,
@PathParam("containerid") String containerId,
@QueryParam("userid") String userId,
@QueryParam("flowname") String flowName,
@QueryParam("flowrunid") String flowRunId,
@QueryParam("confstoretrieve") String confsToRetrieve,
@QueryParam("metricstoretrieve") String metricsToRetrieve,
@QueryParam("fields") String fields,
@QueryParam("metricslimit") String metricsLimit,
@QueryParam("metricstimestart") String metricsTimeStart,
@QueryParam("metricstimeend") String metricsTimeEnd,
@QueryParam("entityidprefix") String entityIdPrefix) {
return getContainer(req, res, null, appId, containerId, userId, flowName,
flowRunId, confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
entityIdPrefix, metricsTimeStart, metricsTimeEnd);
}
|
@Test
void testGetContainer() throws Exception {
Client client = createClient();
try {
URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/"
+ "timeline/clusters/cluster1/apps/app1/"
+ "entities/YARN_CONTAINER/container_2_2");
ClientResponse resp = getResponse(client, uri);
TimelineEntity entities1 =
resp.getEntity(new GenericType<TimelineEntity>() {
});
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
resp.getType().toString());
assertNotNull(entities1);
uri = URI.create("http://localhost:" + serverPort + "/ws/v2/"
+ "timeline/clusters/cluster1/apps/app1/containers/container_2_2");
resp = getResponse(client, uri);
TimelineEntity entities2 =
resp.getEntity(new GenericType<TimelineEntity>() {
});
assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
assertNotNull(entities2);
assertEquals(entities1, entities2);
} finally {
client.destroy();
}
}
|
public static String unmangleXmlString(String str, boolean decodeEntityRefs)
throws UnmanglingError {
int slashPosition = -1;
String escapedCp = "";
StringBuilder bld = new StringBuilder();
StringBuilder entityRef = null;
for (int i = 0; i < str.length(); i++) {
char ch = str.charAt(i);
if (entityRef != null) {
entityRef.append(ch);
if (ch == ';') {
String e = entityRef.toString();
if (e.equals("&quot;")) {
    bld.append("\"");
} else if (e.equals("&apos;")) {
    bld.append("'");
} else if (e.equals("&amp;")) {
    bld.append("&");
} else if (e.equals("&lt;")) {
    bld.append("<");
} else if (e.equals("&gt;")) {
    bld.append(">");
} else {
throw new UnmanglingError("Unknown entity ref " + e);
}
entityRef = null;
}
} else if ((slashPosition >= 0) && (slashPosition < NUM_SLASH_POSITIONS)) {
escapedCp += ch;
++slashPosition;
} else if (slashPosition == NUM_SLASH_POSITIONS) {
if (ch != ';') {
throw new UnmanglingError("unterminated code point escape: " +
"expected semicolon at end.");
}
try {
bld.appendCodePoint(Integer.parseInt(escapedCp, 16));
} catch (NumberFormatException e) {
throw new UnmanglingError("error parsing unmangling escape code", e);
}
escapedCp = "";
slashPosition = -1;
} else if (ch == '\\') {
slashPosition = 0;
} else {
boolean startingEntityRef = false;
if (decodeEntityRefs) {
startingEntityRef = (ch == '&');
}
if (startingEntityRef) {
entityRef = new StringBuilder();
entityRef.append("&");
} else {
bld.append(ch);
}
}
}
if (entityRef != null) {
throw new UnmanglingError("unterminated entity ref starting with " +
entityRef.toString());
} else if (slashPosition != -1) {
throw new UnmanglingError("unterminated code point escape: string " +
"broke off in the middle");
}
return bld.toString();
}
|
@Test
public void testInvalidSequence() throws Exception {
try {
XMLUtils.unmangleXmlString("\\000g;foo", false);
Assert.fail("expected an unmangling error");
} catch (UnmanglingError e) {
// pass through
}
try {
XMLUtils.unmangleXmlString("\\0", false);
Assert.fail("expected an unmangling error");
} catch (UnmanglingError e) {
// pass through
}
}
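// --- Hedged example (not in the source tests) --------------------------------
// A code-point escape is '\' followed by four hex digits and ';'; the digit
// count is inferred from the "\\000g;" input above, not stated in the source.
@Test
public void testUnmangleCodePointEscape() throws Exception {
    Assert.assertEquals("Abc", XMLUtils.unmangleXmlString("\\0041;bc", false)); // 0x41 == 'A'
}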
|
public static ShardingRouteEngine newInstance(final ShardingRule shardingRule, final ShardingSphereDatabase database, final QueryContext queryContext,
final ShardingConditions shardingConditions, final ConfigurationProperties props, final ConnectionContext connectionContext) {
SQLStatementContext sqlStatementContext = queryContext.getSqlStatementContext();
SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
if (sqlStatement instanceof TCLStatement) {
return new ShardingDatabaseBroadcastRoutingEngine();
}
if (sqlStatement instanceof DDLStatement) {
if (sqlStatementContext instanceof CursorAvailable) {
return getCursorRouteEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props);
}
return getDDLRoutingEngine(shardingRule, database, sqlStatementContext);
}
if (sqlStatement instanceof DALStatement) {
return getDALRoutingEngine(shardingRule, database, sqlStatementContext, connectionContext);
}
if (sqlStatement instanceof DCLStatement) {
return getDCLRoutingEngine(shardingRule, database, sqlStatementContext);
}
return getDQLRoutingEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props, connectionContext);
}
|
@Test
void assertNewInstanceForShowColumnsWithTableRule() {
DALStatement dalStatement = mock(MySQLShowColumnsStatement.class);
when(sqlStatementContext.getSqlStatement()).thenReturn(dalStatement);
tableNames.add("table_1");
when(shardingRule.getShardingRuleTableNames(tableNames)).thenReturn(tableNames);
QueryContext queryContext = new QueryContext(sqlStatementContext, "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class));
ShardingRouteEngine actual =
ShardingRouteEngineFactory.newInstance(shardingRule, database, queryContext, shardingConditions, props, new ConnectionContext(Collections::emptySet));
assertThat(actual, instanceOf(ShardingUnicastRoutingEngine.class));
}
|
public void resetTo(int position) {
ensureCapacity(position);
size = position;
}
|
@Test
public void testResetTo() throws Exception {
RandomAccessData stream = new RandomAccessData();
stream.asOutputStream().write(TEST_DATA_A);
stream.resetTo(1);
assertEquals(1, stream.size());
stream.asOutputStream().write(TEST_DATA_A);
assertArrayEquals(
new byte[] {0x01, 0x01, 0x02, 0x03}, Arrays.copyOf(stream.array(), stream.size()));
}
|
public static void validate(WindowConfig windowConfig) {
if (windowConfig.getWindowLengthDurationMs() == null && windowConfig.getWindowLengthCount() == null) {
throw new IllegalArgumentException("Window length is not specified");
}
if (windowConfig.getWindowLengthDurationMs() != null && windowConfig.getWindowLengthCount() != null) {
throw new IllegalArgumentException(
"Window length for time and count are set! Please set one or the other.");
}
if (windowConfig.getWindowLengthCount() != null) {
if (windowConfig.getWindowLengthCount() <= 0) {
throw new IllegalArgumentException(
"Window length must be positive [" + windowConfig.getWindowLengthCount() + "]");
}
}
if (windowConfig.getWindowLengthDurationMs() != null) {
if (windowConfig.getWindowLengthDurationMs() <= 0) {
throw new IllegalArgumentException(
"Window length must be positive [" + windowConfig.getWindowLengthDurationMs() + "]");
}
}
if (windowConfig.getSlidingIntervalCount() != null) {
if (windowConfig.getSlidingIntervalCount() <= 0) {
throw new IllegalArgumentException(
"Sliding interval must be positive [" + windowConfig.getSlidingIntervalCount() + "]");
}
}
if (windowConfig.getSlidingIntervalDurationMs() != null) {
if (windowConfig.getSlidingIntervalDurationMs() <= 0) {
throw new IllegalArgumentException(
"Sliding interval must be positive [" + windowConfig.getSlidingIntervalDurationMs() + "]");
}
}
if (windowConfig.getTimestampExtractorClassName() != null) {
if (windowConfig.getMaxLagMs() != null) {
if (windowConfig.getMaxLagMs() < 0) {
throw new IllegalArgumentException(
        "Lag duration must not be negative [" + windowConfig.getMaxLagMs() + "]");
}
}
if (windowConfig.getWatermarkEmitIntervalMs() != null) {
if (windowConfig.getWatermarkEmitIntervalMs() <= 0) {
throw new IllegalArgumentException(
"Watermark interval must be positive [" + windowConfig.getWatermarkEmitIntervalMs() + "]");
}
}
}
}
|
@Test
public void testSettingTumblingTimeWindow() throws Exception {
final Object[] args = new Object[]{-1L, 0L, 1L, 2L, 5L, 10L, null};
for (Object arg : args) {
Object arg0 = arg;
try {
Long windowLengthDuration = null;
if (arg0 != null) {
windowLengthDuration = (Long) arg0;
}
WindowConfig windowConfig = new WindowConfig();
windowConfig.setWindowLengthDurationMs(windowLengthDuration);
WindowConfigUtils.validate(windowConfig);
if (arg0 == null) {
fail(String.format("Window count duration cannot be null -- windowLengthDuration: %s",
arg0));
}
if ((Long) arg0 <= 0) {
fail(String.format("Window length cannot be zero or less -- windowLengthDuration: %s",
arg0));
}
} catch (IllegalArgumentException e) {
if (arg0 != null && (Long) arg0 > 0) {
fail(String.format("Exception: %s thrown on valid input -- windowLengthDuration: %s", e
.getMessage(), arg0));
}
}
}
}
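// --- Hedged example (not in the source tests) --------------------------------
// A configuration that validate() accepts: exactly one window-length setting
// plus a positive sliding interval. The setter names mirror the getters used
// by validate() and are assumed to exist.
@Test
public void testValidCountBasedWindow() throws Exception {
    WindowConfig windowConfig = new WindowConfig();
    windowConfig.setWindowLengthCount(10);
    windowConfig.setSlidingIntervalCount(5);
    WindowConfigUtils.validate(windowConfig); // completes without throwing
}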
|
@Override
public Map<Integer, List<Permission>> getPrintableSpecifiedPermissions(ApplicationId appId) {
return null;
}
|
@Test
public void testGetPrintableSpecifiedPermissions() {
Map<Integer, List<Permission>> result = getPrintablePermissionMap(getMaximumPermissions(appId));
assertNotNull(result.get(1).get(0));
assertTrue(result.get(1).size() > 0);
assertEquals("testNameAdmin", result.get(1).get(0).getName());
}
|
public void clear() {
final int arrayOffset = getHeadElementIndex();
Arrays.fill(queue, arrayOffset, arrayOffset + size, null);
size = 0;
}
|
@Test
void testClear() {
HeapPriorityQueue<TestElement> priorityQueueSet = newPriorityQueue(1);
int count = 10;
HashSet<TestElement> checkSet = new HashSet<>(count);
insertRandomElements(priorityQueueSet, checkSet, count);
assertThat(priorityQueueSet.size()).isEqualTo(count);
priorityQueueSet.clear();
assertThat(priorityQueueSet.size()).isZero();
}
|
public ReducingStateDescriptor(
String name, ReduceFunction<T> reduceFunction, Class<T> typeClass) {
super(name, typeClass, null);
this.reduceFunction = checkNotNull(reduceFunction);
if (reduceFunction instanceof RichFunction) {
throw new UnsupportedOperationException(
"ReduceFunction of ReducingState can not be a RichFunction.");
}
}
|
@Test
void testReducingStateDescriptor() throws Exception {
ReduceFunction<String> reducer = (a, b) -> a;
TypeSerializer<String> serializer =
new KryoSerializer<>(String.class, new SerializerConfigImpl());
ReducingStateDescriptor<String> descr =
new ReducingStateDescriptor<>("testName", reducer, serializer);
assertThat(descr.getName()).isEqualTo("testName");
assertThat(descr.getSerializer()).isNotNull();
assertThat(descr.getSerializer()).isEqualTo(serializer);
assertThat(descr.getReduceFunction()).isEqualTo(reducer);
ReducingStateDescriptor<String> copy = CommonTestUtils.createCopySerializable(descr);
assertThat(copy.getName()).isEqualTo("testName");
assertThat(copy.getSerializer()).isNotNull();
assertThat(copy.getSerializer()).isEqualTo(serializer);
}
|
public static void setCallId(Operation op, long callId) {
op.setCallId(callId);
}
|
@Test
public void testSetCallId() {
Operation operation = new DummyOperation();
setCallId(operation, 10);
assertEquals(10, operation.getCallId());
}
|
@Override
public void warmUpEncryptedKeys(String... keyNames) throws IOException {
Preconditions.checkArgument(providers.length > 0,
"No providers are configured");
boolean success = false;
IOException e = null;
for (KMSClientProvider provider : providers) {
try {
provider.warmUpEncryptedKeys(keyNames);
success = true;
} catch (IOException ioe) {
e = ioe;
LOG.error("Error warming up keys for provider with url ["
    + provider.getKMSUrl() + "]", ioe);
}
}
if (!success && e != null) {
throw e;
}
}
|
@Test
public void testWarmUpEncryptedKeysWhenAllProvidersFail() throws Exception {
Configuration conf = new Configuration();
KMSClientProvider p1 = mock(KMSClientProvider.class);
String keyName = "key1";
Mockito.doThrow(new IOException(new AuthorizationException("p1"))).when(p1)
.warmUpEncryptedKeys(Mockito.anyString());
KMSClientProvider p2 = mock(KMSClientProvider.class);
Mockito.doThrow(new IOException(new AuthorizationException("p2"))).when(p2)
.warmUpEncryptedKeys(Mockito.anyString());
when(p1.getKMSUrl()).thenReturn("p1");
when(p2.getKMSUrl()).thenReturn("p2");
LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
new KMSClientProvider[] {p1, p2}, 0, conf);
try {
kp.warmUpEncryptedKeys(keyName);
fail("Should fail since both providers threw IOException");
} catch (Exception e) {
assertTrue(e.getCause() instanceof IOException);
}
Mockito.verify(p1, Mockito.times(1)).warmUpEncryptedKeys(keyName);
Mockito.verify(p2, Mockito.times(1)).warmUpEncryptedKeys(keyName);
}
|
public Collection<SQLException> closeConnections(final boolean forceRollback) {
Collection<SQLException> result = new LinkedList<>();
synchronized (cachedConnections) {
resetSessionVariablesIfNecessary(cachedConnections.values(), result);
for (Connection each : cachedConnections.values()) {
try {
if (forceRollback && connectionSession.getTransactionStatus().isInTransaction()) {
each.rollback();
}
each.close();
} catch (final SQLException ex) {
result.add(ex);
}
}
cachedConnections.clear();
}
if (!forceRollback) {
connectionPostProcessors.clear();
}
return result;
}
|
@Test
void assertCloseConnectionsAndFailedToResetVariables() throws SQLException {
connectionSession.getRequiredSessionVariableRecorder().setVariable("key", "default");
Connection connection = mock(Connection.class, RETURNS_DEEP_STUBS);
when(connection.getMetaData().getDatabaseProductName()).thenReturn("PostgreSQL");
SQLException expectedException = new SQLException("");
when(connection.createStatement()).thenThrow(expectedException);
databaseConnectionManager.getCachedConnections().put("", connection);
Collection<SQLException> actualExceptions = databaseConnectionManager.closeConnections(false);
assertThat(actualExceptions, is(Collections.singletonList(expectedException)));
}
|
public Icon getIcon(final Entry entry) {
if (entry.getAttribute(ICON_INSTANCE) != null)
return entry.getAttribute(ICON_INSTANCE);
String key = (String) entry.getAttribute(ICON);
if (key == null) {
String name = entry.getName();
key = name + ".icon";
}
final Icon icon = resourceAccessor.getIcon(key);
return icon;
}
|
@Test
public void getsIconFromEntryAttributeIcon() throws Exception {
final Icon icon = new ImageIcon();
entry.setAttribute(EntryAccessor.ICON_INSTANCE, icon);
final Icon entryIcon = entryAccessor.getIcon(entry);
assertThat(entryIcon, equalTo(icon));
}
|
@Override
public void upgrade() {
try {
streamService.load(Stream.DEFAULT_STREAM_ID);
} catch (NotFoundException ignored) {
createDefaultStream();
}
}
|
@Test
public void upgrade() throws Exception {
final ArgumentCaptor<Stream> streamArgumentCaptor = ArgumentCaptor.forClass(Stream.class);
when(streamService.load("000000000000000000000001")).thenThrow(NotFoundException.class);
when(indexSetRegistry.getDefault()).thenReturn(indexSet);
migration.upgrade();
verify(streamService).save(streamArgumentCaptor.capture());
final Stream stream = streamArgumentCaptor.getValue();
assertThat(stream.getTitle()).isEqualTo("Default Stream");
assertThat(stream.getDisabled()).isFalse();
assertThat(stream.getMatchingType()).isEqualTo(StreamImpl.MatchingType.DEFAULT);
}
|
protected String toString(final Map<String, String> configuration) {
return String.join(", ", configuration.entrySet().stream().map(entry -> String.format("%s%s",
entry.getKey(), StringUtils.isNotBlank(entry.getValue()) ? String.format("=%s", entry.getValue()) : StringUtils.EMPTY)).collect(Collectors.toList()));
}
|
@Test
public void testToString() {
final Map<String, String> properties = new LinkedHashMap<>();
properties.put("fs.sync.mode", "online");
properties.put("fs.sync.indexer.enable", "");
properties.put("fs.lock.enable", "");
properties.put("fs.buffer.enable", "");
assertEquals("fs.sync.mode=online, fs.sync.indexer.enable, fs.lock.enable, fs.buffer.enable",
new BrickPreferencesRequestInterceptor().toString(properties));
}
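// --- Hedged alternative sketch (not the project's code) ----------------------
// The same formatting via Collectors.joining, which avoids materializing the
// intermediate List that String.join requires.
protected String toString(final Map<String, String> configuration) {
    return configuration.entrySet().stream()
            .map(entry -> StringUtils.isNotBlank(entry.getValue())
                    ? entry.getKey() + "=" + entry.getValue()
                    : entry.getKey())
            .collect(Collectors.joining(", "));
}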
|
void decode(int streamId, ByteBuf in, Http2Headers headers, boolean validateHeaders) throws Http2Exception {
Http2HeadersSink sink = new Http2HeadersSink(
streamId, headers, maxHeaderListSize, validateHeaders);
// Check for dynamic table size updates, which must occur at the beginning:
// https://www.rfc-editor.org/rfc/rfc7541.html#section-4.2
decodeDynamicTableSizeUpdates(in);
decode(in, sink);
// Now that we've read all of our headers we can perform the validation steps. We must
// delay throwing until this point to prevent dynamic table corruption.
sink.finish();
}
|
@Test
public void testLiteralWithIncrementalIndexingWithLargeValue() throws Http2Exception {
// Ignore header that exceeds max header size
final StringBuilder sb = new StringBuilder();
sb.append("4004");
sb.append(hex("name"));
sb.append("7F813F");
for (int i = 0; i < 8192; i++) {
sb.append("61"); // 'a'
}
assertThrows(Http2Exception.class, new Executable() {
@Override
public void execute() throws Throwable {
decode(sb.toString());
}
});
}
|
@Override
public CloudConfiguration getCloudConfiguration() {
return hdfsEnvironment.getCloudConfiguration();
}
|
@Test
public void testGetCloudConfiguration() {
CloudConfiguration cc = hudiMetadata.getCloudConfiguration();
Assert.assertEquals(cc.getCloudType(), CloudType.DEFAULT);
}
|
@Override
protected Result[] run(String value) {
final Grok grok = grokPatternRegistry.cachedGrokForPattern(this.pattern, this.namedCapturesOnly);
// the extractor instance is rebuilt every second anyway
final Match match = grok.match(value);
final Map<String, Object> matches = match.captureFlattened();
final List<Result> results = new ArrayList<>(matches.size());
for (final Map.Entry<String, Object> entry : matches.entrySet()) {
// never add null values to the results, those don't make sense for us
if (entry.getValue() != null) {
results.add(new Result(entry.getValue(), entry.getKey(), -1, -1));
}
}
return results.toArray(new Result[0]);
}
|
@Test
public void testDatatypeExtraction() {
final GrokExtractor extractor = makeExtractor("%{NUMBER:number;int}");
final Extractor.Result[] results = extractor.run("199999");
assertEquals("NUMBER is marked as UNWANTED and does not generate a field", 1, results.length);
assertEquals(Integer.class, results[0].getValue().getClass());
assertEquals(199999, results[0].getValue());
}
|
@Override
public void unbindSocialUser(Long userId, Integer userType, Integer socialType, String openid) {
// Get the SocialUserDO social user matching the openid
SocialUserDO socialUser = socialUserMapper.selectByTypeAndOpenid(socialType, openid);
if (socialUser == null) {
throw exception(SOCIAL_USER_NOT_FOUND);
}
// Delete the corresponding social binding relation
socialUserBindMapper.deleteByUserTypeAndUserIdAndSocialType(userType, userId, socialUser.getType());
}
|
@Test
public void testUnbindSocialUser_success() {
// Prepare parameters
Long userId = 1L;
Integer userType = UserTypeEnum.ADMIN.getValue();
Integer type = SocialTypeEnum.GITEE.getType();
String openid = "test_openid";
// Mock data: social user
SocialUserDO socialUser = randomPojo(SocialUserDO.class).setType(type).setOpenid(openid);
socialUserMapper.insert(socialUser);
// Mock data: social binding relation
SocialUserBindDO socialUserBind = randomPojo(SocialUserBindDO.class).setUserType(userType)
.setUserId(userId).setSocialType(type);
socialUserBindMapper.insert(socialUserBind);
// Invoke
socialUserService.unbindSocialUser(userId, userType, type, openid);
// Assert
assertEquals(0, socialUserBindMapper.selectCount(null).intValue());
}
|
@Override
public boolean isAllowable(URL url, Invocation invocation) {
int rate = url.getMethodParameter(RpcUtils.getMethodName(invocation), TPS_LIMIT_RATE_KEY, -1);
long interval = url.getMethodParameter(
RpcUtils.getMethodName(invocation), TPS_LIMIT_INTERVAL_KEY, DEFAULT_TPS_LIMIT_INTERVAL);
String serviceKey = url.getServiceKey();
if (rate > 0) {
StatItem statItem = stats.get(serviceKey);
if (statItem == null) {
stats.putIfAbsent(serviceKey, new StatItem(serviceKey, rate, interval));
statItem = stats.get(serviceKey);
} else {
// rate or interval has changed, rebuild
if (statItem.getRate() != rate || statItem.getInterval() != interval) {
stats.put(serviceKey, new StatItem(serviceKey, rate, interval));
statItem = stats.get(serviceKey);
}
}
return statItem.isAllowable();
} else {
StatItem statItem = stats.get(serviceKey);
if (statItem != null) {
stats.remove(serviceKey);
}
}
return true;
}
|
@Test
void testTPSLimiterForMethodLevelConfig() {
Invocation invocation = new MockInvocation();
URL url = URL.valueOf("test://test");
url = url.addParameter(INTERFACE_KEY, "org.apache.dubbo.rpc.file.TpsService");
url = url.addParameter(TPS_LIMIT_RATE_KEY, TEST_LIMIT_RATE);
int tpsConfigForMethodLevel = 3;
url = url.addParameter("echo.tps", tpsConfigForMethodLevel);
url = url.addParameter(TPS_LIMIT_INTERVAL_KEY, 1000);
for (int i = 1; i <= tpsConfigForMethodLevel + 1; i++) {
if (i == tpsConfigForMethodLevel + 1) {
Assertions.assertFalse(defaultTPSLimiter.isAllowable(url, invocation));
} else {
Assertions.assertTrue(defaultTPSLimiter.isAllowable(url, invocation));
}
}
}
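// --- Hedged note (not from the source) ---------------------------------------
// "echo.tps" above is a method-level override. URL.getMethodParameter falls
// back to the plain parameter when no method-specific value is set (assumed
// standard Dubbo URL behavior), so a service-wide limit needs no method suffix:
URL serviceWideUrl = URL.valueOf("test://test")
        .addParameter(TPS_LIMIT_RATE_KEY, 5)
        .addParameter(TPS_LIMIT_INTERVAL_KEY, 1000);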
|
@Override
protected Result[] run(String value) {
final Map<String, Object> extractedJson;
try {
extractedJson = extractJson(value);
} catch (IOException e) {
throw new ExtractorException(e);
}
final List<Result> results = new ArrayList<>(extractedJson.size());
for (Map.Entry<String, Object> entry : extractedJson.entrySet()) {
results.add(new Result(entry.getValue(), entry.getKey(), -1, -1));
}
return results.toArray(new Result[results.size()]);
}
|
@Test
public void testRunWithObjectAndDifferentKeySeparator() throws Exception {
final JsonExtractor jsonExtractor = new JsonExtractor(new MetricRegistry(), "json", "title", 0L, Extractor.CursorStrategy.COPY,
"source", "target", ImmutableMap.<String, Object>of("key_separator", ":"), "user", Collections.<Converter>emptyList(), Extractor.ConditionType.NONE,
"");
final String value = "{\"object\": {\"text\": \"foobar\", \"number\": 1234.5678, \"bool\": true, \"nested\": {\"text\": \"foobar\"}}}";
final Extractor.Result[] results = jsonExtractor.run(value);
assertThat(results).contains(
new Extractor.Result("foobar", "object:text", -1, -1),
new Extractor.Result(1234.5678, "object:number", -1, -1),
new Extractor.Result(true, "object:bool", -1, -1),
new Extractor.Result("foobar", "object:nested:text", -1, -1)
);
}
|
public Future<Void> maybeRollingUpdate(Reconciliation reconciliation, int replicas, Labels selectorLabels, Function<Pod, List<String>> podRestart, TlsPemIdentity coTlsPemIdentity) {
String namespace = reconciliation.namespace();
// We prepare the list of expected Pods. This is needed as we need to account for pods which might be missing.
// We need to wait for them before rolling any running pods to avoid problems.
List<String> expectedPodNames = new ArrayList<>();
for (int i = 0; i < replicas; i++) {
expectedPodNames.add(KafkaResources.zookeeperPodName(reconciliation.name(), i));
}
return podOperator.listAsync(namespace, selectorLabels)
.compose(pods -> {
ZookeeperClusterRollContext clusterRollContext = new ZookeeperClusterRollContext();
for (String podName : expectedPodNames) {
Pod pod = pods.stream().filter(p -> podName.equals(p.getMetadata().getName())).findFirst().orElse(null);
if (pod != null) {
List<String> restartReasons = podRestart.apply(pod);
final boolean ready = podOperator.isReady(namespace, pod.getMetadata().getName());
ZookeeperPodContext podContext = new ZookeeperPodContext(podName, restartReasons, true, ready);
if (restartReasons != null && !restartReasons.isEmpty()) {
LOGGER.debugCr(reconciliation, "Pod {} should be rolled due to {}", podContext.getPodName(), restartReasons);
} else {
LOGGER.debugCr(reconciliation, "Pod {} does not need to be rolled", podContext.getPodName());
}
clusterRollContext.add(podContext);
} else {
// Pod does not exist, but we still add it to the roll context because we should not roll
// any other pods before it is ready
LOGGER.debugCr(reconciliation, "Pod {} does not exist and cannot be rolled", podName);
ZookeeperPodContext podContext = new ZookeeperPodContext(podName, null, false, false);
clusterRollContext.add(podContext);
}
}
if (clusterRollContext.requiresRestart()) {
return Future.succeededFuture(clusterRollContext);
} else {
return Future.succeededFuture(null);
}
}).compose(clusterRollContext -> {
if (clusterRollContext != null) {
Promise<Void> promise = Promise.promise();
Future<String> leaderFuture = leaderFinder.findZookeeperLeader(reconciliation, clusterRollContext.podNames(), coTlsPemIdentity);
leaderFuture.compose(leader -> {
LOGGER.debugCr(reconciliation, "Zookeeper leader is " + (ZookeeperLeaderFinder.UNKNOWN_LEADER.equals(leader) ? "unknown" : "pod " + leader));
Future<Void> fut = Future.succeededFuture();
// Then roll each non-leader pod => the leader is rolled last
for (ZookeeperPodContext podContext : clusterRollContext.getPodContextsWithNonExistingAndNonReadyFirst()) {
if (podContext.requiresRestart() && !podContext.getPodName().equals(leader)) {
LOGGER.debugCr(reconciliation, "Pod {} needs to be restarted", podContext.getPodName());
// roll the pod and wait until it is ready
// this prevents rolling into faulty state (note: this applies just for ZK pods)
fut = fut.compose(ignore -> restartPod(reconciliation, podContext.getPodName(), podContext.reasonsToRestart));
} else {
if (podContext.requiresRestart()) {
LOGGER.debugCr(reconciliation, "Deferring restart of leader {}", podContext.getPodName());
} else {
LOGGER.debugCr(reconciliation, "Pod {} does not need to be restarted", podContext.getPodName());
}
fut = fut.compose(ignore -> podOperator.readiness(reconciliation, reconciliation.namespace(), podContext.getPodName(), READINESS_POLLING_INTERVAL_MS, operationTimeoutMs));
}
}
// Check if we have a leader and if it needs rolling
if (ZookeeperLeaderFinder.UNKNOWN_LEADER.equals(leader) || clusterRollContext.get(leader) == null || !clusterRollContext.get(leader).requiresRestart()) {
return fut;
} else {
// Roll the leader pod
return fut.compose(ar -> {
// the leader is rolled as the last
LOGGER.debugCr(reconciliation, "Restarting leader pod (previously deferred) {}", leader);
return restartPod(reconciliation, leader, clusterRollContext.get(leader).reasonsToRestart);
});
}
}).onComplete(promise);
return promise.future();
} else {
return Future.succeededFuture();
}
});
}
|
@Test
public void testNonReadinessOfLeaderCanPreventAllPodRestarts(VertxTestContext context) {
final String followerPod1NeedsRestart = "name-zookeeper-1";
final String leaderPodNeedsRestartNonReady = "name-zookeeper-2";
final String followerPod2NeedsRestart = "name-zookeeper-0";
final Set<String> needsRestart = Set.of(followerPod2NeedsRestart, leaderPodNeedsRestartNonReady);
Function<Pod, List<String>> shouldRestart = pod -> {
if (needsRestart.contains(pod.getMetadata().getName())) {
return List.of("Should restart");
} else {
return List.of();
}
};
PodOperator podOperator = mock(PodOperator.class);
when(podOperator.isReady(any(), eq(followerPod2NeedsRestart))).thenReturn(true);
when(podOperator.isReady(any(), eq(followerPod1NeedsRestart))).thenReturn(true);
when(podOperator.isReady(any(), eq(leaderPodNeedsRestartNonReady))).thenReturn(false);
when(podOperator.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(PODS));
when(podOperator.readiness(any(), any(), eq(leaderPodNeedsRestartNonReady), anyLong(), anyLong())).thenReturn(Future.failedFuture("failure"));
ZookeeperLeaderFinder leaderFinder = mock(ZookeeperLeaderFinder.class);
when(leaderFinder.findZookeeperLeader(any(), any(), any())).thenReturn(Future.succeededFuture(leaderPodNeedsRestartNonReady));
MockZooKeeperRoller roller = new MockZooKeeperRoller(podOperator, leaderFinder, 300_00L);
roller.maybeRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, 3, DUMMY_SELECTOR, shouldRestart, DUMMY_IDENTITY)
.onComplete(context.failing(v -> context.verify(() -> {
assertThat(roller.podRestarts.size(), is(0));
context.completeNow();
})));
}
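// --- Hedged note (not from the source) ---------------------------------------
// Roll order in maybeRollingUpdate: missing and non-ready pods come first,
// followers are restarted before the leader, and the leader is deferred to the
// end; a leader that never becomes ready (as mocked above) therefore blocks
// every other restart.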
|
public void setSignatureKeyFileName(String signatureKeyFileName) {
this.signatureKeyFileName = signatureKeyFileName;
}
|
@Test
void testSignatureVerificationOptionIgnore() throws Exception {
// the encryptor sends a signed PGP message; the decryptor ignores the signature
decryptor.setSignatureVerificationOption(PGPKeyAccessDataFormat.SIGNATURE_VERIFICATION_OPTION_IGNORE);
decryptor.setSignatureKeyUserids(null);
decryptor.setSignatureKeyFileName(null); // no public keyring! --> no signature validation possible
String payload = "Test Message";
MockEndpoint mock = getMockEndpoint("mock:unencrypted");
mock.expectedBodiesReceived(payload);
template.sendBody("direct:subkey", payload);
MockEndpoint.assertIsSatisfied(context);
}
|
public boolean isTaskDeployedAsFinished() {
if (jobManagerTaskRestore == null) {
return false;
}
return jobManagerTaskRestore.getTaskStateSnapshot().isTaskDeployedAsFinished();
}
|
@Test
void testStateRetrievingWithFinishedOperator() {
TaskStateSnapshot taskStateSnapshot = TaskStateSnapshot.FINISHED_ON_RESTORE;
JobManagerTaskRestore jobManagerTaskRestore =
new JobManagerTaskRestore(2, taskStateSnapshot);
TaskStateManagerImpl stateManager =
new TaskStateManagerImpl(
new JobID(),
createExecutionAttemptId(),
new TestTaskLocalStateStore(),
null,
null,
new TaskExecutorStateChangelogStoragesManager(),
jobManagerTaskRestore,
new TestCheckpointResponder());
assertThat(stateManager.isTaskDeployedAsFinished()).isTrue();
}
|
public void flush() {
try {
out.flush();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
|
@Test
void flush() {
out.append("Hello");
assertThat(bytes, bytes(equalTo("")));
out.flush();
assertThat(bytes, bytes(equalTo("Hello")));
}
|
protected List<String> parse(final int response, final String[] reply) {
final List<String> result = new ArrayList<String>(reply.length);
for(final String line : reply) {
// Some servers include the status code for every line.
if(line.startsWith(String.valueOf(response))) {
try {
String stripped = line;
stripped = StringUtils.strip(StringUtils.removeStart(stripped, String.format("%d-", response)));
stripped = StringUtils.strip(StringUtils.removeStart(stripped, String.valueOf(response)));
result.add(stripped);
}
catch(IndexOutOfBoundsException e) {
log.error(String.format("Failed parsing line %s", line), e);
}
}
else {
result.add(StringUtils.strip(line));
}
}
return result;
}
|
@Test
public void testParseEgnyte() throws Exception {
final List<String> lines = Arrays.asList(
"200-drwx------ 0 - - 0 Jun 17 07:59 core",
"200 -rw------- 0 David-Kocher - 529 Jun 17 07:59 App.config");
final FTPFileEntryParser parser = new LaxUnixFTPEntryParser();
final List<String> list = new FTPStatListService(null, parser).parse(
200, lines.toArray(new String[lines.size()]));
assertEquals(2, list.size());
assertTrue(list.contains("drwx------ 0 - - 0 Jun 17 07:59 core"));
assertTrue(list.contains("-rw------- 0 David-Kocher - 529 Jun 17 07:59 App.config"));
final Path parent = new Path("/cyberduck", EnumSet.of(Path.Type.directory));
final AttributedList<Path> parsed = new FTPListResponseReader(parser, true).read(
parent, list);
assertEquals(2, parsed.size());
}
|
public RemotingDesc parserRemotingServiceInfo(Object bean, String beanName, RemotingParser remotingParser) {
if (remotingServiceMap.containsKey(bean)) {
return remotingServiceMap.get(bean);
}
RemotingDesc remotingBeanDesc = remotingParser.getServiceDesc(bean, beanName);
if (remotingBeanDesc == null) {
return null;
}
remotingServiceMap.put(bean, remotingBeanDesc);
if (remotingParser.isReference(bean, beanName)) {
//reference bean, TCC proxy
remotingBeanDesc.setReference(true);
}
return remotingBeanDesc;
}
|
@Test
public void testParserRemotingServiceInfo() {
SimpleRemoteBean remoteBean = new SimpleRemoteBean();
SimpleRemotingParser parser = new SimpleRemotingParser();
RemotingDesc desc = remotingParser.parserRemotingServiceInfo(remoteBean, remoteBean.getClass().getName(),
parser);
assertEquals(desc, remotingParser.parserRemotingServiceInfo(remoteBean, remoteBean.getClass().getName(),
parser));
assertEquals(Protocols.IN_JVM, desc.getProtocol());
assertEquals(SimpleRemoteBean.class, desc.getServiceClass());
}
|
@Override
@SuppressWarnings("all")
protected Mono<Void> doExecute(final ServerWebExchange exchange, final ShenyuPluginChain chain,
final SelectorData selector, final RuleData rule) {
String param = exchange.getAttribute(Constants.PARAM_TRANSFORM);
ShenyuContext shenyuContext = exchange.getAttribute(Constants.CONTEXT);
assert shenyuContext != null;
MetaData metaData = exchange.getAttribute(Constants.META_DATA);
if (!checkMetaData(metaData)) {
assert metaData != null;
LOG.error("path is :{}, meta data have error.... {}", shenyuContext.getPath(), metaData);
exchange.getResponse().setStatusCode(HttpStatus.INTERNAL_SERVER_ERROR);
Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.META_DATA_ERROR);
return WebFluxResultUtils.result(exchange, error);
}
if (StringUtils.isNoneBlank(metaData.getParameterTypes()) && StringUtils.isBlank(param)) {
exchange.getResponse().setStatusCode(HttpStatus.INTERNAL_SERVER_ERROR);
Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.MOTAN_HAVE_BODY_PARAM);
return WebFluxResultUtils.result(exchange, error);
}
final Mono<Object> result = motanProxyService.genericInvoker(param, metaData, exchange);
return result.then(chain.execute(exchange));
}
|
@Test
public void testDoExecute() {
when(chain.execute(exchange)).thenReturn(Mono.empty());
Mono<Void> result = motanPlugin.doExecute(exchange, chain, selectorData, ruleData);
StepVerifier.create(result).expectSubscription().verifyComplete();
}
|
@Override
public BytesInput getBytes() {
// The Page Header should include: blockSizeInValues, numberOfMiniBlocks, totalValueCount
if (deltaValuesToFlush != 0) {
flushBlockBuffer();
}
return BytesInput.concat(
config.toBytesInput(),
BytesInput.fromUnsignedVarInt(totalValueCount),
BytesInput.fromZigZagVarLong(firstValue),
BytesInput.from(baos));
}
|
@Test
public void shouldSkip() throws IOException {
long[] data = new long[5 * blockSize + 1];
for (int i = 0; i < data.length; i++) {
data[i] = i * 32;
}
writeData(data);
reader = new DeltaBinaryPackingValuesReader();
reader.initFromPage(100, writer.getBytes().toInputStream());
for (int i = 0; i < data.length; i++) {
if (i % 3 == 0) {
reader.skip();
} else {
assertEquals(i * 32, reader.readLong());
}
}
}
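// --- Hedged note (not from the source) ---------------------------------------
// Page layout produced by getBytes(): <mini-block config> <totalValueCount as
// unsigned varint> <firstValue as zigzag varlong> <block payload>; the
// reader's initFromPage above consumes the stream in that same order.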
|
public Collection<String> getCandidateEIPs(String myInstanceId, String myZone) {
if (myZone == null) {
myZone = "us-east-1d";
}
Collection<String> eipCandidates = clientConfig.shouldUseDnsForFetchingServiceUrls()
? getEIPsForZoneFromDNS(myZone)
: getEIPsForZoneFromConfig(myZone);
if (eipCandidates == null || eipCandidates.size() == 0) {
throw new RuntimeException("Could not get any elastic ips from the EIP pool for zone :" + myZone);
}
return eipCandidates;
}
|
@Test
public void shouldFilterNonElasticNames() {
when(config.getRegion()).thenReturn("us-east-1");
List<String> hosts = Lists.newArrayList("example.com", "ec2-1-2-3-4.compute.amazonaws.com", "5.6.7.8",
"ec2-101-202-33-44.compute.amazonaws.com");
when(config.getEurekaServerServiceUrls(any(String.class))).thenReturn(hosts);
Collection<String> returnValue = eipManager.getCandidateEIPs("i-123", "us-east-1d");
assertEquals(2, returnValue.size());
assertTrue(returnValue.contains("1.2.3.4"));
assertTrue(returnValue.contains("101.202.33.44"));
}
|
@Override
public DeleteRecordsResult deleteRecords(final Map<TopicPartition, RecordsToDelete> recordsToDelete,
final DeleteRecordsOptions options) {
SimpleAdminApiFuture<TopicPartition, DeletedRecords> future = DeleteRecordsHandler.newFuture(recordsToDelete.keySet());
int timeoutMs = defaultApiTimeoutMs;
if (options.timeoutMs() != null) {
timeoutMs = options.timeoutMs();
}
DeleteRecordsHandler handler = new DeleteRecordsHandler(recordsToDelete, logContext, timeoutMs);
invokeDriver(handler, future, options.timeoutMs);
return new DeleteRecordsResult(future.all());
}
|
@Test
public void testDeleteRecordsMultipleSends() throws Exception {
String topic = "foo";
TopicPartition tp0 = new TopicPartition(topic, 0);
TopicPartition tp1 = new TopicPartition(topic, 1);
MockTime time = new MockTime();
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, mockCluster(3, 0))) {
List<Node> nodes = env.cluster().nodes();
List<MetadataResponse.PartitionMetadata> partitionMetadata = new ArrayList<>();
partitionMetadata.add(new MetadataResponse.PartitionMetadata(Errors.NONE, tp0,
Optional.of(nodes.get(0).id()), Optional.of(5), singletonList(nodes.get(0).id()),
singletonList(nodes.get(0).id()), Collections.emptyList()));
partitionMetadata.add(new MetadataResponse.PartitionMetadata(Errors.NONE, tp1,
Optional.of(nodes.get(1).id()), Optional.of(5), singletonList(nodes.get(1).id()),
singletonList(nodes.get(1).id()), Collections.emptyList()));
List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>();
topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE, topic, false, partitionMetadata));
env.kafkaClient().prepareResponse(RequestTestUtils.metadataResponse(env.cluster().nodes(),
env.cluster().clusterResource().clusterId(), env.cluster().controller().id(), topicMetadata));
env.kafkaClient().prepareResponseFrom(new DeleteRecordsResponse(new DeleteRecordsResponseData().setTopics(
new DeleteRecordsResponseData.DeleteRecordsTopicResultCollection(singletonList(new DeleteRecordsResponseData.DeleteRecordsTopicResult()
.setName(tp0.topic())
.setPartitions(new DeleteRecordsResponseData.DeleteRecordsPartitionResultCollection(singletonList(new DeleteRecordsResponseData.DeleteRecordsPartitionResult()
.setPartitionIndex(tp0.partition())
.setErrorCode(Errors.NONE.code())
.setLowWatermark(3)).iterator()))).iterator()))), nodes.get(0));
env.kafkaClient().disconnect(nodes.get(1).idString());
env.kafkaClient().createPendingAuthenticationError(nodes.get(1), 100);
Map<TopicPartition, RecordsToDelete> recordsToDelete = new HashMap<>();
recordsToDelete.put(tp0, RecordsToDelete.beforeOffset(10L));
recordsToDelete.put(tp1, RecordsToDelete.beforeOffset(10L));
DeleteRecordsResult results = env.adminClient().deleteRecords(recordsToDelete);
assertEquals(3L, results.lowWatermarks().get(tp0).get().lowWatermark());
TestUtils.assertFutureThrows(results.lowWatermarks().get(tp1), AuthenticationException.class);
}
}
|
@Override
public void checkpointCoordinator(long checkpointId, CompletableFuture<byte[]> result) {
// unfortunately, this method does not run in the scheduler executor, but in the
// checkpoint coordinator time thread.
// we can remove the delegation once the checkpoint coordinator runs fully in the
// scheduler's main thread executor
mainThreadExecutor.execute(() -> checkpointCoordinatorInternal(checkpointId, result));
}
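// --- Hedged sketch (not part of the original record) ---
// Illustrates the hand-off pattern above in isolation: callers may arrive on an
// arbitrary thread, so the holder re-schedules the real work onto a single "main
// thread" executor to keep coordinator state single-threaded. The class and method
// bodies here are assumptions for illustration, not Flink's implementation.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;

class MainThreadDelegationSketch {
    private final Executor mainThreadExecutor = Executors.newSingleThreadExecutor();

    void checkpointCoordinator(long checkpointId, CompletableFuture<byte[]> result) {
        // Never touch coordinator state here; just re-schedule onto the owning thread.
        mainThreadExecutor.execute(() -> checkpointCoordinatorInternal(checkpointId, result));
    }

    private void checkpointCoordinatorInternal(long checkpointId, CompletableFuture<byte[]> result) {
        // Hypothetical body: complete the future once the coordinator state is snapshotted.
        result.complete(new byte[0]);
    }
}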
|
@Test
void completedCheckpointFuture() throws Exception {
    final EventReceivingTasks tasks = EventReceivingTasks.createForRunningTasks();
    final OperatorCoordinatorHolder holder =
            createCoordinatorHolder(tasks, TestingOperatorCoordinator::new);

    final byte[] testData = new byte[] {11, 22, 33, 44};
    final CompletableFuture<byte[]> checkpointFuture = new CompletableFuture<>();
    holder.checkpointCoordinator(9L, checkpointFuture);

    // Completing the coordinator's internally triggered checkpoint must complete the
    // caller-supplied future with the same state bytes.
    getCoordinator(holder).getLastTriggeredCheckpoint().complete(testData);

    assertThat(checkpointFuture).isDone();
    assertThatFuture(checkpointFuture).eventuallySucceeds().isEqualTo(testData);
}
|
static ProtocolHandlerWithClassLoader load(ProtocolHandlerMetadata metadata,
                                           String narExtractionDirectory) throws IOException {
    final File narFile = metadata.getArchivePath().toAbsolutePath().toFile();
    NarClassLoader ncl = NarClassLoaderBuilder.builder()
            .narFile(narFile)
            .parentClassLoader(ProtocolHandler.class.getClassLoader())
            .extractionDirectory(narExtractionDirectory)
            .build();
    ProtocolHandlerDefinition phDef = getProtocolHandlerDefinition(ncl);
    if (StringUtils.isBlank(phDef.getHandlerClass())) {
        throw new IOException("Protocol handler `" + phDef.getName() + "` does NOT provide a protocol"
                + " handler implementation");
    }
    try {
        // Instantiate the handler inside the NAR's class loader and verify its type.
        Class<?> handlerClass = ncl.loadClass(phDef.getHandlerClass());
        Object handler = handlerClass.getDeclaredConstructor().newInstance();
        if (!(handler instanceof ProtocolHandler)) {
            throw new IOException("Class " + phDef.getHandlerClass()
                    + " does not implement protocol handler interface");
        }
        ProtocolHandler ph = (ProtocolHandler) handler;
        return new ProtocolHandlerWithClassLoader(ph, ncl);
    } catch (Throwable t) {
        rethrowIOException(t);
        // Unreachable: rethrowIOException always throws.
        return null;
    }
}
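// --- Hedged usage sketch (not part of the original record) ---
// Mirrors how the test below drives load(): the metadata only needs the archive path,
// since the definition is read back out of the NAR itself. The path is a placeholder,
// and calling this requires living in the same package as the package-private load().
import java.io.IOException;
import java.nio.file.Path;

class ProtocolHandlerLoadSketch {
    static ProtocolHandlerWithClassLoader loadFromNar(Path narPath) throws IOException {
        ProtocolHandlerMetadata metadata = new ProtocolHandlerMetadata();
        metadata.setArchivePath(narPath);
        // The test below passes an empty extraction directory string.
        return ProtocolHandlerUtils.load(metadata, "");
    }
}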
|
@Test
public void testLoadProtocolHandlerBlankHandlerClass() throws Exception {
    ProtocolHandlerDefinition def = new ProtocolHandlerDefinition();
    def.setDescription("test-protocol-handler");
    // Note: setHandlerClass(...) is deliberately never called, so the definition
    // read back from the NAR reports a blank handler class.
    String archivePath = "/path/to/protocol/handler/nar";

    ProtocolHandlerMetadata metadata = new ProtocolHandlerMetadata();
    metadata.setDefinition(def);
    metadata.setArchivePath(Paths.get(archivePath));

    NarClassLoader mockLoader = mock(NarClassLoader.class);
    when(mockLoader.getServiceDefinition(eq(PULSAR_PROTOCOL_HANDLER_DEFINITION_FILE)))
            .thenReturn(ObjectMapperFactory.getYamlMapper().writer().writeValueAsString(def));
    Class<?> handlerClass = MockProtocolHandler.class;
    when(mockLoader.loadClass(eq(MockProtocolHandler.class.getName())))
            .thenReturn(handlerClass);

    final NarClassLoaderBuilder mockedBuilder = mock(NarClassLoaderBuilder.class, RETURNS_SELF);
    when(mockedBuilder.build()).thenReturn(mockLoader);
    try (MockedStatic<NarClassLoaderBuilder> builder = Mockito.mockStatic(NarClassLoaderBuilder.class)) {
        builder.when(() -> NarClassLoaderBuilder.builder()).thenReturn(mockedBuilder);
        try {
            ProtocolHandlerUtils.load(metadata, "");
            fail("Should not reach here");
        } catch (IOException ioe) {
            // expected: blank handler class in the definition
        }
    }
}
|
public static CoderProvider fromStaticMethods(Class<?> rawType, Class<?> coderClazz) {
checkArgument(
Coder.class.isAssignableFrom(coderClazz),
"%s is not a subtype of %s",
coderClazz.getName(),
Coder.class.getSimpleName());
return new CoderProviderFromStaticMethods(rawType, coderClazz);
}
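// --- Hedged sketch (not part of the original record) ---
// A hypothetical extra test illustrating the checkArgument guard above: handing
// fromStaticMethods() a class that is not a Coder fails fast instead of producing
// a broken provider.
@Test
public void testFromStaticMethodsRejectsNonCoder() {
    try {
        CoderProviders.fromStaticMethods(KV.class, String.class);
        fail("expected IllegalArgumentException");
    } catch (IllegalArgumentException expected) {
        // message: "java.lang.String is not a subtype of Coder"
    }
}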
|
@Test
public void testKvCoderProvider() throws Exception {
TypeDescriptor<KV<Double, Double>> type =
TypeDescriptors.kvs(TypeDescriptors.doubles(), TypeDescriptors.doubles());
CoderProvider kvCoderProvider = CoderProviders.fromStaticMethods(KV.class, KvCoder.class);
assertEquals(
KvCoder.of(DoubleCoder.of(), DoubleCoder.of()),
kvCoderProvider.coderFor(type, Arrays.asList(DoubleCoder.of(), DoubleCoder.of())));
}
|