| focal_method | test_case |
|---|---|
boolean swapReplicas(BrokerAndSortedReplicas toSwap,
BrokerAndSortedReplicas toSwapWith,
double meanDiskUsage,
ClusterModel clusterModel,
Set<String> excludedTopics) {
if (LOG.isTraceEnabled()) {
LOG.trace("Swapping replicas between broker {}({}) and broker {}({})",
toSwap.broker().id(), dWrap(brokerSize(toSwap)), toSwapWith.broker().id(), dWrap(brokerSize(toSwapWith)));
}
double sizeToChange = toSwap.broker().capacityFor(DISK) * meanDiskUsage - brokerSize(toSwap);
NavigableSet<ReplicaWrapper> sortedReplicasToSwap = sortReplicasAscend(toSwap, excludedTopics);
NavigableSet<ReplicaWrapper> sortedLeadersToSwapWith = sortReplicasAscend(toSwapWith, excludedTopics);
NavigableSet<ReplicaWrapper> sortedFollowersToSwapWith = sortedFollowerReplicas(toSwapWith, excludedTopics);
// Depending on whether we need more or less disk usage, use an ascending or a descending iterator.
Iterator<ReplicaWrapper> toSwapIter =
sizeToChange > 0 ? sortedReplicasToSwap.iterator() : sortedReplicasToSwap.descendingIterator();
while (toSwapIter.hasNext()) {
Replica replicaToSwap = toSwapIter.next().replica();
if (excludedTopics.contains(replicaToSwap.topicPartition().topic())) {
continue;
}
// First make sure it is possible to move the replica to the broker toSwapWith. If this check fails,
// don't bother searching for a replica to swap with.
if (!possibleToMove(replicaToSwap, toSwapWith.broker(), clusterModel)) {
continue;
}
NavigableSet<ReplicaWrapper> sortedReplicasToSwapWith =
replicaToSwap.isLeader() ? sortedLeadersToSwapWith : sortedFollowersToSwapWith;
double sizeToSwap = replicaSize(replicaToSwap);
// No need to continue if we are trying to reduce the size and the replica to swap out has size 0.
if (sizeToChange < 0 && sizeToSwap == 0) {
break;
}
// When sizeToChange > 0, the broker toSwap needs more disk utilization, and the replicaToSwapWith should
// meet the following requirements:
// 1. replicaToSwapWith.size() > replicaToSwap.size()
// 2. After the swap, the disk usage of broker toSwap should not be more than the disk usage of broker
// toSwapWith before the swap.
// 3. After the swap, the disk usage of broker toSwapWith should not be less than the disk usage of broker
// toSwap before the swap.
//
// When sizeToChange < 0, the broker toSwap needs less disk utilization, and the replicaToSwapWith should meet the
// following requirements:
// 4. replicaToSwapWith.size() < replicaToSwap.size()
// 5. After the swap, the disk usage of broker toSwap should not be less than the disk usage of broker
// toSwapWith before the swap.
// 6. After the swap, the disk usage of broker toSwapWith should not be more than the disk usage of broker
// toSwap before the swap.
//
// We do not require the swap to be under the balance upper limit or lower limit. Instead, we just ensure
// that after the swap, the two replicas are closer to the mean usage.
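// Worked example with illustrative numbers (not from the source): if both brokers have
// capacity 100, brokerSize(toSwap) = 30, brokerSize(toSwapWith) = 70 and sizeToSwap = 5,
// then requirement 2 caps the incoming replica at 70 - (30 - 5) = 45 and requirement 3
// at (70 + 5) - 30 = 45, while requirement 1 demands more than 5. With meanDiskUsage = 0.5,
// sizeToChange = 100 * 0.5 - 30 = 20, so the search below targets 5 + 20 = 25, a swap that
// would leave both brokers exactly at the mean usage of 50.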
double maxSize = Double.MAX_VALUE;
double minSize = Double.MIN_VALUE;
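// Note: Double.MIN_VALUE is the smallest positive double, so it serves here as an
// effectively-zero lower bound, which suffices because replica sizes are never negative.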
if (sizeToChange > 0) {
// requirement 1
minSize = sizeToSwap;
// requirement 2
double maxSizeOfBrokerToSwap = diskUsage(toSwapWith) * toSwap.broker().capacityFor(DISK);
double currentSizeOfBrokerToSwap = brokerSize(toSwap);
// After giving out sizeToSwap, this is the maximum size the broker toSwap can take in.
maxSize = Math.min(maxSize, maxSizeOfBrokerToSwap - (currentSizeOfBrokerToSwap - sizeToSwap));
// requirement 3
double minSizeOfBrokerToSwapWith = diskUsage(toSwap) * toSwapWith.broker().capacityFor(DISK);
double currentSizeOfBrokerToSwapWith = brokerSize(toSwapWith);
// After taking in sizeToSwap, this is the maximum size the broker toSwapWith can give out.
maxSize = Math.min(maxSize, (currentSizeOfBrokerToSwapWith + sizeToSwap) - minSizeOfBrokerToSwapWith);
} else {
// requirement 4
maxSize = sizeToSwap;
// requirement 5
double minSizeOfBrokerToSwap = diskUsage(toSwapWith) * toSwap.broker().capacityFor(DISK);
double currentSizeOfBrokerToSwap = brokerSize(toSwap);
// After giving out sizeToSwap, this is the minimum size the broker toSwap should take in.
minSize = Math.max(minSize, minSizeOfBrokerToSwap - (currentSizeOfBrokerToSwap - sizeToSwap));
// requirement 6
double maxSizeOfBrokerToSwapWith = diskUsage(toSwap) * toSwapWith.broker().capacityFor(DISK);
double currentSizeOfBrokerToSwapWith = brokerSize(toSwapWith);
// After taking in sizeToSwap, this is the minimum size the broker toSwapWith should give out.
minSize = Math.max(minSize, (currentSizeOfBrokerToSwapWith + sizeToSwap) - maxSizeOfBrokerToSwapWith);
}
// Tighten both bounds by the convergence delta so the swap strictly improves convergence.
minSize += REPLICA_CONVERGENCE_DELTA;
maxSize -= REPLICA_CONVERGENCE_DELTA;
// The target size might be negative here. It would still work for our binary search purpose.
double targetSize = sizeToSwap + sizeToChange;
// Find a replica that is eligible for swap.
if (LOG.isTraceEnabled()) {
LOG.trace("replicaToSwap: {}(size={}), targetSize={}, minSize={}, maxSize={}",
replicaToSwap, dWrap(replicaSize(replicaToSwap)), dWrap(targetSize), dWrap(minSize), dWrap(maxSize));
}
Replica replicaToSwapWith = sortedReplicasToSwapWith.isEmpty() ? null : findReplicaToSwapWith(replicaToSwap, sortedReplicasToSwapWith,
targetSize, minSize, maxSize, clusterModel);
if (replicaToSwapWith != null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Found replica to swap. Swapping {}({}) on broker {}({}) and {}({}) on broker {}({})",
replicaToSwap.topicPartition(), dWrap(replicaSize(replicaToSwap)), toSwap.broker().id(),
dWrap(brokerSize(toSwap)), replicaToSwapWith.topicPartition(), dWrap(replicaSize(replicaToSwapWith)),
toSwapWith.broker().id(), dWrap(brokerSize(toSwapWith)));
}
clusterModel.relocateReplica(replicaToSwapWith.topicPartition(), toSwapWith.broker().id(), toSwap.broker().id());
clusterModel.relocateReplica(replicaToSwap.topicPartition(), toSwap.broker().id(), toSwapWith.broker().id());
toSwap.sortedReplicas().remove(replicaToSwap);
toSwap.sortedReplicas().add(replicaToSwapWith);
toSwapWith.sortedReplicas().remove(replicaToSwapWith);
toSwapWith.sortedReplicas().add(replicaToSwap);
return true;
}
}
LOG.trace("Nothing to swap between broker {} and broker {}", toSwap.broker().id(), toSwapWith.broker().id());
return false;
}
|
@Test
public void testSwapReplicas() {
Properties props = KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties();
props.setProperty(AnalyzerConfig.MAX_REPLICAS_PER_BROKER_CONFIG, Long.toString(10L));
props.setProperty(AnalyzerConfig.OVERPROVISIONED_MAX_REPLICAS_PER_BROKER_CONFIG, Long.toString(10L));
props.setProperty(AnalyzerConfig.DISK_BALANCE_THRESHOLD_CONFIG, "1.05");
BalancingConstraint balancingConstraint = new BalancingConstraint(new KafkaCruiseControlConfig(props));
KafkaAssignerDiskUsageDistributionGoal goal = new KafkaAssignerDiskUsageDistributionGoal(balancingConstraint);
ClusterModel clusterModel = createClusterModel();
Comparator<Replica> replicaComparator =
Comparator.comparingDouble((Replica r) -> r.load().expectedUtilizationFor(DISK))
.thenComparing(r -> r);
double meanDiskUsage = clusterModel.load().expectedUtilizationFor(DISK) / clusterModel.capacityFor(DISK);
assertTrue(goal.swapReplicas(new BrokerAndSortedReplicas(clusterModel.broker(0), replicaComparator),
new BrokerAndSortedReplicas(clusterModel.broker(1), replicaComparator),
meanDiskUsage,
clusterModel,
Collections.emptySet()));
assertFalse(goal.swapReplicas(new BrokerAndSortedReplicas(clusterModel.broker(0), replicaComparator),
new BrokerAndSortedReplicas(clusterModel.broker(2), replicaComparator),
meanDiskUsage,
clusterModel,
Collections.emptySet()));
assertTrue(goal.swapReplicas(new BrokerAndSortedReplicas(clusterModel.broker(2), replicaComparator),
new BrokerAndSortedReplicas(clusterModel.broker(3), replicaComparator),
meanDiskUsage,
clusterModel,
Collections.emptySet()));
}
|
public StreamConfig getConfiguration() {
return configuration;
}
|
@Test
void testEarlyCanceling() throws Exception {
final StreamConfig cfg = new StreamConfig(new Configuration());
cfg.setOperatorID(new OperatorID(4711L, 42L));
cfg.setStreamOperator(new SlowlyDeserializingOperator());
cfg.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
cfg.serializeAllConfigs();
final TaskManagerActions taskManagerActions = spy(new NoOpTaskManagerActions());
try (NettyShuffleEnvironment shuffleEnvironment =
new NettyShuffleEnvironmentBuilder().build()) {
final Task task =
new TestTaskBuilder(shuffleEnvironment)
.setInvokable(SourceStreamTask.class)
.setTaskConfig(cfg.getConfiguration())
.setTaskManagerActions(taskManagerActions)
.build(EXECUTOR_EXTENSION.getExecutor());
final TaskExecutionState state =
new TaskExecutionState(task.getExecutionId(), ExecutionState.RUNNING);
task.startTaskThread();
verify(taskManagerActions, timeout(2000L)).updateTaskExecutionState(eq(state));
// Send a cancel. Because the operator takes a long time to deserialize, this should
// hit the task before the operator is deserialized.
task.cancelExecution();
task.getExecutingThread().join();
assertThat(task.getExecutingThread().isAlive()).as("Task did not cancel").isFalse();
assertThat(task.getExecutionState()).isEqualTo(ExecutionState.CANCELED);
}
}
|
private void replay(ApiMessage message, Optional<OffsetAndEpoch> snapshotId, long offset) {
if (log.isTraceEnabled()) {
if (snapshotId.isPresent()) {
log.trace("Replaying snapshot {} record {}",
Snapshots.filenameFromSnapshotId(snapshotId.get()),
recordRedactor.toLoggableString(message));
} else {
log.trace("Replaying log record {} with offset {}",
recordRedactor.toLoggableString(message), offset);
}
}
logReplayTracker.replay(message);
MetadataRecordType type = MetadataRecordType.fromId(message.apiKey());
switch (type) {
case REGISTER_BROKER_RECORD:
clusterControl.replay((RegisterBrokerRecord) message, offset);
break;
case UNREGISTER_BROKER_RECORD:
clusterControl.replay((UnregisterBrokerRecord) message);
break;
case TOPIC_RECORD:
replicationControl.replay((TopicRecord) message);
break;
case PARTITION_RECORD:
replicationControl.replay((PartitionRecord) message);
break;
case CONFIG_RECORD:
configurationControl.replay((ConfigRecord) message);
break;
case PARTITION_CHANGE_RECORD:
replicationControl.replay((PartitionChangeRecord) message);
break;
case FENCE_BROKER_RECORD:
clusterControl.replay((FenceBrokerRecord) message);
break;
case UNFENCE_BROKER_RECORD:
clusterControl.replay((UnfenceBrokerRecord) message);
break;
case REMOVE_TOPIC_RECORD:
replicationControl.replay((RemoveTopicRecord) message);
break;
case FEATURE_LEVEL_RECORD:
featureControl.replay((FeatureLevelRecord) message);
handleFeatureControlChange();
break;
case CLIENT_QUOTA_RECORD:
clientQuotaControlManager.replay((ClientQuotaRecord) message);
break;
case PRODUCER_IDS_RECORD:
producerIdControlManager.replay((ProducerIdsRecord) message);
break;
case BROKER_REGISTRATION_CHANGE_RECORD:
clusterControl.replay((BrokerRegistrationChangeRecord) message);
break;
case ACCESS_CONTROL_ENTRY_RECORD:
aclControlManager.replay((AccessControlEntryRecord) message);
break;
case REMOVE_ACCESS_CONTROL_ENTRY_RECORD:
aclControlManager.replay((RemoveAccessControlEntryRecord) message);
break;
case USER_SCRAM_CREDENTIAL_RECORD:
scramControlManager.replay((UserScramCredentialRecord) message);
break;
case REMOVE_USER_SCRAM_CREDENTIAL_RECORD:
scramControlManager.replay((RemoveUserScramCredentialRecord) message);
break;
case DELEGATION_TOKEN_RECORD:
delegationTokenControlManager.replay((DelegationTokenRecord) message);
break;
case REMOVE_DELEGATION_TOKEN_RECORD:
delegationTokenControlManager.replay((RemoveDelegationTokenRecord) message);
break;
case NO_OP_RECORD:
// NoOpRecord is an empty record and doesn't need to be replayed
break;
case ZK_MIGRATION_STATE_RECORD:
featureControl.replay((ZkMigrationStateRecord) message);
break;
case BEGIN_TRANSACTION_RECORD:
offsetControl.replay((BeginTransactionRecord) message, offset);
break;
case END_TRANSACTION_RECORD:
offsetControl.replay((EndTransactionRecord) message, offset);
break;
case ABORT_TRANSACTION_RECORD:
offsetControl.replay((AbortTransactionRecord) message, offset);
break;
case REGISTER_CONTROLLER_RECORD:
clusterControl.replay((RegisterControllerRecord) message);
break;
default:
throw new RuntimeException("Unhandled record type " + type);
}
}
|
@Test
public void testActivationRecordsPartialTransactionNoSupport() {
SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
FeatureControlManager featureControlManager = new FeatureControlManager.Builder()
.setSnapshotRegistry(snapshotRegistry)
.setMetadataVersion(MetadataVersion.IBP_3_6_IV0)
.build();
OffsetControlManager offsetControlManager = new OffsetControlManager.Builder().build();
offsetControlManager.replay(new BeginTransactionRecord(), 10);
offsetControlManager.handleCommitBatch(Batch.data(20, 1, 1L, 0,
Collections.singletonList(new ApiMessageAndVersion(new BeginTransactionRecord(), (short) 0))));
assertThrows(RuntimeException.class, () ->
ActivationRecordsGenerator.generate(
msg -> { },
false,
offsetControlManager.transactionStartOffset(),
false,
BootstrapMetadata.fromVersion(MetadataVersion.IBP_3_6_IV0, "test"),
featureControlManager)
);
}
|
public List<UrlRewriteRule> getUrlRewriteRules() {
return urlRewriteRules;
}
|
@Test
public void testUrlRewriteRules() {
ExternalServiceConfig config = new ExternalServiceConfig();
assert config.getUrlRewriteRules().size() == 2;
}
|
@Override
public ParamCheckResponse checkParamInfoList(List<ParamInfo> paramInfos) {
ParamCheckResponse paramCheckResponse = new ParamCheckResponse();
if (paramInfos == null) {
paramCheckResponse.setSuccess(true);
return paramCheckResponse;
}
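// Fail fast: return the first failed format check instead of validating the rest.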
for (ParamInfo paramInfo : paramInfos) {
paramCheckResponse = checkParamInfoFormat(paramInfo);
if (!paramCheckResponse.isSuccess()) {
return paramCheckResponse;
}
}
paramCheckResponse.setSuccess(true);
return paramCheckResponse;
}
|
@Test
void testCheckParamInfoForNamespaceId() {
ParamInfo paramInfo = new ParamInfo();
ArrayList<ParamInfo> paramInfos = new ArrayList<>();
paramInfos.add(paramInfo);
// Max Length
String namespaceId = buildStringLength(65);
paramInfo.setNamespaceId(namespaceId);
ParamCheckResponse actual = paramChecker.checkParamInfoList(paramInfos);
assertFalse(actual.isSuccess());
assertEquals("Param 'namespaceId/tenant' is illegal, the param length should not exceed 64.", actual.getMessage());
// Pattern
paramInfo.setNamespaceId("hsbfkj@$!#khdkad");
actual = paramChecker.checkParamInfoList(paramInfos);
assertFalse(actual.isSuccess());
assertEquals("Param 'namespaceId/tenant' is illegal, illegal characters should not appear in the param.", actual.getMessage());
// Success
paramInfo.setNamespaceId("123-ashdal");
actual = paramChecker.checkParamInfoList(paramInfos);
assertTrue(actual.isSuccess());
}
|
public Optional<Throwable> run(String... arguments) {
try {
if (isFlag(HELP, arguments)) {
parser.printHelp(stdOut);
} else if (isFlag(VERSION, arguments)) {
parser.printVersion(stdOut);
} else {
final Namespace namespace = parser.parseArgs(arguments);
final Command command = requireNonNull(commands.get(namespace.getString(COMMAND_NAME_ATTR)),
"Command is not found");
try {
command.run(bootstrap, namespace);
} catch (Throwable e) {
// The command failed to run, and the command knows
// best how to clean up / debug the exception
command.onError(this, namespace, e);
return Optional.of(e);
}
}
return Optional.empty();
} catch (HelpScreenException ignored) {
// This exception is triggered when the user passes in a help flag.
// Returning an empty Optional signals that the process executed normally.
return Optional.empty();
} catch (ArgumentParserException e) {
stdErr.println(e.getMessage());
e.getParser().printHelp(stdErr);
return Optional.of(e);
}
}
|
@Test
void handlesLongHelpCommands() throws Exception {
assertThat(cli.run("--help"))
.isEmpty();
assertThat(stdOut)
.hasToString(String.format(
"usage: java -jar dw-thing.jar [-h] [-v] {check,custom} ...%n" +
"%n" +
"positional arguments:%n" +
" {check,custom} available commands%n" +
"%n" +
"named arguments:%n" +
" -h, --help show this help message and exit%n" +
" -v, --version show the application version and exit%n"
));
assertThat(stdErr.toString())
.isEmpty();
}
|
public void setStringValue( List<String> value ) {
this.stringValue = value;
}
|
@Test
public void setStringValue() {
JobScheduleParam jobScheduleParam = mock( JobScheduleParam.class );
doCallRealMethod().when( jobScheduleParam ).setStringValue( any() );
List<String> stringValue = new ArrayList<>();
stringValue.add( "hitachi" );
jobScheduleParam.setStringValue( stringValue );
Assert.assertEquals( stringValue, ReflectionTestUtils.getField( jobScheduleParam, "stringValue" ) );
}
|
public void addIndexes(int maxIndex, int[] dictionaryIndexes, int indexCount)
{
if (indexCount == 0 && indexRetainedBytes > 0) {
// Ignore empty segment, since there are other segments present.
return;
}
checkState(maxIndex >= lastMaxIndex, "LastMax is greater than the current max");
lastMaxIndex = maxIndex;
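// Pick the narrowest primitive width that can hold every index, based on the segment's max index.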
if (maxIndex <= Byte.MAX_VALUE) {
byte[] byteIndexes = new byte[indexCount];
for (int i = 0; i < indexCount; i++) {
byteIndexes[i] = (byte) dictionaryIndexes[i];
}
appendByteIndexes(byteIndexes);
}
else if (maxIndex <= Short.MAX_VALUE) {
short[] shortIndexes = new short[indexCount];
for (int i = 0; i < indexCount; i++) {
shortIndexes[i] = (short) dictionaryIndexes[i];
}
appendShortIndexes(shortIndexes);
}
else {
int[] intIndexes = Arrays.copyOf(dictionaryIndexes, indexCount);
appendIntegerIndexes(intIndexes);
}
}
|
@Test(expectedExceptions = {IllegalStateException.class})
public void testDecreasingMaxThrows()
{
DictionaryRowGroupBuilder rowGroupBuilder = new DictionaryRowGroupBuilder();
rowGroupBuilder.addIndexes(5, new int[0], 0);
rowGroupBuilder.addIndexes(3, new int[1], 1);
}
|
@Override
public Long sendSingleSms(String mobile, Long userId, Integer userType,
String templateCode, Map<String, Object> templateParams) {
// Validate that the SMS template is legal
SmsTemplateDO template = validateSmsTemplate(templateCode);
// Validate that the SMS channel is legal
SmsChannelDO smsChannel = validateSmsChannel(template.getChannelId());
// Validate that the mobile number exists
mobile = validateMobile(mobile);
// Build the ordered template parameters. This is done here to guarantee the correctness of the template parameters up front, rather than only when inserting the send log
List<KeyValue<String, Object>> newTemplateParams = buildTemplateParams(template, templateParams);
// Create the send log. If the template is disabled, do not send the SMS; only record the log
Boolean isSend = CommonStatusEnum.ENABLE.getStatus().equals(template.getStatus())
&& CommonStatusEnum.ENABLE.getStatus().equals(smsChannel.getStatus());
String content = smsTemplateService.formatSmsTemplateContent(template.getContent(), templateParams);
Long sendLogId = smsLogService.createSmsLog(mobile, userId, userType, isSend, template, content, templateParams);
// Send an MQ message to perform the SMS sending asynchronously
if (isSend) {
smsProducer.sendSmsSendMessage(sendLogId, mobile, template.getChannelId(),
template.getApiTemplateId(), newTemplateParams);
}
return sendLogId;
}
|
@Test
public void testSendSingleSms_successWhenSmsTemplateDisable() {
// Prepare parameters
String mobile = randomString();
Long userId = randomLongId();
Integer userType = randomEle(UserTypeEnum.values()).getValue();
String templateCode = randomString();
Map<String, Object> templateParams = MapUtil.<String, Object>builder().put("code", "1234")
.put("op", "login").build();
// Mock the SmsTemplateService methods
SmsTemplateDO template = randomPojo(SmsTemplateDO.class, o -> {
o.setStatus(CommonStatusEnum.DISABLE.getStatus());
o.setContent("The verification code is {code}, and the operation is {op}");
o.setParams(Lists.newArrayList("code", "op"));
});
when(smsTemplateService.getSmsTemplateByCodeFromCache(eq(templateCode))).thenReturn(template);
String content = randomString();
when(smsTemplateService.formatSmsTemplateContent(eq(template.getContent()), eq(templateParams)))
.thenReturn(content);
// Mock the SmsChannelService methods
SmsChannelDO smsChannel = randomPojo(SmsChannelDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus()));
when(smsChannelService.getSmsChannel(eq(template.getChannelId()))).thenReturn(smsChannel);
// Mock the SmsLogService methods
Long smsLogId = randomLongId();
when(smsLogService.createSmsLog(eq(mobile), eq(userId), eq(userType), eq(Boolean.FALSE), eq(template),
eq(content), eq(templateParams))).thenReturn(smsLogId);
// Invoke
Long resultSmsLogId = smsSendService.sendSingleSms(mobile, userId, userType, templateCode, templateParams);
// Assert
assertEquals(smsLogId, resultSmsLogId);
// Assert the invocations
verify(smsProducer, times(0)).sendSmsSendMessage(anyLong(), anyString(),
anyLong(), any(), anyList());
}
|
public static void setWorkingDirectory(Job job, Path workingDirectory) {
job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,
workingDirectory.toString());
}
|
@Test
public void testSetWorkingDirectory() {
try {
Job job = Job.getInstance(new Configuration());
Assert.assertEquals(null, CopyOutputFormat.getWorkingDirectory(job));
job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, "");
Assert.assertEquals(null, CopyOutputFormat.getWorkingDirectory(job));
Path directory = new Path("/tmp/test");
CopyOutputFormat.setWorkingDirectory(job, directory);
Assert.assertEquals(directory, CopyOutputFormat.getWorkingDirectory(job));
Assert.assertEquals(directory.toString(), job.getConfiguration().
get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));
} catch (IOException e) {
LOG.error("Exception encountered while running test", e);
Assert.fail("Failed while testing for set Working Directory");
}
}
|
public void close(ThreadLocal<DelegatingDbSessionSupplier> dbSessionThreadLocal, String label) {
DelegatingDbSessionSupplier delegatingDbSessionSupplier = dbSessionThreadLocal.get();
boolean getCalled = delegatingDbSessionSupplier.isPopulated();
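// Only attempt to close when the supplier was actually used; an unpopulated supplier holds no session.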
if (getCalled) {
try {
DbSession res = delegatingDbSessionSupplier.get();
res.close();
} catch (Exception e) {
LOG.error(format("Failed to close %s connection in %s", label, currentThread()), e);
}
}
}
|
@Test
void openSession_with_caching_returns_DbSession_that_rolls_back_on_close_if_any_mutation_call_was_not_followed_by_commit_nor_rollback() throws SQLException {
DbSession dbSession = openSessionAndDoSeveralMutatingAndNeutralCalls();
dbSession.close();
verify(myBatisDbSession).rollback();
}
|
@SuppressWarnings({
"nullness" // TODO(https://github.com/apache/beam/issues/20497)
})
public static TableReference parseTableSpec(String tableSpec) {
Matcher match = BigQueryIO.TABLE_SPEC.matcher(tableSpec);
if (!match.matches()) {
throw new IllegalArgumentException(
String.format(
"Table specification [%s] is not in one of the expected formats ("
+ " [project_id]:[dataset_id].[table_id],"
+ " [project_id].[dataset_id].[table_id],"
+ " [dataset_id].[table_id])",
tableSpec));
}
TableReference ref = new TableReference();
ref.setProjectId(match.group("PROJECT"));
return ref.setDatasetId(match.group("DATASET")).setTableId(match.group("TABLE"));
}
|
@Test
public void testTableSpecParsingStandardSql() {
TableReference ref = BigQueryHelpers.parseTableSpec("my-project.data_set.table_name");
assertEquals("my-project", ref.getProjectId());
assertEquals("data_set", ref.getDatasetId());
assertEquals("table_name", ref.getTableId());
}
|
static String resolveRegion(AwsConfig awsConfig, AwsMetadataApi metadataApi, Environment environment) {
if (!isNullOrEmptyAfterTrim(awsConfig.getRegion())) {
return awsConfig.getRegion();
}
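// No region was configured explicitly; fall back to deriving it from the availability zone reported by the metadata API.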
if (environment.isRunningOnEcs()) {
return regionFrom(metadataApi.availabilityZoneEcs());
}
return regionFrom(metadataApi.availabilityZoneEc2());
}
|
@Test
public void resolveRegionAwsConfig() {
// given
String region = "us-east-1";
AwsConfig awsConfig = AwsConfig.builder().setRegion(region).build();
AwsMetadataApi awsMetadataApi = mock(AwsMetadataApi.class);
Environment environment = mock(Environment.class);
// when
String result = resolveRegion(awsConfig, awsMetadataApi, environment);
// then
assertEquals(region, result);
}
|
@Override
public void finished(boolean allStepsExecuted) {
if (postProjectAnalysisTasks.length == 0) {
return;
}
ProjectAnalysisImpl projectAnalysis = createProjectAnalysis(allStepsExecuted ? SUCCESS : FAILED);
for (PostProjectAnalysisTask postProjectAnalysisTask : postProjectAnalysisTasks) {
executeTask(projectAnalysis, postProjectAnalysisTask);
}
}
|
@Test
@UseDataProvider("booleanValues")
public void organization_is_null(boolean allStepsExecuted) {
underTest.finished(allStepsExecuted);
verify(postProjectAnalysisTask).finished(taskContextCaptor.capture());
assertThat(taskContextCaptor.getValue().getProjectAnalysis().getOrganization()).isEmpty();
}
|
public abstract int[] toTopLevelIndexes();
|
@Test
void testToTopLevelIndexes() {
assertThat(Projection.of(new int[] {1, 2, 3, 4}).toTopLevelIndexes())
.isEqualTo(new int[] {1, 2, 3, 4});
assertThat(
Projection.of(new int[][] {new int[] {4}, new int[] {1}, new int[] {2}})
.toTopLevelIndexes())
.isEqualTo(new int[] {4, 1, 2});
assertThatThrownBy(
() ->
Projection.of(
new int[][] {
new int[] {4}, new int[] {1, 3}, new int[] {2}
})
.toTopLevelIndexes())
.isInstanceOf(IllegalStateException.class);
}
|
@Override
public Long clusterCountKeysInSlot(int slot) {
RedisClusterNode node = clusterGetNodeForSlot(slot);
MasterSlaveEntry entry = executorService.getConnectionManager().getEntry(new InetSocketAddress(node.getHost(), node.getPort()));
RFuture<Long> f = executorService.readAsync(entry, StringCodec.INSTANCE, RedisCommands.CLUSTER_COUNTKEYSINSLOT, slot);
return syncFuture(f);
}
|
@Test
public void testClusterCountKeysInSlot() {
Long t = connection.clusterCountKeysInSlot(1);
assertThat(t).isZero();
}
|
public static String formatSql(final AstNode root) {
final StringBuilder builder = new StringBuilder();
new Formatter(builder).process(root, 0);
return StringUtils.stripEnd(builder.toString(), "\n");
}
|
@Test
public void shouldFormatCtasWithClause() {
final String statementString = "CREATE TABLE S WITH(partitions=4) AS SELECT * FROM address;";
final Statement statement = parseSingle(statementString);
final String result = SqlFormatter.formatSql(statement);
assertThat(result, startsWith("CREATE TABLE S WITH (PARTITIONS=4) AS SELECT"));
}
|
public static String getDbNameFromJdbcUrl(String jdbcUrl) {
Matcher matcher = JDBC_PATTERN.matcher(jdbcUrl);
if (matcher.find()) {
return matcher.group(3);
} else {
LOG.error("jdbc url {} is not valid.", jdbcUrl);
}
return null;
}
|
@Test
public void getDbTest() {
assert ObReaderUtils.getDbNameFromJdbcUrl("jdbc:mysql://127.0.0.1:3306/testdb").equalsIgnoreCase("testdb");
assert ObReaderUtils.getDbNameFromJdbcUrl("jdbc:oceanbase://127.0.0.1:2883/testdb").equalsIgnoreCase("testdb");
assert ObReaderUtils.getDbNameFromJdbcUrl("||_dsc_ob10_dsc_||obcluster:mysql||_dsc_ob10_dsc_||jdbc:mysql://127.0.0.1:3306/testdb").equalsIgnoreCase("testdb");
assert ObReaderUtils.getDbNameFromJdbcUrl("||_dsc_ob10_dsc_||obcluster:oracle||_dsc_ob10_dsc_||jdbc:oceanbase://127.0.0.1:3306/testdb").equalsIgnoreCase("testdb");
}
|
@Override
public <R extends MessageResponse<?>> void chatStream(Prompt<R> prompt, StreamResponseListener<R> listener, ChatOptions options) {
LlmClient llmClient = new SseClient();
Map<String, String> headers = new HashMap<>();
headers.put("Content-Type", "application/json");
headers.put("Authorization", "Bearer " + getConfig().getApiKey());
String payload = OpenAiLLmUtil.promptToPayload(prompt, config, options, true);
String endpoint = config.getEndpoint();
LlmClientListener clientListener = new BaseLlmClientListener(this, llmClient, listener, prompt, streamMessageParser, functionMessageParser);
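// Start streaming the chat completion over SSE from the OpenAI-compatible endpoint.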
llmClient.start(endpoint + "/v1/chat/completions", headers, payload, clientListener, config);
}
|
@Test
public void testChat01() {
OpenAiLlmConfig config = new OpenAiLlmConfig();
config.setApiKey("sk-alQ9N********");
config.setEndpoint("https://api.moonshot.cn");
config.setModel("moonshot-v1-8k");
// config.setDebug(true);
Llm llm = new OpenAiLlm(config);
// String response = llm.chat("What is your name?");
llm.chatStream("What is your name?", new StreamResponseListener<AiMessageResponse>() {
@Override
public void onMessage(ChatContext context, AiMessageResponse response) {
System.out.println(response.getMessage().getContent());
}
});
try {
Thread.sleep(2000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
|
@Override
public TimelineEntity getEntity(TimelineReaderContext context,
TimelineDataToRetrieve dataToRetrieve) throws IOException {
String flowRunPathStr = getFlowRunPath(context.getUserId(),
context.getClusterId(), context.getFlowName(), context.getFlowRunId(),
context.getAppId());
Path clusterIdPath = new Path(entitiesPath, context.getClusterId());
Path flowRunPath = new Path(clusterIdPath, flowRunPathStr);
Path appIdPath = new Path(flowRunPath, context.getAppId());
Path entityTypePath = new Path(appIdPath, context.getEntityType());
Path entityFilePath = getNormalPath(new Path(entityTypePath,
context.getEntityId() + TIMELINE_SERVICE_STORAGE_EXTENSION));
if (entityFilePath == null) {
return null;
}
try (BufferedReader reader =
new BufferedReader(new InputStreamReader(
fs.open(entityFilePath), StandardCharsets.UTF_8))) {
TimelineEntity entity = readEntityFromFile(reader);
return createEntityToBeReturned(
entity, dataToRetrieve.getFieldsToRetrieve());
} catch (FileNotFoundException e) {
LOG.info("Cannot find entity {id:" + context.getEntityId() + " , type:" +
context.getEntityType() + "}. Will send HTTP 404 in response.");
return null;
}
}
|
@Test
void testAppFlowMappingCsv() throws Exception {
// Test getting an entity by cluster and app where flow entry
// in app flow mapping csv has commas.
TimelineEntity result = reader.getEntity(
new TimelineReaderContext("cluster1", null, null, null, "app2",
"app", "id_5"),
new TimelineDataToRetrieve(null, null, null, null, null, null));
assertEquals(
(new TimelineEntity.Identifier("app", "id_5")).toString(),
result.getIdentifier().toString());
assertEquals((Long) 1425016502050L, result.getCreatedTime());
}
|
static S3ResourceId fromUri(String uri) {
Matcher m = S3_URI.matcher(uri);
checkArgument(m.matches(), "Invalid S3 URI: [%s]", uri);
String scheme = m.group("SCHEME");
String bucket = m.group("BUCKET");
String key = Strings.nullToEmpty(m.group("KEY"));
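// Normalize the key so it always starts with a slash, even when the URI has no key component.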
if (!key.startsWith("/")) {
key = "/" + key;
}
return fromComponents(scheme, bucket, key);
}
|
@Test
public void testInvalidPathNoBucketAndSlash() {
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage("Invalid S3 URI: [s3:///]");
S3ResourceId.fromUri("s3:///");
}
|
@Override
public FailureResult howToHandleFailure(
Throwable failure, CompletableFuture<Map<String, String>> failureLabels) {
FailureResult failureResult = howToHandleFailure(failure);
if (reportEventsAsSpans) {
// TODO: replace with reporting as event once events are supported.
// Add reporting as callback for when the failure labeling is completed.
failureLabels.thenAcceptAsync(
(labels) -> jobFailureMetricReporter.reportJobFailure(failureResult, labels),
componentMainThreadExecutor);
}
return failureResult;
}
|
@Test
void testHowToHandleFailureRejectedByStrategy() throws Exception {
final Configuration configuration = new Configuration();
configuration.set(TraceOptions.REPORT_EVENTS_AS_SPANS, Boolean.TRUE);
final List<Span> spanCollector = new ArrayList<>(1);
final UnregisteredMetricGroups.UnregisteredJobManagerJobMetricGroup testMetricGroup =
createTestMetricGroup(spanCollector);
final AdaptiveScheduler scheduler =
new AdaptiveSchedulerBuilder(
createJobGraph(),
mainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.setRestartBackoffTimeStrategy(NoRestartBackoffTimeStrategy.INSTANCE)
.setJobMasterConfiguration(configuration)
.setJobManagerJobMetricGroup(testMetricGroup)
.build();
assertThat(
scheduler
.howToHandleFailure(
new Exception("test"), createFailureLabelsFuture())
.canRestart())
.isFalse();
assertThat(spanCollector).isEmpty();
mainThreadExecutor.trigger();
checkMetrics(spanCollector, false);
}
|
public boolean allSearchFiltersVisible() {
return hiddenSearchFiltersIDs.isEmpty();
}
|
@Test
void testAllSearchFiltersVisibleReturnsTrueIfHiddenFilterIsAllowed() {
toTest = new SearchFilterVisibilityCheckStatus(Collections.singletonList("Allowed hidden one"));
assertTrue(toTest.allSearchFiltersVisible(ImmutableList.of("Allowed hidden one", "Another allowed hidden one")));
}
|
public static boolean isBlank(final CharSequence cs) {
final int strLen;
if (cs == null || (strLen = cs.length()) == 0) {
return true;
}
for (int i = 0; i < strLen; i++) {
if (!Character.isWhitespace(cs.charAt(i))) {
return false;
}
}
return true;
}
|
@Test
void testIsBlank() {
assertTrue(StringUtils.isBlank(null));
assertTrue(StringUtils.isBlank(""));
assertTrue(StringUtils.isBlank(" "));
assertFalse(StringUtils.isBlank("bob"));
assertFalse(StringUtils.isBlank(" bob "));
}
|
@Override
public boolean alterOffsets(Map<String, String> connectorConfig, Map<Map<String, ?>, Map<String, ?>> offsets) {
for (Map.Entry<Map<String, ?>, Map<String, ?>> offsetEntry : offsets.entrySet()) {
Map<String, ?> sourceOffset = offsetEntry.getValue();
if (sourceOffset == null) {
// We allow tombstones for anything; if there's garbage in the offsets for the connector, we don't
// want to prevent users from being able to clean it up using the REST API
continue;
}
Map<String, ?> sourcePartition = offsetEntry.getKey();
if (sourcePartition == null) {
throw new ConnectException("Source partitions may not be null");
}
MirrorUtils.validateSourcePartitionString(sourcePartition, SOURCE_CLUSTER_KEY);
MirrorUtils.validateSourcePartitionString(sourcePartition, TOPIC_KEY);
MirrorUtils.validateSourcePartitionPartition(sourcePartition);
MirrorUtils.validateSourceOffset(sourcePartition, sourceOffset, false);
}
// We never commit offsets with our source consumer, so no additional effort is required beyond just validating
// the format of the user-supplied offsets
return true;
}
|
@Test
public void testAlterOffsetsTombstones() {
MirrorCheckpointConnector connector = new MirrorCheckpointConnector();
Function<Map<String, ?>, Boolean> alterOffsets = partition -> connector.alterOffsets(
null,
Collections.singletonMap(partition, null)
);
Map<String, Object> partition = sourcePartition("kips", 875, "apache.kafka");
assertTrue(() -> alterOffsets.apply(partition));
partition.put(PARTITION_KEY, "a string");
assertTrue(() -> alterOffsets.apply(partition));
partition.remove(PARTITION_KEY);
assertTrue(() -> alterOffsets.apply(partition));
assertTrue(() -> alterOffsets.apply(null));
assertTrue(() -> alterOffsets.apply(Collections.emptyMap()));
assertTrue(() -> alterOffsets.apply(Collections.singletonMap("unused_partition_key", "unused_partition_value")));
}
|
@Override
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
ReflectionUtils.doWithMethods(bean.getClass(), recurringJobFinderMethodCallback);
return bean;
}
|
@Test
void beansWithMethodsAnnotatedWithRecurringAnnotationHasDisabledCronExpressionValueShouldBeDeleted() {
new ApplicationContextRunner()
.withBean(RecurringJobPostProcessor.class)
.withBean(JobScheduler.class, () -> jobScheduler)
.withPropertyValues("my-job.id=my-recurring-job-to-be-deleted")
.withPropertyValues("my-job.cron=-")
.withPropertyValues("my-job.zone-id=Asia/Taipei")
.run(context -> {
context.getBean(RecurringJobPostProcessor.class)
.postProcessAfterInitialization(new MyServiceWithRecurringAnnotationContainingPropertyPlaceholder(), "not important");
verify(jobScheduler).deleteRecurringJob("my-recurring-job-to-be-deleted");
});
}
|
public IssuesChangesNotification newIssuesChangesNotification(Set<DefaultIssue> issues, Map<String, UserDto> assigneesByUuid) {
AnalysisChange change = new AnalysisChange(analysisMetadataHolder.getAnalysisDate());
Set<ChangedIssue> changedIssues = issues.stream()
.map(issue -> new ChangedIssue.Builder(issue.key())
.setAssignee(getAssignee(issue.assignee(), assigneesByUuid))
.setNewStatus(issue.status())
.setNewIssueStatus(issue.status() != null ? IssueStatus.of(issue.status(), issue.resolution()) : null)
.setRule(getRuleByRuleKey(issue.ruleKey()))
.setProject(getProject())
.build())
.collect(Collectors.toSet());
return issuesChangesSerializer.serialize(new IssuesChangesNotificationBuilder(changedIssues, change));
}
|
@Test
public void newIssuesChangesNotification_fails_with_NPE_if_issue_has_no_rule() {
DefaultIssue issue = new DefaultIssue();
Map<String, UserDto> assigneesByUuid = nonEmptyAssigneesByUuid();
analysisMetadata.setAnalysisDate(new Random().nextLong());
assertThatThrownBy(() -> underTest.newIssuesChangesNotification(ImmutableSet.of(issue), assigneesByUuid))
.isInstanceOf(NullPointerException.class);
}
|
public void release() {
if (isReleased()) {
return;
}
released = true;
// Decrease the ref count when the buffer is released from memory.
buffer.recycleBuffer();
}
|
@Test
void testBufferReleaseRepeatedly() {
bufferContext.release();
assertThatNoException()
.as("repeatedly release should only recycle buffer once.")
.isThrownBy(() -> bufferContext.release());
}
|
@Override
public CompletableFuture<MeterStoreResult> addOrUpdateMeter(Meter meter) {
checkArgument(validIndex(meter), "Meter index is not valid");
CompletableFuture<MeterStoreResult> future = new CompletableFuture<>();
MeterKey key = MeterKey.key(meter.deviceId(), meter.meterCellId());
MeterData data = new MeterData(meter, null);
futures.put(key, future);
try {
meters.compute(key, (k, v) -> data);
} catch (StorageException e) {
log.error("{} thrown a storage exception: {}", e.getStackTrace()[0].getMethodName(),
e.getMessage(), e);
futures.remove(key);
future.completeExceptionally(e);
}
return future;
}
|
@Test(expected = IllegalArgumentException.class)
public void testInvalidCellId() {
initMeterStore(true);
// MF defines an end index equals to 10
Meter meterBad = DefaultMeter.builder()
.forDevice(did3)
.fromApp(APP_ID)
.withCellId(invalidCid)
.withUnit(Meter.Unit.KB_PER_SEC)
.withBands(Collections.singletonList(b1))
.build();
((DefaultMeter) meterBad).setState(MeterState.PENDING_ADD);
meterStore.addOrUpdateMeter(meterBad);
}
|
@Override
public void reset() {
resetCount++;
super.reset();
initEvaluatorMap();
initCollisionMaps();
root.recursiveReset();
resetTurboFilterList();
cancelScheduledTasks();
fireOnReset();
resetListenersExceptResetResistant();
resetStatusListenersExceptResetResistant();
}
|
@Test
public void evaluatorMapPostReset() {
lc.reset();
assertNotNull(lc.getObject(CoreConstants.EVALUATOR_MAP));
}
|
void handleCollision(Collection<? extends Point> toCheck, Map<Integer, Bubble> allBubbles) {
var toBePopped = false; // set to true if any other bubble collides with this one
for (var point : toCheck) {
var otherId = point.id;
if (allBubbles.get(otherId) != null //the bubble hasn't been popped yet
&& this.id != otherId //the two bubbles are not the same
&& this.touches(allBubbles.get(otherId))) { //the bubbles touch
allBubbles.get(otherId).pop(allBubbles);
toBePopped = true;
}
}
if (toBePopped) {
this.pop(allBubbles);
}
}
|
@Test
void handleCollisionTest() {
var b1 = new Bubble(0, 0, 1, 2);
var b2 = new Bubble(1, 1, 2, 1);
var b3 = new Bubble(10, 10, 3, 1);
var bubbles = new HashMap<Integer, Bubble>();
bubbles.put(1, b1);
bubbles.put(2, b2);
bubbles.put(3, b3);
var bubblesToCheck = new ArrayList<Point>();
bubblesToCheck.add(b2);
bubblesToCheck.add(b3);
b1.handleCollision(bubblesToCheck, bubbles);
//b1 touches b2 and not b3, so b1, b2 will be popped
assertNull(bubbles.get(1));
assertNull(bubbles.get(2));
assertNotNull(bubbles.get(3));
}
|
public static int standardErrorToBuckets(double maxStandardError)
{
checkCondition(maxStandardError >= LOWEST_MAX_STANDARD_ERROR && maxStandardError <= HIGHEST_MAX_STANDARD_ERROR,
INVALID_FUNCTION_ARGUMENT,
"Max standard error must be in [%s, %s]: %s", LOWEST_MAX_STANDARD_ERROR, HIGHEST_MAX_STANDARD_ERROR, maxStandardError);
return log2Ceiling((int) Math.ceil(1.0816 / (maxStandardError * maxStandardError)));
}
|
@Test
public void testStandardErrorToBuckets()
{
assertEquals(standardErrorToBuckets(0.0326), 1024);
assertEquals(standardErrorToBuckets(0.0325), 1024);
assertEquals(standardErrorToBuckets(0.0324), 2048);
assertEquals(standardErrorToBuckets(0.0231), 2048);
assertEquals(standardErrorToBuckets(0.0230), 2048);
assertEquals(standardErrorToBuckets(0.0229), 4096);
assertEquals(standardErrorToBuckets(0.0164), 4096);
assertEquals(standardErrorToBuckets(0.0163), 4096);
assertEquals(standardErrorToBuckets(0.0162), 8192);
assertEquals(standardErrorToBuckets(0.0116), 8192);
assertEquals(standardErrorToBuckets(0.0115), 8192);
assertEquals(standardErrorToBuckets(0.0114), 16384);
assertEquals(standardErrorToBuckets(0.008126), 16384);
assertEquals(standardErrorToBuckets(0.008125), 16384);
assertEquals(standardErrorToBuckets(0.008124), 32768);
assertEquals(standardErrorToBuckets(0.00576), 32768);
assertEquals(standardErrorToBuckets(0.00575), 32768);
assertEquals(standardErrorToBuckets(0.00574), 65536);
assertEquals(standardErrorToBuckets(0.0040626), 65536);
assertEquals(standardErrorToBuckets(0.0040625), 65536);
}
|
public void validateAll(final Map<String, Object> properties) {
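// Reject the whole request if any supplied override names a property the server marked immutable.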
final Set<String> propsDenied = Sets.intersection(immutableProps, properties.keySet());
if (!propsDenied.isEmpty()) {
throw new KsqlException(String.format("One or more properties overrides set locally are "
+ "prohibited by the KSQL server (use UNSET to reset their default value): %s",
propsDenied));
}
}
|
@Test
public void shouldNotThrowOnAllowedProp() {
validator.validateAll(ImmutableMap.of(
"mutable-1", "v1",
"anything", "v2"
));
}
|
public PartitionMetadata from(Struct row) {
return PartitionMetadata.newBuilder()
.setPartitionToken(row.getString(COLUMN_PARTITION_TOKEN))
.setParentTokens(Sets.newHashSet(row.getStringList(COLUMN_PARENT_TOKENS)))
.setStartTimestamp(row.getTimestamp(COLUMN_START_TIMESTAMP))
.setEndTimestamp(row.getTimestamp(COLUMN_END_TIMESTAMP))
.setHeartbeatMillis(row.getLong(COLUMN_HEARTBEAT_MILLIS))
.setState(State.valueOf(row.getString(COLUMN_STATE)))
.setWatermark(row.getTimestamp(COLUMN_WATERMARK))
.setCreatedAt(row.getTimestamp(COLUMN_CREATED_AT))
.setScheduledAt(
!row.isNull(COLUMN_SCHEDULED_AT) ? row.getTimestamp(COLUMN_SCHEDULED_AT) : null)
.setRunningAt(!row.isNull(COLUMN_RUNNING_AT) ? row.getTimestamp(COLUMN_RUNNING_AT) : null)
.setFinishedAt(
!row.isNull(COLUMN_FINISHED_AT) ? row.getTimestamp(COLUMN_FINISHED_AT) : null)
.build();
}
|
@Test
public void testMapPartitionMetadataFromResultSet() {
final Struct row =
Struct.newBuilder()
.set(COLUMN_PARTITION_TOKEN)
.to("token")
.set(COLUMN_PARENT_TOKENS)
.toStringArray(Collections.singletonList("parentToken"))
.set(COLUMN_START_TIMESTAMP)
.to(Timestamp.ofTimeMicroseconds(10L))
.set(COLUMN_END_TIMESTAMP)
.to(Timestamp.ofTimeMicroseconds(20L))
.set(COLUMN_HEARTBEAT_MILLIS)
.to(5_000L)
.set(COLUMN_STATE)
.to(State.RUNNING.name())
.set(COLUMN_WATERMARK)
.to(Timestamp.ofTimeMicroseconds(30L))
.set(COLUMN_CREATED_AT)
.to(Timestamp.ofTimeMicroseconds(40L))
.set(COLUMN_SCHEDULED_AT)
.to(Timestamp.ofTimeMicroseconds(50L))
.set(COLUMN_RUNNING_AT)
.to(Timestamp.ofTimeMicroseconds(60L))
.set(COLUMN_FINISHED_AT)
.to(Timestamp.ofTimeMicroseconds(70L))
.build();
final PartitionMetadata partition = mapper.from(row);
assertEquals(
new PartitionMetadata(
"token",
Sets.newHashSet("parentToken"),
Timestamp.ofTimeMicroseconds(10L),
Timestamp.ofTimeMicroseconds(20L),
5_000L,
State.RUNNING,
Timestamp.ofTimeMicroseconds(30),
Timestamp.ofTimeMicroseconds(40),
Timestamp.ofTimeMicroseconds(50),
Timestamp.ofTimeMicroseconds(60),
Timestamp.ofTimeMicroseconds(70)),
partition);
}
|
public static Entry buildMenuStructure(String xml) {
final Reader reader = new StringReader(xml);
return buildMenuStructure(reader);
}
|
@Test
public void givenXmlWithChildEntryWithBuilderSpecificAttribute_createsStructureWithChildEntry() {
String xmlWithoutContent = "<FreeplaneUIEntries><Entry builderSpecificAttribute='Value'/></FreeplaneUIEntries>";
Entry builtMenuStructure = XmlEntryStructureBuilder.buildMenuStructure(xmlWithoutContent);
Entry menuStructureWithChildEntry = new Entry();
final Entry childEntry = new Entry();
childEntry.setAttribute("builderSpecificAttribute", "Value");
menuStructureWithChildEntry.addChild(childEntry);
assertThat(builtMenuStructure, equalTo(menuStructureWithChildEntry));
}
|
@GET
@Path("enabledPackage")
public Response getAllEnabledPackageInfo() {
try {
return new JsonResponse<>(Response.Status.OK, "", helium.getAllEnabledPackages()).build();
} catch (RuntimeException e) {
logger.error(e.getMessage(), e);
return new JsonResponse<>(Response.Status.INTERNAL_SERVER_ERROR, e.getMessage()).build();
}
}
|
@Test
void testGetAllEnabledPackageInfo() throws IOException {
// No enabled packages initially
CloseableHttpResponse get1 = httpGet("/helium/enabledPackage");
assertThat(get1, isAllowed());
Map<String, Object> resp1 = gson.fromJson(EntityUtils.toString(get1.getEntity(), StandardCharsets.UTF_8),
new TypeToken<Map<String, Object>>() { }.getType());
List<Map> body1 = (List<Map>) resp1.get("body");
assertEquals(0, body1.size());
// Enable "name1" package
helium.enable("name1", "artifact1");
CloseableHttpResponse get2 = httpGet("/helium/enabledPackage");
assertThat(get2, isAllowed());
Map<String, Object> resp2 = gson.fromJson(EntityUtils.toString(get2.getEntity(), StandardCharsets.UTF_8),
new TypeToken<Map<String, Object>>() { }.getType());
List<Map> body2 = (List<Map>) resp2.get("body");
assertEquals(1, body2.size());
Map pkg = (Map) body2.get(0).get("pkg");
assertEquals("name1", pkg.get("name"));
get1.close();
get2.close();
}
|
private PiMulticastGroupEntry(int groupId, Set<PiPreReplica> replicas) {
this.groupId = groupId;
this.replicas = replicas;
}
|
@Test
public void testPiMulticastGroupEntry() {
assertThat("Invalid group ID",
group1.groupId(), is(groupId1));
assertThat("Invalid replicas size",
group1.replicas().size(), is(2));
assertThat("Invalid replicas",
group1.replicas(), contains(replica1, replica2));
assertThat("Invalid group ID",
group2.groupId(), is(groupId2));
assertThat("Invalid replicas size",
group2.replicas().size(), is(3));
assertThat("Invalid replicas",
group2.replicas(), contains(replica1, replica2, replica3));
}
|
public Map<NodeId, Set<NodeLabel>> getNodeLabelsInfo() {
Map<NodeId, Set<NodeLabel>> nodeToLabels =
generateNodeLabelsInfoPerNode(NodeLabel.class);
return nodeToLabels;
}
|
@Test
@Timeout(5000)
void testGetNodeLabelsInfo() throws IOException {
mgr.addToCluserNodeLabels(Arrays.asList(NodeLabel.newInstance("p1", false),
NodeLabel.newInstance("p2", true), NodeLabel.newInstance("p3", false)));
mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p2")));
mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n2"), toSet("p3")));
assertLabelInfoMapEquals(mgr.getNodeLabelsInfo(), ImmutableMap.of(
toNodeId("n1"), toSet(NodeLabel.newInstance("p2", true)),
toNodeId("n2"), toSet(NodeLabel.newInstance("p3", false))));
}
|
@Override
public synchronized List<PrivilegedOperation> preStart(Container container)
throws ResourceHandlerException {
String containerIdStr = container.getContainerId().toString();
// Assign Gpus to container if requested some.
GpuResourceAllocator.GpuAllocation allocation = gpuAllocator.assignGpus(
container);
// Create device cgroups for the container
cGroupsHandler.createCGroup(CGroupsHandler.CGroupController.DEVICES,
containerIdStr);
if (!OCIContainerRuntime.isOCICompliantContainerRequested(
nmContext.getConf(),
container.getLaunchContext().getEnvironment())) {
// Write to the devices cgroup only for non-docker containers. The reason is
// that the docker engine runtime, runc, initializes the devices cgroups in
// its pre-hook, see:
// https://github.com/opencontainers/runc/blob/master/libcontainer/configs/device_defaults.go
//
// YARN by default runs docker containers inside a cgroup; if we set up
// devices.deny on the parent cgroup of a launched container, we can see
// errors like: failed to write c *:* m to devices.allow:
// write path-to-parent-cgroup/<container-id>/devices.allow:
// operation not permitted.
//
// To avoid this, if docker is requested when the container is being
// launched, we will not set up devices.deny for the container. Instead YARN
// will pass the --device parameter to the docker engine. See NvidiaDockerV1CommandPlugin
try {
// Execute c-e to setup GPU isolation before launch the container
PrivilegedOperation privilegedOperation = new PrivilegedOperation(
PrivilegedOperation.OperationType.GPU,
Arrays.asList(CONTAINER_ID_CLI_OPTION, containerIdStr));
if (!allocation.getDeniedGPUs().isEmpty()) {
List<Integer> minorNumbers = new ArrayList<>();
for (GpuDevice deniedGpu : allocation.getDeniedGPUs()) {
minorNumbers.add(deniedGpu.getMinorNumber());
}
privilegedOperation.appendArgs(Arrays.asList(EXCLUDED_GPUS_CLI_OPTION,
StringUtils.join(",", minorNumbers)));
}
privilegedOperationExecutor.executePrivilegedOperation(
privilegedOperation, true);
} catch (PrivilegedOperationException e) {
cGroupsHandler.deleteCGroup(CGroupsHandler.CGroupController.DEVICES,
containerIdStr);
LOG.warn("Could not update cgroup for container", e);
throw new ResourceHandlerException(e);
}
List<PrivilegedOperation> ret = new ArrayList<>();
ret.add(new PrivilegedOperation(
PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP,
PrivilegedOperation.CGROUP_ARG_PREFIX + cGroupsHandler
.getPathForCGroupTasks(CGroupsHandler.CGroupController.DEVICES,
containerIdStr)));
return ret;
}
return null;
}
|
@Test
public void testAllocationStored() throws Exception {
initializeGpus();
/* Start container 1, asks 3 containers */
Container container = mockContainerWithGpuRequest(1,
createResourceRequest(3));
gpuResourceHandler.preStart(container);
verify(mockNMStateStore).storeAssignedResources(container,
ResourceInformation.GPU_URI, Arrays
.asList(new GpuDevice(0, 0), new GpuDevice(1, 1),
new GpuDevice(2, 3)));
// Only device=4 will be blocked.
verifyDeniedDevices(getContainerId(1),
Collections.singletonList(new GpuDevice(3, 4)));
/* Start container 2, ask 0 container, succeeded */
container = mockContainerWithGpuRequest(2, createResourceRequest(0));
gpuResourceHandler.preStart(container);
verifyDeniedDevices(getContainerId(2), Arrays
.asList(new GpuDevice(0, 0), new GpuDevice(1, 1), new GpuDevice(2, 3),
new GpuDevice(3, 4)));
assertEquals("Number of GPU device allocations is not the expected!", 0,
container.getResourceMappings()
.getAssignedResources(ResourceInformation.GPU_URI).size());
// Store assigned resource will not be invoked.
verify(mockNMStateStore, never()).storeAssignedResources(
eq(container), eq(ResourceInformation.GPU_URI), anyList());
}
|
public List<ChangeStreamRecord> toChangeStreamRecords(
PartitionMetadata partition,
ChangeStreamResultSet resultSet,
ChangeStreamResultSetMetadata resultSetMetadata) {
if (this.isPostgres()) {
// In PostgreSQL, change stream records are returned as JSONB.
return Collections.singletonList(
toChangeStreamRecordJson(partition, resultSet.getPgJsonb(0), resultSetMetadata));
}
// In GoogleSQL, change stream records are returned as an array of structs.
return resultSet.getCurrentRowAsStruct().getStructList(0).stream()
.flatMap(struct -> toChangeStreamRecord(partition, struct, resultSetMetadata))
.collect(Collectors.toList());
}
|
@Test
public void testMappingUpdateStructJsonNewValuesToDataChangeRecord() {
final DataChangeRecord dataChangeRecord =
new DataChangeRecord(
"partitionToken",
Timestamp.ofTimeSecondsAndNanos(10L, 20),
"serverTransactionId",
true,
"1",
"tableName",
Arrays.asList(
new ColumnType("column1", new TypeCode("{\"code\":\"INT64\"}"), true, 1L),
new ColumnType("column2", new TypeCode("{\"code\":\"BYTES\"}"), false, 2L)),
Collections.singletonList(
new Mod("{\"column1\":\"value1\"}", null, "{\"column2\":\"newValue2\"}")),
ModType.UPDATE,
ValueCaptureType.NEW_VALUES,
10L,
2L,
"transactionTag",
true,
null);
final String jsonString = recordToJson(dataChangeRecord, false, false);
assertNotNull(jsonString);
ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class);
when(resultSet.getPgJsonb(0)).thenReturn(jsonString);
assertEquals(
Collections.singletonList(dataChangeRecord),
mapperPostgres.toChangeStreamRecords(partition, resultSet, resultSetMetadata));
}
|
public static Optional<Map<?, ?>> findCachedServices(final Class<?> spiClass, final Collection<?> types) {
return Optional.ofNullable(cache.get()).map(optional -> optional.get(new Key(spiClass, types)));
}
|
@Test
void assertNotFindCachedServices() {
assertFalse(OrderedServicesCache.findCachedServices(OrderedSPIFixture.class, Collections.singleton(new OrderedInterfaceFixtureImpl())).isPresent());
}
|
protected boolean removeService(Service service) {
LOG.debug("Removing service {}", service.getName());
synchronized (serviceList) {
return serviceList.remove(service);
}
}
|
@Test
public void testRemoveService() {
CompositeService testService = new CompositeService("TestService") {
@Override
public void serviceInit(Configuration conf) {
Integer notAService = Integer.valueOf(0);
assertFalse("Added an integer as a service",
addIfService(notAService));
Service service1 = new AbstractService("Service1") {};
addIfService(service1);
Service service2 = new AbstractService("Service2") {};
addIfService(service2);
Service service3 = new AbstractService("Service3") {};
addIfService(service3);
removeService(service1);
}
};
testService.init(new Configuration());
assertEquals("Incorrect number of services",
2, testService.getServices().size());
}
|
public static ReadRows readRows() {
return new AutoValue_JdbcIO_ReadRows.Builder()
.setFetchSize(DEFAULT_FETCH_SIZE)
.setOutputParallelization(true)
.setStatementPreparator(ignored -> {})
.build();
}
|
@Test
public void testReadRowsWithNumericFields() {
PCollection<Row> rows =
pipeline.apply(
JdbcIO.readRows()
.withDataSourceConfiguration(DATA_SOURCE_CONFIGURATION)
.withQuery(
String.format(
"SELECT CAST(1 AS NUMERIC(1, 0)) AS T1 FROM %s WHERE name = ?",
READ_TABLE_NAME))
.withStatementPreparator(
preparedStatement ->
preparedStatement.setString(1, TestRow.getNameForSeed(1))));
Schema expectedSchema =
Schema.of(
Schema.Field.of(
"T1", FieldType.logicalType(FixedPrecisionNumeric.of(1, 0)).withNullable(false)));
assertEquals(expectedSchema, rows.getSchema());
PCollection<Row> output = rows.apply(Select.fieldNames("T1"));
PAssert.that(output)
.containsInAnyOrder(
ImmutableList.of(
Row.withSchema(expectedSchema).addValues(BigDecimal.valueOf(1)).build()));
pipeline.run();
}
|
public Set<String> getClusters(Service service) {
return serviceClusterIndex.getOrDefault(service, new HashSet<>());
}
|
@Test
void testGetClusters() {
Set<String> clusters = serviceStorage.getClusters(SERVICE);
assertNotNull(clusters);
for (String cluster : clusters) {
assertEquals(NACOS, cluster);
}
}
|
@Override
public Collection<SQLTokenGenerator> getSQLTokenGenerators() {
Collection<SQLTokenGenerator> result = new LinkedList<>();
addSQLTokenGenerator(result, new ShardingTableTokenGenerator(shardingRule));
addSQLTokenGenerator(result, new ShardingDistinctProjectionPrefixTokenGenerator());
addSQLTokenGenerator(result, new ShardingProjectionsTokenGenerator());
addSQLTokenGenerator(result, new ShardingOrderByTokenGenerator());
addSQLTokenGenerator(result, new ShardingAggregationDistinctTokenGenerator());
addSQLTokenGenerator(result, new ShardingIndexTokenGenerator(shardingRule));
addSQLTokenGenerator(result, new ShardingConstraintTokenGenerator(shardingRule));
addSQLTokenGenerator(result, new ShardingOffsetTokenGenerator());
addSQLTokenGenerator(result, new ShardingRowCountTokenGenerator());
addSQLTokenGenerator(result, new GeneratedKeyInsertColumnTokenGenerator());
addSQLTokenGenerator(result, new GeneratedKeyForUseDefaultInsertColumnsTokenGenerator());
addSQLTokenGenerator(result, new GeneratedKeyAssignmentTokenGenerator());
addSQLTokenGenerator(result, new ShardingInsertValuesTokenGenerator());
addSQLTokenGenerator(result, new GeneratedKeyInsertValuesTokenGenerator());
addSQLTokenGenerator(result, new ShardingRemoveTokenGenerator());
addSQLTokenGenerator(result, new ShardingCursorTokenGenerator(shardingRule));
addSQLTokenGenerator(result, new ShardingFetchDirectionTokenGenerator());
return result;
}
|
@Test
void assertGetSQLTokenGenerators() throws Exception {
when(routeContext.containsTableSharding()).thenReturn(true);
SelectStatementContext sqlStatementContext = mock(SelectStatementContext.class, RETURNS_DEEP_STUBS);
when(sqlStatementContext.getProjectionsContext().getAggregationProjections().isEmpty()).thenReturn(false);
ShardingTokenGenerateBuilder shardingTokenGenerateBuilder = new ShardingTokenGenerateBuilder(shardingRule, routeContext, sqlStatementContext);
Collection<SQLTokenGenerator> sqlTokenGenerators = shardingTokenGenerateBuilder.getSQLTokenGenerators();
assertThat(sqlTokenGenerators.size(), is(4));
Iterator<SQLTokenGenerator> iterator = sqlTokenGenerators.iterator();
SQLTokenGenerator tableTokenGenerator = iterator.next();
assertThat(tableTokenGenerator, instanceOf(ShardingTableTokenGenerator.class));
assertSqlTokenGenerator(tableTokenGenerator);
SQLTokenGenerator distinctProjectionPrefixTokenGenerator = iterator.next();
assertThat(distinctProjectionPrefixTokenGenerator, instanceOf(ShardingDistinctProjectionPrefixTokenGenerator.class));
assertSqlTokenGenerator(distinctProjectionPrefixTokenGenerator);
SQLTokenGenerator aggregationDistinctTokenGenerator = iterator.next();
assertThat(aggregationDistinctTokenGenerator, instanceOf(ShardingAggregationDistinctTokenGenerator.class));
assertSqlTokenGenerator(aggregationDistinctTokenGenerator);
SQLTokenGenerator shardingRemoveTokenGenerator = iterator.next();
assertThat(shardingRemoveTokenGenerator, instanceOf(ShardingRemoveTokenGenerator.class));
assertSqlTokenGenerator(shardingRemoveTokenGenerator);
}
|
@Override
public void write(final Path file, final Distribution distribution, final LoginCallback prompt) throws BackgroundException {
final Path container = session.getFeature(PathContainerService.class).getContainer(file);
try {
if(null == distribution.getId()) {
// No existing configuration
if(log.isDebugEnabled()) {
log.debug(String.format("No existing distribution found for method %s", distribution.getMethod()));
}
if(distribution.getMethod().equals(Distribution.STREAMING)) {
distribution.setId(this.createStreamingDistribution(container, distribution).getId());
}
else if(distribution.getMethod().equals(Distribution.DOWNLOAD)) {
distribution.setId(this.createDownloadDistribution(container, distribution).getId());
}
else if(distribution.getMethod().equals(Distribution.CUSTOM)
|| distribution.getMethod().equals(Distribution.WEBSITE_CDN)) {
distribution.setId(this.createCustomDistribution(container, distribution).getId());
}
}
else {
if(distribution.getMethod().equals(Distribution.DOWNLOAD)) {
distribution.setEtag(this.updateDownloadDistribution(container, distribution).getETag());
}
else if(distribution.getMethod().equals(Distribution.STREAMING)) {
distribution.setEtag(this.updateStreamingDistribution(container, distribution).getETag());
}
else if(distribution.getMethod().equals(Distribution.CUSTOM)
|| distribution.getMethod().equals(Distribution.WEBSITE_CDN)) {
distribution.setEtag(this.updateCustomDistribution(container, distribution).getETag());
}
}
}
catch(AmazonClientException e) {
throw new AmazonServiceExceptionMappingService().map("Cannot write CDN configuration", e);
}
}
|
@Test
public void testWriteNewStreaming() throws Exception {
final AtomicBoolean set = new AtomicBoolean();
final CloudFrontDistributionConfiguration configuration = new CloudFrontDistributionConfiguration(session,
new S3LocationFeature(session), new DisabledX509TrustManager(), new DefaultX509KeyManager()) {
@Override
protected UpdateStreamingDistributionResult updateStreamingDistribution(final Path container, final Distribution distribution) {
fail();
return null;
}
@Override
protected StreamingDistribution createStreamingDistribution(final Path container, final Distribution distribution) {
set.set(true);
return new StreamingDistribution().withId("");
}
};
final Path container = new Path(UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory, Path.Type.volume));
final Distribution distribution = new Distribution(Distribution.STREAMING, true);
configuration.write(container, distribution, new DisabledLoginCallback());
assertTrue(set.get());
}
|
@Override
public int read() {
// Return the next byte as an unsigned value in [0, 255], or -1 once the limit is reached.
return (mPosition < mLimit) ? (mData[mPosition++] & 0xff) : -1;
}
|
@Test
void testWrongLength() {
Assertions.assertThrows(IndexOutOfBoundsException.class, () -> {
UnsafeByteArrayInputStream stream = new UnsafeByteArrayInputStream("abc".getBytes());
stream.read(new byte[1], 0, 100);
});
}
|
public boolean setStatus(DefaultIssue issue, String status, IssueChangeContext context) {
if (!Objects.equals(status, issue.status())) {
issue.setFieldChange(context, STATUS, issue.status(), status);
issue.setStatus(status);
issue.setUpdateDate(context.date());
issue.setChanged(true);
issue.setSendNotifications(true);
return true;
}
return false;
}
|
@Test
void set_status() {
boolean updated = underTest.setStatus(issue, Issue.STATUS_OPEN, context);
assertThat(updated).isTrue();
assertThat(issue.status()).isEqualTo(Issue.STATUS_OPEN);
FieldDiffs.Diff diff = issue.currentChange().get(STATUS);
assertThat(diff.oldValue()).isNull();
assertThat(diff.newValue()).isEqualTo(Issue.STATUS_OPEN);
assertThat(issue.mustSendNotifications()).isTrue();
}
|
public static byte[] readFileBytes(File file) {
if (file.exists()) {
String result = readFile(file);
if (result != null) {
return ByteUtils.toBytes(result);
}
}
return null;
}
|
@Test
void testReadFileBytes() {
assertNotNull(DiskUtils.readFileBytes(testFile));
}
|
<T extends PipelineOptions> T as(Class<T> iface) {
checkNotNull(iface);
checkArgument(iface.isInterface(), "Not an interface: %s", iface);
T existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
if (existingOption == null) {
synchronized (this) {
// double check
existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
if (existingOption == null) {
Registration<T> registration =
PipelineOptionsFactory.CACHE
.get()
.validateWellFormed(iface, computedProperties.knownInterfaces);
List<PropertyDescriptor> propertyDescriptors = registration.getPropertyDescriptors();
Class<T> proxyClass = registration.getProxyClass();
existingOption =
InstanceBuilder.ofType(proxyClass)
.fromClass(proxyClass)
.withArg(InvocationHandler.class, this)
.build();
computedProperties =
computedProperties.updated(iface, existingOption, propertyDescriptors);
}
}
}
return existingOption;
}
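A minimal call-site sketch for the proxy cache above. It assumes the public PipelineOptions.as entry point delegates to this method; MyOptions is a hypothetical sub-interface introduced only for illustration.
// Hypothetical options sub-interface; any PipelineOptions sub-interface behaves the same way.
public interface MyOptions extends PipelineOptions {
  String getName();
  void setName(String value);
}

PipelineOptions base = PipelineOptionsFactory.create();
MyOptions options = base.as(MyOptions.class);   // first call validates the interface and caches a proxy
MyOptions sameProxy = base.as(MyOptions.class); // subsequent calls return the cached instance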
|
@Test
public void testJsonConversionForPartiallySerializedValues() throws Exception {
SimpleTypes options = PipelineOptionsFactory.as(SimpleTypes.class);
options.setInteger(5);
SimpleTypes options2 = serializeDeserialize(SimpleTypes.class, options);
options2.setString("TestValue");
SimpleTypes options3 = serializeDeserialize(SimpleTypes.class, options2);
assertEquals(5, options3.getInteger());
assertEquals("TestValue", options3.getString());
}
|
@Override
public ZIPCompressionOutputStream createOutputStream( OutputStream out ) throws IOException {
return new ZIPCompressionOutputStream( out, this );
}
|
@Test
public void testCreateOutputStream() throws IOException {
ZIPCompressionProvider provider = (ZIPCompressionProvider) factory.getCompressionProviderByName( PROVIDER_NAME );
ByteArrayOutputStream out = new ByteArrayOutputStream();
ZipOutputStream zos = new ZipOutputStream( out );
ZIPCompressionOutputStream outStream = new ZIPCompressionOutputStream( out, provider );
assertNotNull( outStream );
ZIPCompressionOutputStream ncis = provider.createOutputStream( out );
assertNotNull( ncis );
ZIPCompressionOutputStream ncis2 = provider.createOutputStream( zos );
assertNotNull( ncis2 );
}
|
public static DynamicVoter parse(String input) {
input = input.trim();
int atIndex = input.indexOf("@");
if (atIndex < 0) {
throw new IllegalArgumentException("No @ found in dynamic voter string.");
}
if (atIndex == 0) {
throw new IllegalArgumentException("Invalid @ at beginning of dynamic voter string.");
}
String idString = input.substring(0, atIndex);
int nodeId;
try {
nodeId = Integer.parseInt(idString);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Failed to parse node id in dynamic voter string.", e);
}
if (nodeId < 0) {
throw new IllegalArgumentException("Invalid negative node id " + nodeId +
" in dynamic voter string.");
}
input = input.substring(atIndex + 1);
if (input.isEmpty()) {
throw new IllegalArgumentException("No hostname found after node id.");
}
String host;
if (input.startsWith("[")) {
int endBracketIndex = input.indexOf("]");
if (endBracketIndex < 0) {
throw new IllegalArgumentException("Hostname began with left bracket, but no right " +
"bracket was found.");
}
host = input.substring(1, endBracketIndex);
input = input.substring(endBracketIndex + 1);
} else {
int endColonIndex = input.indexOf(":");
if (endColonIndex < 0) {
throw new IllegalArgumentException("No colon following hostname could be found.");
}
host = input.substring(0, endColonIndex);
input = input.substring(endColonIndex);
}
if (!input.startsWith(":")) {
throw new IllegalArgumentException("Port section must start with a colon.");
}
input = input.substring(1);
int endColonIndex = input.indexOf(":");
if (endColonIndex < 0) {
throw new IllegalArgumentException("No colon following port could be found.");
}
String portString = input.substring(0, endColonIndex);
int port;
try {
port = Integer.parseInt(portString);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Failed to parse port in dynamic voter string.", e);
}
if (port < 0 || port > 65535) {
throw new IllegalArgumentException("Invalid port " + port + " in dynamic voter string.");
}
String directoryIdString = input.substring(endColonIndex + 1);
Uuid directoryId;
try {
directoryId = Uuid.fromString(directoryIdString);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Failed to parse directory ID in dynamic voter string.", e);
}
return new DynamicVoter(directoryId, nodeId, host, port);
}
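A usage sketch for the voter-string grammar parsed above, nodeId@host:port:directoryId. The directory ID is borrowed from the test below; the bracketed-IPv6 form follows the bracket handling in the code.
// Plain hostname: nodeId=2, host="localhost", port=8020, trailing token is the directory Uuid.
DynamicVoter v1 = DynamicVoter.parse("2@localhost:8020:K90IZ-0DRNazJ49kCZ1EMQ");
// Bracketed IPv6 host: host="::1", same port and directory ID.
DynamicVoter v2 = DynamicVoter.parse("2@[::1]:8020:K90IZ-0DRNazJ49kCZ1EMQ");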
|
@Test
public void testNoColonFollowingHostname() {
assertEquals("No colon following hostname could be found.",
assertThrows(IllegalArgumentException.class,
() -> DynamicVoter.parse("2@localhost8020K90IZ-0DRNazJ49kCZ1EMQ")).
getMessage());
}
|
@Override
public void streamRequest(StreamRequest request, Callback<StreamResponse> callback)
{
streamRequest(request, new RequestContext(), callback);
}
|
@Test(invocationCount = 3, dataProvider = "isD2Async")
public void testStreamRequestWithNoIsFullRequest(boolean isD2Async) throws Exception {
int responseDelayNano = 100000000; // 100ms till response comes back
int backupDelayNano = 50000000; // make backup request after 50ms
Deque<URI> hostsReceivingRequest = new ConcurrentLinkedDeque<>();
BackupRequestsClient client =
createAlwaysBackupClientWithHosts(Arrays.asList("http://test1.com:123", "http://test2.com:123"),
hostsReceivingRequest, responseDelayNano, backupDelayNano, isD2Async);
URI uri = URI.create("d2://testService");
// if there is no IS_FULL_REQUEST set, backup requests will not happen
StreamRequest streamRequest =
new StreamRequestBuilder(uri).build(EntityStreams.newEntityStream(new ByteStringWriter(CONTENT)));
RequestContext context = new RequestContext();
context.putLocalAttr(R2Constants.OPERATION, "get");
RequestContext context1 = context.clone();
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<AssertionError> failure = new AtomicReference<>();
client.streamRequest(streamRequest, context1, new Callback<StreamResponse>() {
@Override
public void onError(Throwable e) {
failure.set(new AssertionError("Callback onError"));
latch.countDown();
}
@Override
public void onSuccess(StreamResponse result) {
try {
assertEquals(result.getStatus(), 200);
assertEquals(result.getHeader("buffered"), "false");
assertEquals(hostsReceivingRequest.size(), 1);
assertEquals(new HashSet<>(hostsReceivingRequest).size(), 1);
hostsReceivingRequest.clear();
} catch (AssertionError e) {
failure.set(e);
}
latch.countDown();
}
});
latch.await(2, TimeUnit.SECONDS);
if (failure.get() != null) {
throw failure.get();
}
}
|
public static boolean isTopic(String destinationName) {
if (destinationName == null) {
throw new IllegalArgumentException("destinationName is null");
}
return destinationName.startsWith("topic:");
}
|
@Test
public void testIsTopic() {
assertTrue(DestinationNameParser.isTopic("topic:foo.DestinationNameParserTest"));
assertFalse(DestinationNameParser.isTopic("queue:bar.DestinationNameParserTest"));
assertFalse(DestinationNameParser.isTopic("bar"));
}
|
@VisibleForTesting
static void startSqlGateway(PrintStream stream, String[] args) {
SqlGatewayOptions cliOptions = SqlGatewayOptionsParser.parseSqlGatewayOptions(args);
if (cliOptions.isPrintHelp()) {
SqlGatewayOptionsParser.printHelpSqlGateway(stream);
return;
}
// startup checks and logging
EnvironmentInformation.logEnvironmentInfo(LOG, "SqlGateway", args);
SignalHandler.register(LOG);
JvmShutdownSafeguard.installAsShutdownHook(LOG);
DefaultContext defaultContext =
DefaultContext.load(
ConfigurationUtils.createConfiguration(cliOptions.getDynamicConfigs()),
Collections.emptyList(),
true);
SqlGateway gateway =
new SqlGateway(
defaultContext.getFlinkConfig(), SessionManager.create(defaultContext));
try {
Runtime.getRuntime().addShutdownHook(new ShutdownThread(gateway));
gateway.start();
gateway.waitUntilStop();
} catch (Throwable t) {
// The user pressed Ctrl+C to cancel the Gateway manually
if (t instanceof InterruptedException) {
LOG.info("Caught " + t.getClass().getSimpleName() + ". Shutting down.");
return;
}
// make space in terminal
stream.println();
stream.println();
if (t instanceof SqlGatewayException) {
// Exception that the gateway can not handle.
throw (SqlGatewayException) t;
} else {
LOG.error(
"SqlGateway must stop. Unexpected exception. This is a bug. Please consider filing an issue.",
t);
throw new SqlGatewayException(
"Unexpected exception. This is a bug. Please consider filing an issue.", t);
}
} finally {
gateway.stop();
}
}
|
@Test
void testPrintStartGatewayHelp() {
String[] args = new String[] {"--help"};
SqlGateway.startSqlGateway(new PrintStream(output), args);
assertThat(output.toString())
.contains(
"Start the Flink SQL Gateway as a daemon to submit Flink SQL.\n"
+ "\n"
+ " Syntax: start [OPTIONS]\n"
+ " -D <property=value> Use value for given property\n"
+ " -h,--help Show the help message with descriptions of all\n"
+ " options.\n\n");
}
|
@Override
@MethodNotAvailable
public Map<K, Object> executeOnKeys(Set<K> keys, com.hazelcast.map.EntryProcessor entryProcessor) {
throw new MethodNotAvailableException();
}
|
@Test(expected = MethodNotAvailableException.class)
public void testExecuteOnKeys() {
Set<Integer> keys = new HashSet<>(singleton(23));
adapter.executeOnKeys(keys, new IMapReplaceEntryProcessor("value", "newValue"));
}
|
public static CacheStats of(@NonNegative long hitCount, @NonNegative long missCount,
@NonNegative long loadSuccessCount, @NonNegative long loadFailureCount,
@NonNegative long totalLoadTime, @NonNegative long evictionCount,
@NonNegative long evictionWeight) {
// Many parameters of the same type in a row is a bad thing, but this class is not constructed
// by end users and is too fine-grained for a builder.
return new CacheStats(hitCount, missCount, loadSuccessCount,
loadFailureCount, totalLoadTime, evictionCount, evictionWeight);
}
|
@Test(dataProvider = "badArgs")
public void invalid(int hitCount, int missCount, int loadSuccessCount, int loadFailureCount,
int totalLoadTime, int evictionCount, int evictionWeight) {
assertThrows(IllegalArgumentException.class, () -> {
CacheStats.of(hitCount, missCount, loadSuccessCount,
loadFailureCount, totalLoadTime, evictionCount, evictionWeight);
});
}
|
@Override
public int get(PageId pageId, int bytesToRead, CacheScope scope) {
boolean seen = false;
for (int i = 0; i < mSegmentBloomFilters.length(); ++i) {
seen |= mSegmentBloomFilters.get(i).mightContain(pageId);
}
if (seen) {
mShadowCachePageHit.getAndIncrement();
mShadowCacheByteHit.getAndAdd(bytesToRead);
}
mShadowCachePageRead.getAndIncrement();
mShadowCacheByteRead.getAndAdd(bytesToRead);
return seen ? bytesToRead : 0;
}
|
@Test
public void getNotExist() throws Exception {
assertEquals(0, mCacheManager.get(PAGE_ID1, PAGE1_BYTES, SCOPE1));
}
|
public StoreMode getStoreMode() {
return storeMode;
}
|
@Test
public void testGetStoreMode() {
metadata.setStoreMode(StoreMode.RAFT);
Assertions.assertEquals(StoreMode.RAFT, metadata.getStoreMode());
}
|
public static void main(String[] args) {
String relDir = args.length == 1 ? args[0] : "";
GraphHopper hopper = createGraphHopperInstance(relDir + "core/files/andorra.osm.pbf");
routing(hopper);
speedModeVersusFlexibleMode(hopper);
alternativeRoute(hopper);
customizableRouting(relDir + "core/files/andorra.osm.pbf");
// release resources to properly shutdown or start a new instance
hopper.close();
}
|
@Test
public void main() {
Helper.removeDir(new File("target/routing-graph-cache"));
RoutingExample.main(new String[]{"../"});
Helper.removeDir(new File("target/routing-tc-graph-cache"));
RoutingExampleTC.main(new String[]{"../"});
}
|
public CrossFilterConfig getCross() {
return cross;
}
|
@Test
public void testCrossFilterConfig() {
ShenyuConfig.CrossFilterConfig cross = config.getCross();
cross.setAllowCredentials(false);
cross.setEnabled(false);
cross.setAllowedExpose("test");
cross.setAllowedMethods("test");
cross.setAllowedHeaders("test");
cross.setMaxAge("test");
String allowedExpose = cross.getAllowedExpose();
String allowedHeaders = cross.getAllowedHeaders();
ShenyuConfig.CrossFilterConfig.AllowedOriginConfig allowedOrigin = cross.getAllowedOrigin();
Boolean enabled = cross.getEnabled();
String maxAge = cross.getMaxAge();
String allowedMethods = cross.getAllowedMethods();
notEmptyElements(allowedExpose, allowedHeaders, allowedOrigin, enabled, maxAge, allowedMethods);
}
|
public Future<Collection<Integer>> resizeAndReconcilePvcs(KafkaStatus kafkaStatus, List<PersistentVolumeClaim> pvcs) {
Set<Integer> podIdsToRestart = new HashSet<>();
List<Future<Void>> futures = new ArrayList<>(pvcs.size());
for (PersistentVolumeClaim desiredPvc : pvcs) {
Future<Void> perPvcFuture = pvcOperator.getAsync(reconciliation.namespace(), desiredPvc.getMetadata().getName())
.compose(currentPvc -> {
if (currentPvc == null || currentPvc.getStatus() == null || !"Bound".equals(currentPvc.getStatus().getPhase())) {
// This branch handles the following conditions:
// * The PVC doesn't exist yet, we should create it
// * The PVC is not Bound, we should reconcile it
return pvcOperator.reconcile(reconciliation, reconciliation.namespace(), desiredPvc.getMetadata().getName(), desiredPvc)
.map((Void) null);
} else if (currentPvc.getStatus().getConditions().stream().anyMatch(cond -> "Resizing".equals(cond.getType()) && "true".equals(cond.getStatus().toLowerCase(Locale.ENGLISH)))) {
// The PVC is Bound, but it is already resizing => Nothing to do, we should let it resize
LOGGER.debugCr(reconciliation, "The PVC {} is resizing, nothing to do", desiredPvc.getMetadata().getName());
return Future.succeededFuture();
} else if (currentPvc.getStatus().getConditions().stream().anyMatch(cond -> "FileSystemResizePending".equals(cond.getType()) && "true".equals(cond.getStatus().toLowerCase(Locale.ENGLISH)))) {
// The PVC is Bound and resized but waiting for FS resizing => We need to restart the pod which is using it
podIdsToRestart.add(getPodIndexFromPvcName(desiredPvc.getMetadata().getName()));
LOGGER.infoCr(reconciliation, "The PVC {} is waiting for file system resizing and the pod using it might need to be restarted.", desiredPvc.getMetadata().getName());
return Future.succeededFuture();
} else {
// The PVC is Bound and resizing is not in progress => We should check if the SC supports resizing and check if size changed
Long currentSize = StorageUtils.convertToMillibytes(currentPvc.getSpec().getResources().getRequests().get("storage"));
Long desiredSize = StorageUtils.convertToMillibytes(desiredPvc.getSpec().getResources().getRequests().get("storage"));
if (!currentSize.equals(desiredSize)) {
// The sizes are different => we should resize (shrinking will be handled in StorageDiff, so we do not need to check that)
return resizePvc(kafkaStatus, currentPvc, desiredPvc);
} else {
// size didn't change, just reconcile
return pvcOperator.reconcile(reconciliation, reconciliation.namespace(), desiredPvc.getMetadata().getName(), desiredPvc)
.map((Void) null);
}
}
});
futures.add(perPvcFuture);
}
return Future.all(futures)
.map(podIdsToRestart);
}
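Worked example of the branches above: a PVC reporting FileSystemResizePending contributes its pod index to the returned set, a PVC still reporting Resizing is left alone (the case mocked in the test below), and a plain Bound PVC only triggers resizePvc when the requested sizes differ.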
|
@Test
public void testVolumesResizing(VertxTestContext context) {
List<PersistentVolumeClaim> pvcs = List.of(
createPvc("data-pod-0"),
createPvc("data-pod-1"),
createPvc("data-pod-2")
);
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
// Mock the PVC Operator
PvcOperator mockPvcOps = supplier.pvcOperations;
when(mockPvcOps.getAsync(eq(NAMESPACE), ArgumentMatchers.startsWith("data-")))
.thenAnswer(invocation -> {
String pvcName = invocation.getArgument(1);
PersistentVolumeClaim currentPvc = pvcs.stream().filter(pvc -> pvcName.equals(pvc.getMetadata().getName())).findFirst().orElse(null);
if (currentPvc != null) {
PersistentVolumeClaim pvcWithStatus = new PersistentVolumeClaimBuilder(currentPvc)
.withNewStatus()
.withPhase("Bound")
.withConditions(new PersistentVolumeClaimConditionBuilder()
.withStatus("True")
.withType("Resizing")
.build())
.withCapacity(Map.of("storage", new Quantity("50Gi", null)))
.endStatus()
.build();
return Future.succeededFuture(pvcWithStatus);
} else {
return Future.succeededFuture();
}
});
ArgumentCaptor<PersistentVolumeClaim> pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class);
when(mockPvcOps.reconcile(any(), anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture());
// Mock the StorageClass Operator
StorageClassOperator mockSco = supplier.storageClassOperations;
when(mockSco.getAsync(eq(STORAGE_CLASS_NAME))).thenReturn(Future.succeededFuture(RESIZABLE_STORAGE_CLASS));
// Reconcile the PVCs
PvcReconciler reconciler = new PvcReconciler(
new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME),
mockPvcOps,
mockSco
);
Checkpoint async = context.checkpoint();
reconciler.resizeAndReconcilePvcs(new KafkaStatus(), pvcs)
.onComplete(res -> {
assertThat(res.succeeded(), is(true));
assertThat(res.result().size(), is(0));
assertThat(pvcCaptor.getAllValues().size(), is(0));
async.flag();
});
}
|
public static Optional<Expression> convert(
org.apache.flink.table.expressions.Expression flinkExpression) {
if (!(flinkExpression instanceof CallExpression)) {
return Optional.empty();
}
CallExpression call = (CallExpression) flinkExpression;
Operation op = FILTERS.get(call.getFunctionDefinition());
if (op != null) {
switch (op) {
case IS_NULL:
return onlyChildAs(call, FieldReferenceExpression.class)
.map(FieldReferenceExpression::getName)
.map(Expressions::isNull);
case NOT_NULL:
return onlyChildAs(call, FieldReferenceExpression.class)
.map(FieldReferenceExpression::getName)
.map(Expressions::notNull);
case LT:
return convertFieldAndLiteral(Expressions::lessThan, Expressions::greaterThan, call);
case LT_EQ:
return convertFieldAndLiteral(
Expressions::lessThanOrEqual, Expressions::greaterThanOrEqual, call);
case GT:
return convertFieldAndLiteral(Expressions::greaterThan, Expressions::lessThan, call);
case GT_EQ:
return convertFieldAndLiteral(
Expressions::greaterThanOrEqual, Expressions::lessThanOrEqual, call);
case EQ:
return convertFieldAndLiteral(
(ref, lit) -> {
if (NaNUtil.isNaN(lit)) {
return Expressions.isNaN(ref);
} else {
return Expressions.equal(ref, lit);
}
},
call);
case NOT_EQ:
return convertFieldAndLiteral(
(ref, lit) -> {
if (NaNUtil.isNaN(lit)) {
return Expressions.notNaN(ref);
} else {
return Expressions.notEqual(ref, lit);
}
},
call);
case NOT:
return onlyChildAs(call, CallExpression.class)
.flatMap(FlinkFilters::convert)
.map(Expressions::not);
case AND:
return convertLogicExpression(Expressions::and, call);
case OR:
return convertLogicExpression(Expressions::or, call);
case STARTS_WITH:
return convertLike(call);
}
}
return Optional.empty();
}
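A sketch of the NaN special-casing in the EQ branch; resolve(...) stands for the type-resolving helper the tests below use, so treat the exact call shape as an assumption.
// equal("f", NaN) would never match any row (NaN != NaN), so the converter emits isNaN("f") instead.
Optional<org.apache.iceberg.expressions.Expression> eqNaN =
    FlinkFilters.convert(resolve(Expressions.$("f").isEqual(Expressions.lit(Double.NaN))));
// eqNaN.get() is Expressions.isNaN("f")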
|
@Test
public void testLessThan() {
UnboundPredicate<Integer> expected =
org.apache.iceberg.expressions.Expressions.lessThan("field1", 1);
Optional<org.apache.iceberg.expressions.Expression> actual =
FlinkFilters.convert(resolve(Expressions.$("field1").isLess(Expressions.lit(1))));
assertThat(actual).isPresent();
assertPredicatesMatch(expected, actual.get());
Optional<org.apache.iceberg.expressions.Expression> actual1 =
FlinkFilters.convert(resolve(Expressions.lit(1).isGreater(Expressions.$("field1"))));
assertThat(actual1).isPresent();
assertPredicatesMatch(expected, actual1.get());
}
|
static void populateSchemaFromListOfRanges(Schema toPopulate, List<RangeNode> ranges) {
Range range = consolidateRanges(ranges);
if (range != null) {
if (range.getLowEndPoint() != null) {
if (range.getLowEndPoint() instanceof BigDecimal bigDecimal) {
toPopulate.minimum(bigDecimal);
} else {
toPopulate.addExtension(DMNOASConstants.X_DMN_MINIMUM_VALUE, range.getLowEndPoint());
}
toPopulate.exclusiveMinimum(range.getLowBoundary() == Range.RangeBoundary.OPEN);
}
if (range.getHighEndPoint() != null) {
if (range.getHighEndPoint() instanceof BigDecimal bigDecimal) {
toPopulate.maximum(bigDecimal);
} else {
toPopulate.addExtension(DMNOASConstants.X_DMN_MAXIMUM_VALUE, range.getHighEndPoint());
}
toPopulate.exclusiveMaximum(range.getHighBoundary() == Range.RangeBoundary.OPEN);
}
}
}
|
@Test
void evaluateUnaryTestsForNumberRange() {
List<String> toRange = Arrays.asList("(>1)", "(<=10)");
List<RangeNode> ranges = getBaseNodes(toRange, RangeNode.class);
Schema toPopulate = OASFactory.createObject(Schema.class);
RangeNodeSchemaMapper.populateSchemaFromListOfRanges(toPopulate, ranges);
assertEquals(BigDecimal.ONE, toPopulate.getMinimum());
assertTrue(toPopulate.getExclusiveMinimum());
assertEquals(BigDecimal.TEN, toPopulate.getMaximum());
assertFalse(toPopulate.getExclusiveMaximum());
}
|
@SuppressWarnings("MethodLength")
static void dissectControlRequest(
final ArchiveEventCode eventCode,
final MutableDirectBuffer buffer,
final int offset,
final StringBuilder builder)
{
int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder);
HEADER_DECODER.wrap(buffer, offset + encodedLength);
encodedLength += MessageHeaderDecoder.ENCODED_LENGTH;
switch (eventCode)
{
case CMD_IN_CONNECT:
CONNECT_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendConnect(builder);
break;
case CMD_IN_CLOSE_SESSION:
CLOSE_SESSION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendCloseSession(builder);
break;
case CMD_IN_START_RECORDING:
START_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartRecording(builder);
break;
case CMD_IN_STOP_RECORDING:
STOP_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopRecording(builder);
break;
case CMD_IN_REPLAY:
REPLAY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplay(builder);
break;
case CMD_IN_STOP_REPLAY:
STOP_REPLAY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopReplay(builder);
break;
case CMD_IN_LIST_RECORDINGS:
LIST_RECORDINGS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecordings(builder);
break;
case CMD_IN_LIST_RECORDINGS_FOR_URI:
LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecordingsForUri(builder);
break;
case CMD_IN_LIST_RECORDING:
LIST_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecording(builder);
break;
case CMD_IN_EXTEND_RECORDING:
EXTEND_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendExtendRecording(builder);
break;
case CMD_IN_RECORDING_POSITION:
RECORDING_POSITION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendRecordingPosition(builder);
break;
case CMD_IN_TRUNCATE_RECORDING:
TRUNCATE_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendTruncateRecording(builder);
break;
case CMD_IN_STOP_RECORDING_SUBSCRIPTION:
STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopRecordingSubscription(builder);
break;
case CMD_IN_STOP_POSITION:
STOP_POSITION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopPosition(builder);
break;
case CMD_IN_FIND_LAST_MATCHING_RECORD:
FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendFindLastMatchingRecord(builder);
break;
case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS:
LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecordingSubscriptions(builder);
break;
case CMD_IN_START_BOUNDED_REPLAY:
BOUNDED_REPLAY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartBoundedReplay(builder);
break;
case CMD_IN_STOP_ALL_REPLAYS:
STOP_ALL_REPLAYS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopAllReplays(builder);
break;
case CMD_IN_REPLICATE:
REPLICATE_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplicate(builder);
break;
case CMD_IN_STOP_REPLICATION:
STOP_REPLICATION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopReplication(builder);
break;
case CMD_IN_START_POSITION:
START_POSITION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartPosition(builder);
break;
case CMD_IN_DETACH_SEGMENTS:
DETACH_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendDetachSegments(builder);
break;
case CMD_IN_DELETE_DETACHED_SEGMENTS:
DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendDeleteDetachedSegments(builder);
break;
case CMD_IN_PURGE_SEGMENTS:
PURGE_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendPurgeSegments(builder);
break;
case CMD_IN_ATTACH_SEGMENTS:
ATTACH_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendAttachSegments(builder);
break;
case CMD_IN_MIGRATE_SEGMENTS:
MIGRATE_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendMigrateSegments(builder);
break;
case CMD_IN_AUTH_CONNECT:
AUTH_CONNECT_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendAuthConnect(builder);
break;
case CMD_IN_KEEP_ALIVE:
KEEP_ALIVE_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendKeepAlive(builder);
break;
case CMD_IN_TAGGED_REPLICATE:
TAGGED_REPLICATE_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendTaggedReplicate(builder);
break;
case CMD_IN_START_RECORDING2:
START_RECORDING_REQUEST2_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartRecording2(builder);
break;
case CMD_IN_EXTEND_RECORDING2:
EXTEND_RECORDING_REQUEST2_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendExtendRecording2(builder);
break;
case CMD_IN_STOP_RECORDING_BY_IDENTITY:
STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopRecordingByIdentity(builder);
break;
case CMD_IN_PURGE_RECORDING:
PURGE_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendPurgeRecording(builder);
break;
case CMD_IN_REPLICATE2:
REPLICATE_REQUEST2_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplicate2(builder);
break;
case CMD_IN_REQUEST_REPLAY_TOKEN:
REPLAY_TOKEN_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplayToken(builder);
break;
default:
builder.append(": unknown command");
}
}
|
@Test
void controlRequestRecordingPosition()
{
internalEncodeLogHeader(buffer, 0, 12, 32, () -> 10_000_000_000L);
final RecordingPositionRequestEncoder requestEncoder = new RecordingPositionRequestEncoder();
requestEncoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder)
.controlSessionId(2)
.correlationId(3)
.recordingId(6);
dissectControlRequest(CMD_IN_RECORDING_POSITION, buffer, 0, builder);
assertEquals("[10.000000000] " + CONTEXT + ": " + CMD_IN_RECORDING_POSITION.name() + " [12/32]:" +
" controlSessionId=2" +
" correlationId=3" +
" recordingId=6",
builder.toString());
}
|
public static boolean isSameWeek(final Date date1, final Date date2, boolean isMon) {
if (date1 == null || date2 == null) {
throw new IllegalArgumentException("The date must not be null");
}
return CalendarUtil.isSameWeek(calendar(date1), calendar(date2), isMon);
}
|
@Test
public void isSameWeekTest() {
// Compare Saturday with Sunday
final boolean isSameWeek = DateUtil.isSameWeek(DateTime.of("2022-01-01", "yyyy-MM-dd"), DateTime.of("2022-01-02", "yyyy-MM-dd"), true);
assertTrue(isSameWeek);
// Compare Sunday with Monday
final boolean isSameWeek1 = DateUtil.isSameWeek(DateTime.of("2022-01-02", "yyyy-MM-dd"), DateTime.of("2022-01-03", "yyyy-MM-dd"), false);
assertTrue(isSameWeek1);
// Compare across a month boundary
final boolean isSameWeek2 = DateUtil.isSameWeek(DateTime.of("2021-12-29", "yyyy-MM-dd"), DateTime.of("2022-01-01", "yyyy-MM-dd"), true);
assertTrue(isSameWeek2);
}
|
@Override
public void handle(Connection connection, DatabaseCharsetChecker.State state) throws SQLException {
// Charset is a global setting on Oracle, it can't be set on a specified schema with a
// different value. To not block users who already have a SonarQube schema, charset
// is verified only on fresh installs but not on upgrades. Let's hope they won't face
// any errors related to charset if they didn't follow the UTF8 requirement when creating
// the schema in previous SonarQube versions.
if (state == DatabaseCharsetChecker.State.FRESH_INSTALL) {
LoggerFactory.getLogger(getClass()).info("Verify that database charset is UTF8");
expectUtf8(connection);
}
}
|
@Test
public void fresh_install_verifies_utf8_charset() throws Exception {
answerCharset("UTF8");
underTest.handle(connection, DatabaseCharsetChecker.State.FRESH_INSTALL);
}
|
@Override
public double getWeight(int source, int target) {
return graph[source][target];
}
|
@Test
public void testGetWeight() {
System.out.println("getWeight");
assertEquals(0.0, g1.getWeight(1, 2), 1E-10);
assertEquals(0.0, g1.getWeight(1, 1), 1E-10);
assertEquals(1.0, g2.getWeight(1, 2), 1E-10);
assertEquals(1.0, g2.getWeight(2, 1), 1E-10);
assertEquals(1.0, g3.getWeight(1, 2), 1E-10);
assertEquals(1.0, g3.getWeight(2, 1), 1E-10);
assertEquals(1.0, g3.getWeight(3, 2), 1E-10);
assertEquals(1.0, g3.getWeight(2, 3), 1E-10);
assertEquals(1.0, g3.getWeight(1, 3), 1E-10);
assertEquals(1.0, g3.getWeight(3, 1), 1E-10);
assertEquals(0.0, g3.getWeight(4, 2), 1E-10);
assertEquals(0.0, g4.getWeight(1, 4), 1E-10);
g4.addEdge(1, 4);
assertEquals(1.0, g4.getWeight(1, 4), 1E-10);
assertEquals(0.0, g5.getWeight(1, 2), 1E-10);
assertEquals(0.0, g5.getWeight(1, 1), 1E-10);
assertEquals(1.0, g6.getWeight(1, 2), 1E-10);
assertEquals(1.0, g6.getWeight(2, 1), 1E-10);
assertEquals(1.0, g7.getWeight(1, 2), 1E-10);
assertEquals(1.0, g7.getWeight(2, 1), 1E-10);
assertEquals(1.0, g7.getWeight(3, 2), 1E-10);
assertEquals(1.0, g7.getWeight(2, 3), 1E-10);
assertEquals(1.0, g7.getWeight(1, 3), 1E-10);
assertEquals(1.0, g7.getWeight(3, 1), 1E-10);
assertEquals(0.0, g7.getWeight(4, 2), 1E-10);
assertEquals(1.0, g8.getWeight(1, 4), 1E-10);
}
|
@Override
public boolean retainAll(Collection<?> c) {
boolean changed = false;
for (Iterator<?> iterator = iterator(); iterator.hasNext();) {
Object object = iterator.next();
if (!c.contains(object)) {
iterator.remove();
changed = true;
}
}
return changed;
}
|
@Test
public void testRetainAll() {
RPriorityQueue<Integer> set = redisson.getPriorityQueue("set");
for (int i = 0; i < 200; i++) {
set.add(i);
}
Assertions.assertTrue(set.retainAll(Arrays.asList(1, 2)));
Assertions.assertEquals(2, set.size());
}
|
@Override
public List<String> readFilesWithRetries(Sleeper sleeper, BackOff backOff)
throws IOException, InterruptedException {
IOException lastException = null;
do {
try {
Collection<Metadata> files = FileSystems.match(filePattern).metadata();
LOG.debug(
"Found file(s) {} by matching the path: {}",
files.stream()
.map(Metadata::resourceId)
.map(ResourceId::getFilename)
.collect(Collectors.joining(",")),
filePattern);
if (files.isEmpty()) {
continue;
}
// Read data from file paths
return readLines(files);
} catch (IOException e) {
// Ignore and retry
lastException = e;
LOG.warn("Error in file reading. Ignore and retry.");
}
} while (BackOffUtils.next(sleeper, backOff));
// Failed after max retries
throw new IOException(
String.format("Unable to read file(s) after retrying %d times", MAX_READ_RETRIES),
lastException);
}
|
@Test
public void testReadWithRetriesFailsWhenOutputDirEmpty() throws Exception {
FilePatternMatchingShardedFile shardedFile = new FilePatternMatchingShardedFile(filePattern);
thrown.expect(IOException.class);
thrown.expectMessage(containsString("Unable to read file(s) after retrying"));
shardedFile.readFilesWithRetries(fastClock, backOff);
}
|
public Duration cacheMinTimeToLive() {
return cacheMinTimeToLive;
}
|
@Test
void cacheMinTimeToLive() {
assertThat(builder.build().cacheMinTimeToLive()).isEqualTo(DEFAULT_CACHE_MIN_TIME_TO_LIVE);
Duration cacheMinTimeToLive = Duration.ofSeconds(5);
builder.cacheMinTimeToLive(cacheMinTimeToLive);
assertThat(builder.build().cacheMinTimeToLive()).isEqualTo(cacheMinTimeToLive);
}
|
@Override
@TpsControl(pointName = "HealthCheck")
public HealthCheckResponse handle(HealthCheckRequest request, RequestMeta meta) {
return new HealthCheckResponse();
}
|
@Test
void testHandle() {
HealthCheckRequestHandler handler = new HealthCheckRequestHandler();
HealthCheckResponse response = handler.handle(null, null);
assertNotNull(response);
}
|
public String defaultRemoteUrl() {
final String sanitizedUrl = sanitizeUrl();
try {
URI uri = new URI(sanitizedUrl);
if (uri.getUserInfo() != null) {
uri = new URI(uri.getScheme(), removePassword(uri.getUserInfo()), uri.getHost(), uri.getPort(), uri.getPath(), uri.getQuery(), uri.getFragment());
return uri.toString();
}
} catch (URISyntaxException e) {
return sanitizedUrl;
}
return sanitizedUrl;
}
|
@Test
void shouldNotModifyAbsoluteFilePaths() {
assertThat(new HgUrlArgument("/tmp/foo").defaultRemoteUrl(), is("/tmp/foo"));
}
|
static BigtableReadOptions translateToBigtableReadOptions(
BigtableReadOptions readOptions, BigtableOptions options) {
BigtableReadOptions.Builder builder = readOptions.toBuilder();
builder.setWaitTimeout(
org.joda.time.Duration.millis(options.getRetryOptions().getReadPartialRowTimeoutMillis()));
if (options.getCallOptionsConfig().getReadStreamRpcAttemptTimeoutMs().isPresent()) {
builder.setAttemptTimeout(
org.joda.time.Duration.millis(
options.getCallOptionsConfig().getReadStreamRpcAttemptTimeoutMs().get()));
}
builder.setOperationTimeout(
org.joda.time.Duration.millis(options.getCallOptionsConfig().getReadStreamRpcTimeoutMs()));
return builder.build();
}
|
@Test
public void testBigtableOptionsToBigtableReadOptions() throws Exception {
BigtableOptions options =
BigtableOptions.builder()
.setCallOptionsConfig(
CallOptionsConfig.builder()
.setReadRowsRpcAttemptTimeoutMs(100)
.setReadRowsRpcTimeoutMs(1000)
.build())
.setRetryOptions(
RetryOptions.builder().setInitialBackoffMillis(5).setBackoffMultiplier(1.5).build())
.build();
BigtableReadOptions readOptions =
BigtableReadOptions.builder()
.setTableId(ValueProvider.StaticValueProvider.of("table"))
.build();
BigtableReadOptions fromBigtableOptions =
BigtableConfigTranslator.translateToBigtableReadOptions(readOptions, options);
assertNotNull(fromBigtableOptions.getAttemptTimeout());
assertNotNull(fromBigtableOptions.getOperationTimeout());
assertEquals(org.joda.time.Duration.millis(100), fromBigtableOptions.getAttemptTimeout());
assertEquals(org.joda.time.Duration.millis(1000), fromBigtableOptions.getOperationTimeout());
}
|
@Override
public Set<Link> getDeviceEgressLinks(DeviceId deviceId) {
return filter(links.values(), link -> deviceId.equals(link.src().deviceId()));
}
|
@Test
public final void testGetDeviceEgressLinks() {
LinkKey linkId1 = LinkKey.linkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2));
LinkKey linkId2 = LinkKey.linkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1));
LinkKey linkId3 = LinkKey.linkKey(new ConnectPoint(DID1, P2), new ConnectPoint(DID2, P3));
putLink(linkId1, DIRECT);
putLink(linkId2, DIRECT);
putLink(linkId3, DIRECT);
// DID1,P1 => DID2,P2
// DID2,P2 => DID1,P1
// DID1,P2 => DID2,P3
Set<Link> links1 = linkStore.getDeviceEgressLinks(DID1);
assertEquals(2, links1.size());
// egress links from DID2
Set<Link> links2 = linkStore.getDeviceEgressLinks(DID2);
assertEquals(1, links2.size());
assertLink(linkId2, DIRECT, links2.iterator().next());
}
|
LinkedList<RewriteOp> makeRewriteOps(
Iterable<String> srcFilenames,
Iterable<String> destFilenames,
boolean deleteSource,
boolean ignoreMissingSource,
boolean ignoreExistingDest)
throws IOException {
List<String> srcList = Lists.newArrayList(srcFilenames);
List<String> destList = Lists.newArrayList(destFilenames);
checkArgument(
srcList.size() == destList.size(),
"Number of source files %s must equal number of destination files %s",
srcList.size(),
destList.size());
LinkedList<RewriteOp> rewrites = Lists.newLinkedList();
for (int i = 0; i < srcList.size(); i++) {
final GcsPath sourcePath = GcsPath.fromUri(srcList.get(i));
final GcsPath destPath = GcsPath.fromUri(destList.get(i));
if (ignoreExistingDest && !sourcePath.getBucket().equals(destPath.getBucket())) {
throw new UnsupportedOperationException(
"Skipping dest existence is only supported within a bucket.");
}
rewrites.addLast(new RewriteOp(sourcePath, destPath, deleteSource, ignoreMissingSource));
}
return rewrites;
}
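A minimal call sketch mirroring the test below; the gs:// paths are illustrative, and both stay in one bucket because ignoreExistingDest across buckets throws.
LinkedList<RewriteOp> ops = gcsUtil.makeRewriteOps(
    ImmutableList.of("gs://bucket/src"),
    ImmutableList.of("gs://bucket/dst"),
    false,  // deleteSource: keep the source object after the rewrite
    false,  // ignoreMissingSource: fail if the source is absent
    false); // ignoreExistingDest: rewrite even if the destination exists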
|
@Test
public void testMakeRewriteOpsWithOptions() throws IOException {
GcsOptions gcsOptions = gcsOptionsWithTestCredential();
GcsUtil gcsUtil = gcsOptions.getGcsUtil();
gcsUtil.maxBytesRewrittenPerCall = 1337L;
LinkedList<RewriteOp> rewrites =
gcsUtil.makeRewriteOps(makeStrings("s", 1), makeStrings("d", 1), false, false, false);
assertEquals(1, rewrites.size());
RewriteOp rewrite = rewrites.pop();
assertTrue(rewrite.getReadyToEnqueue());
Storage.Objects.Rewrite request = rewrite.rewriteRequest;
assertEquals(Long.valueOf(1337L), request.getMaxBytesRewrittenPerCall());
}
|
@Override
public KsqlVersionMetrics collectMetrics() {
final KsqlVersionMetrics metricsRecord = new KsqlVersionMetrics();
metricsRecord.setTimestamp(TimeUnit.MILLISECONDS.toSeconds(clock.millis()));
metricsRecord.setConfluentPlatformVersion(AppInfo.getVersion());
metricsRecord.setKsqlComponentType(moduleType.name());
metricsRecord.setIsActive(activenessSupplier.get());
return metricsRecord;
}
|
@Test
public void testCollectMetricsAssignsCurrentTime() {
// Given:
final long t1 = 1000L;
final long t2 = 1001L;
when(clock.millis())
.thenReturn(TimeUnit.SECONDS.toMillis(t1))
.thenReturn(TimeUnit.SECONDS.toMillis(t2));
// When:
KsqlVersionMetrics metrics = basicCollector.collectMetrics();
// Then:
assertThat(metrics.getTimestamp(), is(t1));
metrics = basicCollector.collectMetrics();
assertThat(metrics.getTimestamp(), is(t2));
}
|
public char toChar(String name) {
return toChar(name, '\u0000');
}
|
@Test
public void testToChar_String_char() {
System.out.println("toChar");
char expResult;
char result;
Properties props = new Properties();
props.put("value1", "f");
props.put("value2", "w");
props.put("empty", "");
props.put("str", "abc");
props.put("boolean", "true");
props.put("float", "24.98");
props.put("int", "12");
props.put("char", "a");
PropertyParser instance = new PropertyParser(props);
expResult = 'f';
result = instance.toChar("value1", 't');
assertEquals(expResult, result);
expResult = 'w';
result = instance.toChar("value2", 't');
assertEquals(expResult, result);
expResult = 't';
result = instance.toChar("empty", 't');
assertEquals(expResult, result);
expResult = 't';
result = instance.toChar("str", 't');
assertEquals(expResult, result);
expResult = 't';
result = instance.toChar("boolean", 't');
assertEquals(expResult, result);
expResult = 't';
result = instance.toChar("float", 't');
assertEquals(expResult, result);
expResult = 't';
result = instance.toChar("int", 't');
assertEquals(expResult, result);
expResult = 'a';
result = instance.toChar("char", 't');
assertEquals(expResult, result);
expResult = 't';
result = instance.toChar("nonexistent", 't');
assertEquals(expResult, result);
}
|
public void containsCell(
@Nullable Object rowKey, @Nullable Object colKey, @Nullable Object value) {
containsCell(
Tables.<@Nullable Object, @Nullable Object, @Nullable Object>immutableCell(
rowKey, colKey, value));
}
|
@Test
public void containsCellFailure() {
ImmutableTable<String, String, String> table = ImmutableTable.of("row", "col", "val");
expectFailureWhenTestingThat(table).containsCell("row", "row", "val");
assertFailureKeys("value of", "expected to contain", "but was");
assertFailureValue("value of", "table.cellSet()");
assertFailureValue("expected to contain", "(row,row)=val");
assertFailureValue("but was", "[(row,col)=val]");
}
|
@Override
public Map<Consumer, List<Range>> getConsumerKeyHashRanges() {
Map<Consumer, List<Range>> result = new HashMap<>();
int start = 0;
for (Map.Entry<Integer, Consumer> entry: rangeMap.entrySet()) {
result.computeIfAbsent(entry.getValue(), key -> new ArrayList<>())
.add(Range.of(start, entry.getKey()));
start = entry.getKey() + 1;
}
return result;
}
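Worked example: with rangeMap = {16 -> consumerA, 32 -> consumerB}, iteration yields consumerA -> [0, 16] and consumerB -> [17, 32]; each range starts one past the previous ceiling, which is exactly the split pattern asserted in the test below.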
|
@Test
public void testGetConsumerKeyHashRanges() throws BrokerServiceException.ConsumerAssignException {
HashRangeAutoSplitStickyKeyConsumerSelector selector = new HashRangeAutoSplitStickyKeyConsumerSelector(2 << 5);
List<String> consumerName = Arrays.asList("consumer1", "consumer2", "consumer3", "consumer4");
List<Consumer> consumers = new ArrayList<>();
for (String s : consumerName) {
Consumer consumer = mock(Consumer.class);
when(consumer.consumerName()).thenReturn(s);
selector.addConsumer(consumer);
consumers.add(consumer);
}
Map<Consumer, List<Range>> expectedResult = new HashMap<>();
expectedResult.put(consumers.get(0), Collections.singletonList(Range.of(49, 64)));
expectedResult.put(consumers.get(3), Collections.singletonList(Range.of(33, 48)));
expectedResult.put(consumers.get(1), Collections.singletonList(Range.of(17, 32)));
expectedResult.put(consumers.get(2), Collections.singletonList(Range.of(0, 16)));
for (Map.Entry<Consumer, List<Range>> entry : selector.getConsumerKeyHashRanges().entrySet()) {
Assert.assertEquals(entry.getValue(), expectedResult.get(entry.getKey()));
expectedResult.remove(entry.getKey());
}
Assert.assertEquals(expectedResult.size(), 0);
}
|
@Override
public Statistics estimateStatistics() {
return estimateStatistics(SnapshotUtil.latestSnapshot(table, branch));
}
|
@Test
public void testEstimatedRowCount() throws NoSuchTableException {
sql(
"CREATE TABLE %s (id BIGINT, date DATE) USING iceberg TBLPROPERTIES('%s' = '%s')",
tableName, TableProperties.DEFAULT_FILE_FORMAT, format);
Dataset<Row> df =
spark
.range(10000)
.withColumn("date", date_add(expr("DATE '1970-01-01'"), expr("CAST(id AS INT)")))
.select("id", "date");
df.coalesce(1).writeTo(tableName).append();
Table table = validationCatalog.loadTable(tableIdent);
SparkScanBuilder scanBuilder =
new SparkScanBuilder(spark, table, CaseInsensitiveStringMap.empty());
SparkScan scan = (SparkScan) scanBuilder.build();
Statistics stats = scan.estimateStatistics();
Assert.assertEquals(10000L, stats.numRows().getAsLong());
}
|
@Override
public TimestampedSegment getOrCreateSegmentIfLive(final long segmentId,
final ProcessorContext context,
final long streamTime) {
final TimestampedSegment segment = super.getOrCreateSegmentIfLive(segmentId, context, streamTime);
cleanupExpiredSegments(streamTime);
return segment;
}
|
@Test
public void shouldNotCreateSegmentThatIsAlreadyExpired() {
final long streamTime = updateStreamTimeAndCreateSegment(7);
assertNull(segments.getOrCreateSegmentIfLive(0, context, streamTime));
assertFalse(new File(context.stateDir(), "test/test.0").exists());
}
|
@Override
public Optional<String> getPlacementGroup() {
return awsMetadataApi.placementGroupEc2();
}
|
@Test
public void getPlacementGroup() {
// given
String placementGroup = "placement-group";
String partitionNumber = "42";
given(awsMetadataApi.placementGroupEc2()).willReturn(Optional.of(placementGroup));
given(awsMetadataApi.placementPartitionNumberEc2()).willReturn(Optional.of(partitionNumber));
// when
Optional<String> placementGroupResult = awsEc2Client.getPlacementGroup();
Optional<String> partitionNumberResult = awsEc2Client.getPlacementPartitionNumber();
// then
assertEquals(placementGroup, placementGroupResult.orElse("N/A"));
assertEquals(partitionNumber, partitionNumberResult.orElse("N/A"));
}
|
Subscription addSubscription(final String channel, final int streamId)
{
return addSubscription(channel, streamId, defaultAvailableImageHandler, defaultUnavailableImageHandler);
}
|
@Test
@InterruptAfter(5)
void addSubscriptionShouldTimeoutWithoutOperationSuccessful()
{
assertThrows(DriverTimeoutException.class, () -> conductor.addSubscription(CHANNEL, STREAM_ID_1));
}
|
public double calculateAveragePercentageUsedBy(NormalizedResources used, double totalMemoryMb, double usedMemoryMb) {
int skippedResourceTypes = 0;
double total = 0.0;
if (usedMemoryMb > totalMemoryMb) {
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
}
if (totalMemoryMb != 0.0) {
total += usedMemoryMb / totalMemoryMb;
} else {
skippedResourceTypes++;
}
double totalCpu = getTotalCpu();
if (used.getTotalCpu() > getTotalCpu()) {
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
}
if (totalCpu != 0.0) {
total += used.getTotalCpu() / getTotalCpu();
} else {
skippedResourceTypes++;
}
if (used.otherResources.length > otherResources.length) {
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
}
for (int i = 0; i < otherResources.length; i++) {
double totalValue = otherResources[i];
double usedValue;
if (i >= used.otherResources.length) {
//Resources missing from used are using none of that resource
usedValue = 0.0;
} else {
usedValue = used.otherResources[i];
}
if (usedValue > totalValue) {
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
}
if (totalValue == 0.0) {
//Skip any resources where the total is 0, the percent used for this resource isn't meaningful.
//We fall back to prioritizing by cpu, memory and any other resources by ignoring this value
skippedResourceTypes++;
continue;
}
total += usedValue / totalValue;
}
//Adjust the divisor for the average to account for any skipped resources (those where the total was 0)
int divisor = 2 + otherResources.length - skippedResourceTypes;
if (divisor == 0) {
/*
* This is an arbitrary choice to make the result consistent with calculateMin. Any value would be valid here, because there are
* no (non-zero) resources in the total set of resources, so we're trying to average 0 values.
*/
return 100.0;
} else {
return (total * 100.0) / divisor;
}
}
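Worked example: with 2 CPUs total and 1 used (50%) plus 4 MB of memory total and 1 MB used (25%) and no other resources, nothing is skipped, the divisor is 2, and the result is (50 + 25) / 2 = 37.5%, which is what the test below asserts.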
|
@Test
public void testCalculateAvgWithCpuAndMem() {
NormalizedResources resources = new NormalizedResources(normalize(Collections.singletonMap(Constants.COMMON_CPU_RESOURCE_NAME, 2)));
NormalizedResources usedResources = new NormalizedResources(normalize(Collections.singletonMap(Constants.COMMON_CPU_RESOURCE_NAME, 1)));
double avg = resources.calculateAveragePercentageUsedBy(usedResources, 4, 1);
assertThat(avg, is((50.0 + 25.0)/2));
}
|
public Integer doCall() throws Exception {
if (name == null && label == null && filePath == null) {
printer().println("Name or label selector must be set");
return 1;
}
if (label == null) {
String projectName;
if (name != null) {
projectName = KubernetesHelper.sanitize(name);
} else {
projectName = KubernetesHelper.sanitize(FileUtil.onlyName(SourceScheme.onlyName(filePath)));
}
label = "%s=%s".formatted(BaseTrait.INTEGRATION_LABEL, projectName);
}
String[] parts = label.split("=", 2);
if (parts.length != 2) {
    printer().println("--label selector must be in syntax: key=value");
    return 1;
}
boolean shouldResume = true;
int resumeCount = 0;
while (shouldResume) {
shouldResume = watchLogs(parts[0], parts[1], container, resumeCount);
resumeCount++;
}
return 0;
}
|
@Test
public void shouldGetPodLogs() throws Exception {
Pod pod = new PodBuilder()
.withNewMetadata()
.withName("pod")
.withLabels(Collections.singletonMap(BaseTrait.INTEGRATION_LABEL, "routes"))
.endMetadata()
.withNewStatus()
.withPhase("Running")
.endStatus()
.build();
kubernetesClient.pods().resource(pod).create();
PodLogs command = createCommand();
command.name = "routes";
int exit = command.doCall();
Assertions.assertEquals(0, exit);
}
|
public static <T> RetryTransformer<T> of(Retry retry) {
return new RetryTransformer<>(retry);
}
|
@Test
public void returnOnCompleteUsingCompletable() throws InterruptedException {
RetryConfig config = retryConfig();
Retry retry = Retry.of("testName", config);
RetryTransformer<Object> retryTransformer = RetryTransformer.of(retry);
doNothing()
.doThrow(new HelloWorldException())
.doThrow(new HelloWorldException())
.doNothing()
.when(helloWorldService).sayHelloWorld();
Completable.fromRunnable(helloWorldService::sayHelloWorld)
.compose(retryTransformer)
.test()
.await()
.assertNoValues()
.assertComplete();
Completable.fromRunnable(helloWorldService::sayHelloWorld)
.compose(retryTransformer)
.test()
.await()
.assertNoValues()
.assertComplete();
then(helloWorldService).should(times(4)).sayHelloWorld();
Retry.Metrics metrics = retry.getMetrics();
assertThat(metrics.getNumberOfSuccessfulCallsWithoutRetryAttempt()).isEqualTo(1);
assertThat(metrics.getNumberOfSuccessfulCallsWithRetryAttempt()).isEqualTo(1);
assertThat(metrics.getNumberOfFailedCallsWithRetryAttempt()).isZero();
assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isZero();
}
|
@Override
public void promoteJobPartitions(Collection<ResultPartitionID> partitionsToPromote) {
LOG.debug("Promoting Job Partitions {}", partitionsToPromote);
if (partitionsToPromote.isEmpty()) {
return;
}
final Collection<PartitionTrackerEntry<JobID, TaskExecutorPartitionInfo>>
partitionTrackerEntries = stopTrackingPartitions(partitionsToPromote);
for (PartitionTrackerEntry<JobID, TaskExecutorPartitionInfo> partitionTrackerEntry :
partitionTrackerEntries) {
final TaskExecutorPartitionInfo dataSetMetaInfo = partitionTrackerEntry.getMetaInfo();
final DataSetEntry dataSetEntry =
clusterPartitions.computeIfAbsent(
dataSetMetaInfo.getIntermediateDataSetId(),
ignored -> new DataSetEntry(dataSetMetaInfo.getNumberOfPartitions()));
dataSetEntry.addPartition(partitionTrackerEntry.getMetaInfo().getShuffleDescriptor());
}
}
|
@Test
void promoteJobPartitions() throws Exception {
final TestingShuffleEnvironment testingShuffleEnvironment = new TestingShuffleEnvironment();
final CompletableFuture<Collection<ResultPartitionID>> shuffleReleaseFuture =
new CompletableFuture<>();
testingShuffleEnvironment.releasePartitionsLocallyFuture = shuffleReleaseFuture;
final JobID jobId = new JobID();
final ResultPartitionID resultPartitionId1 = new ResultPartitionID();
final ResultPartitionID resultPartitionId2 = new ResultPartitionID();
final TaskExecutorPartitionTracker partitionTracker =
new TaskExecutorPartitionTrackerImpl(testingShuffleEnvironment);
partitionTracker.startTrackingPartition(
jobId,
new TaskExecutorPartitionInfo(
new TestingShuffleDescriptor(resultPartitionId1),
new IntermediateDataSetID(),
1));
partitionTracker.startTrackingPartition(
jobId,
new TaskExecutorPartitionInfo(
new TestingShuffleDescriptor(resultPartitionId2),
new IntermediateDataSetID(),
1));
partitionTracker.promoteJobPartitions(Collections.singleton(resultPartitionId1));
partitionTracker.stopTrackingAndReleaseJobPartitionsFor(jobId);
assertThatFuture(shuffleReleaseFuture)
.eventuallySucceeds()
.asList()
.doesNotContain(resultPartitionId1);
}
|
@Override
public String[] getParameterValues(String name) {
return stringMap.get(name);
}
|
@Test
void testGetParameterValues() {
String[] values = reuseHttpServletRequest.getParameterValues("value");
assertNotNull(values);
assertEquals(1, values.length);
assertEquals("123", values[0]);
}
|
public static void verifyPrecondition(boolean assertionResult, String errorMessage) {
if (!assertionResult) {
throw new RuntimeException(errorMessage);
}
}
|
@Test
public void testVerifyPrecondition() {
verifyPrecondition(true, "");
}
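The test above only exercises the passing branch; a complementary sketch for the failing branch (assuming the same JUnit assertions used elsewhere in this document) could be:

@Test
public void testVerifyPreconditionFails() {
    try {
        verifyPrecondition(false, "boom");
        fail("Expected a RuntimeException");
    } catch (RuntimeException e) {
        // the thrown exception should carry the supplied message
        assertEquals("boom", e.getMessage());
    }
}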
|
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
List<String> partNames, boolean areAllPartsFound) throws MetaException {
checkStatisticsList(colStatsWithSourceInfo);
ColumnStatisticsObj statsObj = null;
String colType;
String colName = null;
// check if all the ColumnStatisticsObjs contain stats and all the ndv are
// bitvectors
boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
NumDistinctValueEstimator ndvEstimator = null;
boolean areAllNDVEstimatorsMergeable = true;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
if (statsObj == null) {
colName = cso.getColName();
colType = cso.getColType();
statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
cso.getStatsData().getSetField());
LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
doAllPartitionContainStats);
}
DecimalColumnStatsDataInspector columnStatsData = decimalInspectorFromStats(cso);
// check if we can merge NDV estimators
if (columnStatsData.getNdvEstimator() == null) {
areAllNDVEstimatorsMergeable = false;
break;
} else {
NumDistinctValueEstimator estimator = columnStatsData.getNdvEstimator();
if (ndvEstimator == null) {
ndvEstimator = estimator;
} else {
if (!ndvEstimator.canMerge(estimator)) {
areAllNDVEstimatorsMergeable = false;
break;
}
}
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
    LOG.debug("whether all NDV estimators for {} can merge: {}", colName, areAllNDVEstimatorsMergeable);
ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
DecimalColumnStatsDataInspector aggregateData = null;
long lowerBound = 0;
long higherBound = 0;
double densityAvgSum = 0.0;
DecimalColumnStatsMerger merger = new DecimalColumnStatsMerger();
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
DecimalColumnStatsDataInspector newData = decimalInspectorFromStats(cso);
lowerBound = Math.max(lowerBound, newData.getNumDVs());
higherBound += newData.getNumDVs();
if (newData.isSetLowValue() && newData.isSetHighValue()) {
densityAvgSum += (MetaStoreServerUtils.decimalToDouble(newData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(newData.getLowValue())) / newData.getNumDVs();
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(merger.mergeLowValue(
merger.getLowValue(aggregateData), merger.getLowValue(newData)));
aggregateData.setHighValue(merger.mergeHighValue(
merger.getHighValue(aggregateData), merger.getHighValue(newData)));
aggregateData.setNumNulls(merger.mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
aggregateData.setNumDVs(merger.mergeNumDVs(aggregateData.getNumDVs(), newData.getNumDVs()));
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
// if all the ColumnStatisticsObjs contain bitvectors, we do not need to
// use uniform distribution assumption because we can merge bitvectors
// to get a good estimation.
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
} else {
long estimation;
if (useDensityFunctionForNDVEstimation && aggregateData != null
&& aggregateData.isSetLowValue() && aggregateData.isSetHighValue()) {
// We have estimation, lowerbound and higherbound. We use estimation
// if it is between lowerbound and higherbound.
double densityAvg = densityAvgSum / partNames.size();
estimation = (long) ((MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(aggregateData.getLowValue())) / densityAvg);
if (estimation < lowerBound) {
estimation = lowerBound;
} else if (estimation > higherBound) {
estimation = higherBound;
}
} else {
estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
}
aggregateData.setNumDVs(estimation);
}
columnStatisticsData.setDecimalStats(aggregateData);
} else {
// TODO: bail out if missing stats are over a certain threshold
// we need extrapolation
LOG.debug("start extrapolation for {}", colName);
Map<String, Integer> indexMap = new HashMap<>();
for (int index = 0; index < partNames.size(); index++) {
indexMap.put(partNames.get(index), index);
}
Map<String, Double> adjustedIndexMap = new HashMap<>();
Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
      // while we scan the column stats, we also accumulate densityAvgSum
      // when useDensityFunctionForNDVEstimation is true.
double densityAvgSum = 0.0;
if (!areAllNDVEstimatorsMergeable) {
// if not every partition uses bitvector for ndv, we just fall back to
// the traditional extrapolation methods.
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
DecimalColumnStatsData newData = cso.getStatsData().getDecimalStats();
if (useDensityFunctionForNDVEstimation && newData.isSetLowValue() && newData.isSetHighValue()) {
densityAvgSum += (MetaStoreServerUtils.decimalToDouble(newData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(newData.getLowValue())) / newData.getNumDVs();
}
adjustedIndexMap.put(partName, (double) indexMap.get(partName));
adjustedStatsMap.put(partName, cso.getStatsData());
}
} else {
// we first merge all the adjacent bitvectors that we could merge and
// derive new partition names and index.
StringBuilder pseudoPartName = new StringBuilder();
double pseudoIndexSum = 0;
int length = 0;
int curIndex = -1;
DecimalColumnStatsDataInspector aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
DecimalColumnStatsDataInspector newData = decimalInspectorFromStats(cso);
          // newData.isSetBitVectors() is guaranteed to be true here because
          // it was already checked in the first pass above.
if (indexMap.get(partName) != curIndex) {
            // This partition has a bitvector, but it is not adjacent to the previous ones.
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setDecimalStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += (MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(aggregateData.getLowValue())) / aggregateData.getNumDVs();
}
// reset everything
pseudoPartName = new StringBuilder();
pseudoIndexSum = 0;
length = 0;
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
aggregateData = null;
}
curIndex = indexMap.get(partName);
pseudoPartName.append(partName);
pseudoIndexSum += curIndex;
length++;
curIndex++;
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
if (MetaStoreServerUtils.decimalToDouble(aggregateData.getLowValue()) < MetaStoreServerUtils
.decimalToDouble(newData.getLowValue())) {
aggregateData.setLowValue(aggregateData.getLowValue());
} else {
aggregateData.setLowValue(newData.getLowValue());
}
if (MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue()) > MetaStoreServerUtils
.decimalToDouble(newData.getHighValue())) {
aggregateData.setHighValue(aggregateData.getHighValue());
} else {
aggregateData.setHighValue(newData.getHighValue());
}
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
}
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setDecimalStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += (MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(aggregateData.getLowValue())) / aggregateData.getNumDVs();
}
}
}
extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
}
LOG.debug(
"Ndv estimation for {} is {}. # of partitions requested: {}. # of partitions found: {}",
colName, columnStatisticsData.getDecimalStats().getNumDVs(), partNames.size(),
colStatsWithSourceInfo.size());
KllHistogramEstimator mergedKllHistogramEstimator = mergeHistograms(colStatsWithSourceInfo);
if (mergedKllHistogramEstimator != null) {
columnStatisticsData.getDecimalStats().setHistogram(mergedKllHistogramEstimator.serialize());
}
statsObj.setStatsData(columnStatisticsData);
return statsObj;
}
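The density-based fallback above reduces to range / average density, clamped into [lowerBound, higherBound]; as a standalone sketch (illustrative names, not Hive API):

// estimation = (high - low) / densityAvg, clamped to [lowerBound, higherBound]
static long estimateNumDVs(double low, double high, double densityAvg,
                           long lowerBound, long higherBound) {
    long estimation = (long) ((high - low) / densityAvg);
    return Math.max(lowerBound, Math.min(higherBound, estimation));
}
// e.g. low=1.0, high=7.0, densityAvg=1.0 gives 6, already within bounds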
|
@Test
public void testAggregateMultiStatsWhenAllAvailable() throws MetaException {
List<String> partitions = Arrays.asList("part1", "part2", "part3");
ColumnStatisticsData data1 = new ColStatsBuilder<>(Decimal.class).numNulls(1).numDVs(3)
.low(ONE).high(THREE).hll(1, 2, 3).kll(1, 2, 3).build();
ColumnStatisticsData data2 = new ColStatsBuilder<>(Decimal.class).numNulls(2).numDVs(3)
.low(THREE).high(FIVE).hll(3, 4, 5).kll(3, 4, 5).build();
ColumnStatisticsData data3 = new ColStatsBuilder<>(Decimal.class).numNulls(3).numDVs(2)
.low(SIX).high(SEVEN).hll(6, 7).kll(6, 7).build();
List<ColStatsObjWithSourceInfo> statsList = Arrays.asList(
createStatsWithInfo(data1, TABLE, COL, partitions.get(0)),
createStatsWithInfo(data2, TABLE, COL, partitions.get(1)),
createStatsWithInfo(data3, TABLE, COL, partitions.get(2)));
DecimalColumnStatsAggregator aggregator = new DecimalColumnStatsAggregator();
ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true);
    // the aggregation does not update the stored hll (it keeps the first one); only numDVs changes
    // numDVs is computed by merging the HLLs, which detects that '3' appears in two partitions
ColumnStatisticsData expectedStats = new ColStatsBuilder<>(Decimal.class).numNulls(6).numDVs(7)
.low(ONE).high(SEVEN).hll(1, 2, 3).kll(1, 2, 3, 3, 4, 5, 6, 7).build();
assertEqualStatistics(expectedStats, computedStatsObj.getStatsData());
}
|
public static void register(String name, Object instance) {
try {
Class<Object> mbeanInterface = guessMBeanInterface(instance);
ManagementFactory.getPlatformMBeanServer().registerMBean(new StandardMBean(instance, mbeanInterface), new ObjectName(name));
} catch (MalformedObjectNameException | NotCompliantMBeanException | InstanceAlreadyExistsException | MBeanRegistrationException e) {
throw new IllegalStateException("Can not register MBean [" + name + "]", e);
}
}
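A hypothetical usage sketch (none of these names appear in the original listing): StandardMBean needs a compliant management interface, conventionally named <Impl>MBean, which guessMBeanInterface presumably resolves from the instance's class.

public interface CounterMBean {
    int getCount();
}
public static class Counter implements CounterMBean {
    @Override
    public int getCount() {
        return 42;
    }
}
// Jmx.register("org.example:type=Counter", new Counter());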
|
@Test
public void register_fails_if_mbean_interface_can_not_be_found() {
assertThatThrownBy(() -> Jmx.register(FAKE_NAME, "not a mbean"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Can not find the MBean interface of class java.lang.String");
}
|
@Override
public void flush() {
store.flush();
}
|
@Test
public void shouldFlushVersionedStore() {
givenWrapperWithVersionedStore();
wrapper.flush();
verify(versionedStore).flush();
}
|
@Override
public void handle(final ConsumerRecord<String, Bytes> record) {
try {
if (record.key().startsWith("register-")) {
serviceReportProperties(InstanceProperties.parseFrom(record.value().get()));
} else {
keepAlive(InstancePingPkg.parseFrom(record.value().get()));
}
} catch (Exception e) {
log.error("handle record failed", e);
}
}
|
@Test
public void testHandler() {
InstanceProperties properties = InstanceProperties.newBuilder()
.setService(SERVICE)
.setServiceInstance(SERVICE_INSTANCE)
.build();
InstancePingPkg ping = InstancePingPkg.newBuilder()
.setService(SERVICE)
.setServiceInstance(SERVICE_INSTANCE)
.build();
    // the key must start with "register-" for the record to take the register branch
    handler.handle(new ConsumerRecord<>(TOPIC_NAME, 0, 0, "register-", Bytes.wrap(properties.toByteArray())));
handler.handle(
new ConsumerRecord<>(TOPIC_NAME, 0, 0, ping.getServiceInstance(), Bytes.wrap(ping.toByteArray())));
}
|
static Multimap<String, Range<Integer>> extractHighlightRanges(Map<String, List<String>> highlight) {
if (highlight == null || highlight.isEmpty()) {
return ImmutableListMultimap.of();
}
final ImmutableListMultimap.Builder<String, Range<Integer>> builder = ImmutableListMultimap.builder();
highlight.forEach((key, value) -> extractRange(value).forEach(range -> builder.put(key, range)));
return builder.build();
}
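A minimal usage sketch (assuming the surrounding class and its Guava imports are available): each <em>...</em> region in a highlight fragment becomes an integer Range keyed by the field name, as the test below demonstrates on a deliberately broken fragment.

Map<String, List<String>> highlight =
    ImmutableMap.of("message", ImmutableList.of("a <em>hit</em> here"));
Multimap<String, Range<Integer>> ranges =
    HighlightParser.extractHighlightRanges(highlight);
ranges.get("message").forEach(System.out::println);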
|
@Test
public void brokenHighlight() throws Exception {
final Map<String, List<String>> highlights = ImmutableMap.of(
"message", ImmutableList.of("/<em>usr</em>/sbin</em>/cron[22390]<em>: (root) CMD (/<em>usr</em>/libexec/atrun)")
);
final Multimap<String, Range<Integer>> result = HighlightParser.extractHighlightRanges(highlights);
assertThat(result).isNotNull();
assertThat(result.get("message"))
.isNotEmpty()
.containsExactly(
Range.closed(1, 4),
Range.closed(26, 48)
);
}
|
@Override
public int run(InputStream stdin, PrintStream out, PrintStream err, List<String> args) throws Exception {
if (args.size() < 2) {
printInfo(err);
return 1;
}
int index = 0;
String input = args.get(index);
String option = "all";
if ("-o".equals(input)) {
option = args.get(1);
index += 2;
}
if (!OPTIONS.contains(option) || (args.size() - index < 1)) {
printInfo(err);
return 1;
}
input = args.get(index++);
if (!REPORT.equals(option)) {
if (args.size() - index < 1) {
printInfo(err);
return 1;
}
}
if (ALL.equals(option)) {
return recoverAll(input, args.get(index), out, err);
} else if (PRIOR.equals(option)) {
return recoverPrior(input, args.get(index), out, err);
} else if (AFTER.equals(option)) {
return recoverAfter(input, args.get(index), out, err);
} else if (REPORT.equals(option)) {
return reportOnly(input, out, err);
} else {
return 1;
}
}
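For reference, a hedged invocation sketch; the tool name and option strings are assumed from the constants referenced above, not confirmed by this listing:

// recover all readable records:      repair -o all    corrupt.avro fixed.avro
// records before the first bad one:  repair -o prior  corrupt.avro fixed.avro
// records after the corrupt block:   repair -o after  corrupt.avro fixed.avro
// report only, no output file:       repair -o report corrupt.avro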
|
@Test
void repairAfterCorruptBlock() throws Exception {
String output = run(new DataFileRepairTool(), "-o", "after", corruptBlockFile.getPath(), repairedFile.getPath());
assertTrue(output.contains("Number of blocks: 2 Number of corrupt blocks: 1"), output);
assertTrue(output.contains("Number of records: 5 Number of corrupt records: 0"), output);
checkFileContains(repairedFile, "guava", "hazelnut");
}
|
public Point<T> build() {
requireNonNull(epochTime, "Points must have a time");
requireNonNull(latitude, "Points must have a latitude");
requireNonNull(longitude, "Points must have a longitude");
Position pos = new Position(
Instant.ofEpochMilli(epochTime), LatLong.of(latitude, longitude), altitude);
Velocity vel = (nonNull(speed) && nonNull(course))
? new Velocity(speed, course)
: null;
return new Point<>(pos, vel, trackId, rawData);
}
|
@Test
public void testCopier() {
Point<String> testPoint = getTestPoint();
Point<String> copiedPoint = (new PointBuilder<>(testPoint)).build();
assertThat(testPoint, is(copiedPoint));
}
|
public static boolean validHostPattern(String address) {
return VALID_HOST_CHARACTERS.matcher(address).matches();
}
|
@Test
public void testHostPattern() {
assertTrue(validHostPattern("127.0.0.1"));
assertTrue(validHostPattern("mydomain.com"));
assertTrue(validHostPattern("MyDomain.com"));
assertTrue(validHostPattern("My_Domain.com"));
assertTrue(validHostPattern("::1"));
assertTrue(validHostPattern("2001:db8:85a3:8d3:1319:8a2e:370"));
}
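The VALID_HOST_CHARACTERS pattern is not shown; a candidate that accepts every input exercised by the test above (letters, digits, dots, underscores, hyphens, and IPv6 colons) would be:

// Hypothetical definition, inferred from the accepted test inputs only;
// it deliberately validates characters, not full hostname/IP syntax.
private static final Pattern VALID_HOST_CHARACTERS =
    Pattern.compile("[a-zA-Z0-9_.:-]+");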
|
@Override
public String toString() {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("x=");
stringBuilder.append(this.tileX);
stringBuilder.append(", y=");
stringBuilder.append(this.tileY);
stringBuilder.append(", z=");
stringBuilder.append(this.zoomLevel);
return stringBuilder.toString();
}
|
@Test
public void toStringTest() {
Tile tile = new Tile(1, 2, (byte) 3, TILE_SIZE);
Assert.assertEquals(TILE_TO_STRING, tile.toString());
}
|
public static <T> Object create(Class<T> iface, T implementation,
RetryPolicy retryPolicy) {
return RetryProxy.create(iface,
new DefaultFailoverProxyProvider<T>(iface, implementation),
retryPolicy);
}
|
@Test
public void testExponentialRetry() throws UnreliableException {
UnreliableInterface unreliable = (UnreliableInterface)
RetryProxy.create(UnreliableInterface.class, unreliableImpl,
exponentialBackoffRetry(5, 1L, TimeUnit.NANOSECONDS));
unreliable.alwaysSucceeds();
unreliable.failsOnceThenSucceeds();
try {
unreliable.failsTenTimesThenSucceeds();
fail("Should fail");
} catch (UnreliableException e) {
// expected
}
}
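A usage sketch for the create overload shown above, swapping in another policy from Hadoop's built-in RetryPolicies (UnreliableInterface and unreliableImpl come from the test fixture):

UnreliableInterface proxy = (UnreliableInterface) RetryProxy.create(
    UnreliableInterface.class, unreliableImpl,
    RetryPolicies.TRY_ONCE_THEN_FAIL);
proxy.alwaysSucceeds();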
|