| focal_method | test_case |
|---|---|
@Udf(schema = "ARRAY<STRUCT<K STRING, V BIGINT>>")
public List<Struct> entriesBigInt(
@UdfParameter(description = "The map to create entries from") final Map<String, Long> map,
@UdfParameter(description = "If true then the resulting entries are sorted by key")
final boolean sorted
) {
return entries(map, BIGINT_STRUCT_SCHEMA, sorted);
}
|
@Test
public void shouldComputeBigIntEntries() {
final Map<String, Long> map = createMap(Long::valueOf);
shouldComputeEntries(map, () -> entriesUdf.entriesBigInt(map, false));
}
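The UDF above delegates to a shared entries helper whose body is not shown in this pair. A minimal sketch of what it plausibly does, assuming Kafka Connect Struct/Schema types and the K/V field names from the declared UDF schema:
// Hypothetical sketch only; the real helper is not shown above.
private List<Struct> entries(final Map<String, ?> map, final Schema structSchema, final boolean sorted) {
if (map == null) {
return null;
}
final List<Struct> structs = new ArrayList<>(map.size());
for (final Map.Entry<String, ?> entry : map.entrySet()) {
structs.add(new Struct(structSchema).put("K", entry.getKey()).put("V", entry.getValue()));
}
if (sorted) {
structs.sort(Comparator.comparing(s -> s.getString("K")));
}
return structs;
}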
|
@Override
public List<PinotTaskConfig> generateTasks(List<TableConfig> tableConfigs) {
String taskType = MergeRollupTask.TASK_TYPE;
List<PinotTaskConfig> pinotTaskConfigs = new ArrayList<>();
for (TableConfig tableConfig : tableConfigs) {
if (!validate(tableConfig, taskType)) {
continue;
}
String tableNameWithType = tableConfig.getTableName();
LOGGER.info("Start generating task configs for table: {} for task: {}", tableNameWithType, taskType);
// Get all segment metadata
List<SegmentZKMetadata> allSegments = getSegmentsZKMetadataForTable(tableNameWithType);
// Filter segments based on status
List<SegmentZKMetadata> preSelectedSegmentsBasedOnStatus
= filterSegmentsBasedOnStatus(tableConfig.getTableType(), allSegments);
// Select current segment snapshot based on lineage, filter out empty segments
SegmentLineage segmentLineage = _clusterInfoAccessor.getSegmentLineage(tableNameWithType);
Set<String> preSelectedSegmentsBasedOnLineage = new HashSet<>();
for (SegmentZKMetadata segment : preSelectedSegmentsBasedOnStatus) {
preSelectedSegmentsBasedOnLineage.add(segment.getSegmentName());
}
SegmentLineageUtils.filterSegmentsBasedOnLineageInPlace(preSelectedSegmentsBasedOnLineage, segmentLineage);
List<SegmentZKMetadata> preSelectedSegments = new ArrayList<>();
for (SegmentZKMetadata segment : preSelectedSegmentsBasedOnStatus) {
if (preSelectedSegmentsBasedOnLineage.contains(segment.getSegmentName()) && segment.getTotalDocs() > 0
&& MergeTaskUtils.allowMerge(segment)) {
preSelectedSegments.add(segment);
}
}
if (preSelectedSegments.isEmpty()) {
// Reset the watermark time if no segment found. This covers the case where the table is newly created or
// all segments for the existing table got deleted.
resetDelayMetrics(tableNameWithType);
LOGGER.info("Skip generating task: {} for table: {}, no segment is found.", taskType, tableNameWithType);
continue;
}
// Sort segments based on startTimeMs, endTimeMs and segmentName in ascending order
preSelectedSegments.sort((a, b) -> {
long aStartTime = a.getStartTimeMs();
long bStartTime = b.getStartTimeMs();
if (aStartTime != bStartTime) {
return Long.compare(aStartTime, bStartTime);
}
long aEndTime = a.getEndTimeMs();
long bEndTime = b.getEndTimeMs();
return aEndTime != bEndTime ? Long.compare(aEndTime, bEndTime)
: a.getSegmentName().compareTo(b.getSegmentName());
});
// Sort merge levels based on bucket time period
Map<String, String> taskConfigs = tableConfig.getTaskConfig().getConfigsForTaskType(taskType);
Map<String, Map<String, String>> mergeLevelToConfigs = MergeRollupTaskUtils.getLevelToConfigMap(taskConfigs);
List<Map.Entry<String, Map<String, String>>> sortedMergeLevelConfigs =
new ArrayList<>(mergeLevelToConfigs.entrySet());
sortedMergeLevelConfigs.sort(Comparator.comparingLong(
e -> TimeUtils.convertPeriodToMillis(e.getValue().get(MinionConstants.MergeTask.BUCKET_TIME_PERIOD_KEY))));
// Get incomplete merge levels
Set<String> inCompleteMergeLevels = new HashSet<>();
for (Map.Entry<String, TaskState> entry : TaskGeneratorUtils.getIncompleteTasks(taskType, tableNameWithType,
_clusterInfoAccessor).entrySet()) {
for (PinotTaskConfig taskConfig : _clusterInfoAccessor.getTaskConfigs(entry.getKey())) {
inCompleteMergeLevels.add(taskConfig.getConfigs().get(MergeRollupTask.MERGE_LEVEL_KEY));
}
}
// Get scheduling mode which is "processFromWatermark" by default. If "processAll" mode is enabled, there will be
// no watermark, and each round we pick the buckets in chronological order which have unmerged segments.
boolean processAll = MergeTask.PROCESS_ALL_MODE.equalsIgnoreCase(taskConfigs.get(MergeTask.MODE));
ZNRecord mergeRollupTaskZNRecord = _clusterInfoAccessor
.getMinionTaskMetadataZNRecord(MinionConstants.MergeRollupTask.TASK_TYPE, tableNameWithType);
int expectedVersion = mergeRollupTaskZNRecord != null ? mergeRollupTaskZNRecord.getVersion() : -1;
MergeRollupTaskMetadata mergeRollupTaskMetadata =
mergeRollupTaskZNRecord != null ? MergeRollupTaskMetadata.fromZNRecord(mergeRollupTaskZNRecord)
: new MergeRollupTaskMetadata(tableNameWithType, new TreeMap<>());
List<PinotTaskConfig> pinotTaskConfigsForTable = new ArrayList<>();
// Schedule tasks from lowest to highest merge level (e.g. Hourly -> Daily -> Monthly -> Yearly)
String mergeLevel = null;
for (Map.Entry<String, Map<String, String>> mergeLevelConfig : sortedMergeLevelConfigs) {
String lowerMergeLevel = mergeLevel;
mergeLevel = mergeLevelConfig.getKey();
Map<String, String> mergeConfigs = mergeLevelConfig.getValue();
// Skip scheduling if there's an incomplete task for the current mergeLevel
if (inCompleteMergeLevels.contains(mergeLevel)) {
LOGGER.info("Found incomplete task of merge level: {} for the same table: {}, skipping task generation: {}",
mergeLevel, tableNameWithType, taskType);
continue;
}
// Get the bucket size, buffer size and maximum number of parallel buckets (by default 1)
String bucketPeriod = mergeConfigs.get(MergeTask.BUCKET_TIME_PERIOD_KEY);
long bucketMs = TimeUtils.convertPeriodToMillis(bucketPeriod);
if (bucketMs <= 0) {
LOGGER.error("Bucket time period: {} (table : {}, mergeLevel : {}) must be larger than 0", bucketPeriod,
tableNameWithType, mergeLevel);
continue;
}
String bufferPeriod = mergeConfigs.get(MergeTask.BUFFER_TIME_PERIOD_KEY);
long bufferMs = TimeUtils.convertPeriodToMillis(bufferPeriod);
if (bufferMs < 0) {
LOGGER.error("Buffer time period: {} (table : {}, mergeLevel : {}) must be larger or equal to 0",
bufferPeriod, tableNameWithType, mergeLevel);
continue;
}
String maxNumParallelBucketsStr = mergeConfigs.get(MergeTask.MAX_NUM_PARALLEL_BUCKETS);
int maxNumParallelBuckets = maxNumParallelBucketsStr != null ? Integer.parseInt(maxNumParallelBucketsStr)
: DEFAULT_NUM_PARALLEL_BUCKETS;
if (maxNumParallelBuckets <= 0) {
LOGGER.error("Maximum number of parallel buckets: {} (table : {}, mergeLevel : {}) must be larger than 0",
maxNumParallelBuckets, tableNameWithType, mergeLevel);
continue;
}
// Get bucket start/end time
long preSelectedSegStartTimeMs = preSelectedSegments.get(0).getStartTimeMs();
long bucketStartMs = preSelectedSegStartTimeMs / bucketMs * bucketMs;
long watermarkMs = 0;
if (!processAll) {
// Get watermark from MergeRollupTaskMetadata ZNode
// bucketStartMs = watermarkMs
// bucketEndMs = bucketStartMs + bucketMs
watermarkMs = getWatermarkMs(preSelectedSegStartTimeMs, bucketMs, mergeLevel,
mergeRollupTaskMetadata);
bucketStartMs = watermarkMs;
}
long bucketEndMs = bucketStartMs + bucketMs;
if (lowerMergeLevel == null) {
long lowestLevelMaxValidBucketEndTimeMs = Long.MIN_VALUE;
for (SegmentZKMetadata preSelectedSegment : preSelectedSegments) {
// Compute lowestLevelMaxValidBucketEndTimeMs among segments that are ready for merge
long currentValidBucketEndTimeMs =
getValidBucketEndTimeMsForSegment(preSelectedSegment, bucketMs, bufferMs);
lowestLevelMaxValidBucketEndTimeMs =
Math.max(lowestLevelMaxValidBucketEndTimeMs, currentValidBucketEndTimeMs);
}
_tableLowestLevelMaxValidBucketEndTimeMs.put(tableNameWithType, lowestLevelMaxValidBucketEndTimeMs);
}
// Create metrics even if there's no task scheduled; this helps the case where the controller is restarted
// but the metrics are not available until the controller schedules a valid task
List<String> sortedMergeLevels =
sortedMergeLevelConfigs.stream().map(e -> e.getKey()).collect(Collectors.toList());
if (processAll) {
createOrUpdateNumBucketsToProcessMetrics(tableNameWithType, mergeLevel, lowerMergeLevel, bufferMs, bucketMs,
preSelectedSegments, sortedMergeLevels);
} else {
createOrUpdateDelayMetrics(tableNameWithType, mergeLevel, null, watermarkMs, bufferMs, bucketMs);
}
if (!isValidBucketEndTime(bucketEndMs, bufferMs, lowerMergeLevel, mergeRollupTaskMetadata, processAll)) {
LOGGER.info("Bucket with start: {} and end: {} (table : {}, mergeLevel : {}, mode : {}) cannot be merged yet",
bucketStartMs, bucketEndMs, tableNameWithType, mergeLevel, processAll ? MergeTask.PROCESS_ALL_MODE
: MergeTask.PROCESS_FROM_WATERMARK_MODE);
continue;
}
// Find overlapping segments for each bucket, skip the buckets that have all segments merged
List<List<SegmentZKMetadata>> selectedSegmentsForAllBuckets = new ArrayList<>(maxNumParallelBuckets);
List<SegmentZKMetadata> selectedSegmentsForBucket = new ArrayList<>();
boolean hasUnmergedSegments = false;
boolean hasSpilledOverData = false;
boolean areAllSegmentsReadyToMerge = true;
// The for loop terminates in the following cases:
// 1. Found buckets with unmerged segments:
// For each bucket, find all segments overlapping with the target bucket, and skip the bucket if all
// overlapping segments are merged. Schedule at most k (numParallelBuckets) buckets, and stop at the first
// bucket that contains spilled over data.
// One may wonder how a segment with records spanning different buckets is handled. The short answer is that
// it will be cut into multiple segments, one per bucket. This is achieved by setting the bucket time
// period as PARTITION_BUCKET_TIME_PERIOD when generating PinotTaskConfigs.
// 2. There's no bucket with unmerged segments: skip scheduling.
for (SegmentZKMetadata preSelectedSegment : preSelectedSegments) {
long startTimeMs = preSelectedSegment.getStartTimeMs();
if (startTimeMs < bucketEndMs) {
long endTimeMs = preSelectedSegment.getEndTimeMs();
if (endTimeMs >= bucketStartMs) {
// For segments overlapping with current bucket, add to the result list
if (!isMergedSegment(preSelectedSegment, mergeLevel, sortedMergeLevels)) {
hasUnmergedSegments = true;
}
if (!isMergedSegment(preSelectedSegment, lowerMergeLevel, sortedMergeLevels)) {
areAllSegmentsReadyToMerge = false;
}
if (hasSpilledOverData(preSelectedSegment, bucketMs)) {
hasSpilledOverData = true;
}
selectedSegmentsForBucket.add(preSelectedSegment);
}
// endTimeMs < bucketStartMs
// Haven't found the first overlapping segment yet, continue to the next segment
} else {
// Has gone through all overlapping segments for current bucket
if (hasUnmergedSegments && areAllSegmentsReadyToMerge) {
// Add the bucket if there are unmerged segments
selectedSegmentsForAllBuckets.add(selectedSegmentsForBucket);
}
if (selectedSegmentsForAllBuckets.size() == maxNumParallelBuckets || hasSpilledOverData) {
// If there are enough buckets or found spilled over data, schedule merge tasks
break;
} else {
// Start with a new bucket
// TODO: If there are many small merged segments, we should merge them again
selectedSegmentsForBucket = new ArrayList<>();
hasUnmergedSegments = false;
areAllSegmentsReadyToMerge = true;
bucketStartMs = (startTimeMs / bucketMs) * bucketMs;
bucketEndMs = bucketStartMs + bucketMs;
if (!isValidBucketEndTime(bucketEndMs, bufferMs, lowerMergeLevel, mergeRollupTaskMetadata, processAll)) {
break;
}
if (!isMergedSegment(preSelectedSegment, mergeLevel, sortedMergeLevels)) {
hasUnmergedSegments = true;
}
if (!isMergedSegment(preSelectedSegment, lowerMergeLevel, sortedMergeLevels)) {
areAllSegmentsReadyToMerge = false;
}
if (hasSpilledOverData(preSelectedSegment, bucketMs)) {
hasSpilledOverData = true;
}
selectedSegmentsForBucket.add(preSelectedSegment);
}
}
}
// Add the last bucket if it contains unmerged segments and is not added before
if (hasUnmergedSegments && areAllSegmentsReadyToMerge && (selectedSegmentsForAllBuckets.isEmpty() || (
selectedSegmentsForAllBuckets.get(selectedSegmentsForAllBuckets.size() - 1)
!= selectedSegmentsForBucket))) {
selectedSegmentsForAllBuckets.add(selectedSegmentsForBucket);
}
if (selectedSegmentsForAllBuckets.isEmpty()) {
LOGGER.info("No unmerged segment found for table: {}, mergeLevel: {}", tableNameWithType, mergeLevel);
continue;
}
// Bump up watermark to the earliest start time of selected segments truncated to the closest bucket boundary
long newWatermarkMs = selectedSegmentsForAllBuckets.get(0).get(0).getStartTimeMs() / bucketMs * bucketMs;
mergeRollupTaskMetadata.getWatermarkMap().put(mergeLevel, newWatermarkMs);
LOGGER.info("Update watermark for table: {}, mergeLevel: {} from: {} to: {}", tableNameWithType, mergeLevel,
watermarkMs, newWatermarkMs);
// Update the delay metrics
if (!processAll) {
createOrUpdateDelayMetrics(tableNameWithType, mergeLevel, lowerMergeLevel, newWatermarkMs, bufferMs,
bucketMs);
}
// Create task configs
int maxNumRecordsPerTask =
mergeConfigs.get(MergeRollupTask.MAX_NUM_RECORDS_PER_TASK_KEY) != null ? Integer.parseInt(
mergeConfigs.get(MergeRollupTask.MAX_NUM_RECORDS_PER_TASK_KEY)) : DEFAULT_MAX_NUM_RECORDS_PER_TASK;
SegmentPartitionConfig segmentPartitionConfig = tableConfig.getIndexingConfig().getSegmentPartitionConfig();
if (segmentPartitionConfig == null) {
for (List<SegmentZKMetadata> selectedSegmentsPerBucket : selectedSegmentsForAllBuckets) {
pinotTaskConfigsForTable.addAll(
createPinotTaskConfigs(selectedSegmentsPerBucket, tableConfig, maxNumRecordsPerTask, mergeLevel,
null, mergeConfigs, taskConfigs));
}
} else {
// For a partitioned table, schedule separate tasks for each partitionId (the partitionId is constructed
// from the partitions of all partition columns. There should be an exact match between the partition
// columns of the segment and the partition columns of the table configuration, and only one partition per
// column in the segment metadata). Segments which do not meet these conditions are considered outlier
// segments, and additional tasks are generated for them.
Map<String, ColumnPartitionConfig> columnPartitionMap = segmentPartitionConfig.getColumnPartitionMap();
List<String> partitionColumns = new ArrayList<>(columnPartitionMap.keySet());
for (List<SegmentZKMetadata> selectedSegmentsPerBucket : selectedSegmentsForAllBuckets) {
Map<List<Integer>, List<SegmentZKMetadata>> partitionToSegments = new HashMap<>();
List<SegmentZKMetadata> outlierSegments = new ArrayList<>();
for (SegmentZKMetadata selectedSegment : selectedSegmentsPerBucket) {
SegmentPartitionMetadata segmentPartitionMetadata = selectedSegment.getPartitionMetadata();
List<Integer> partitions = new ArrayList<>();
if (segmentPartitionMetadata != null && columnPartitionMap.keySet()
.equals(segmentPartitionMetadata.getColumnPartitionMap().keySet())) {
for (String partitionColumn : partitionColumns) {
if (segmentPartitionMetadata.getPartitions(partitionColumn).size() == 1) {
partitions.add(segmentPartitionMetadata.getPartitions(partitionColumn).iterator().next());
} else {
partitions.clear();
break;
}
}
}
if (partitions.isEmpty()) {
outlierSegments.add(selectedSegment);
} else {
partitionToSegments.computeIfAbsent(partitions, k -> new ArrayList<>()).add(selectedSegment);
}
}
for (Map.Entry<List<Integer>, List<SegmentZKMetadata>> entry : partitionToSegments.entrySet()) {
List<Integer> partition = entry.getKey();
List<SegmentZKMetadata> partitionedSegments = entry.getValue();
pinotTaskConfigsForTable.addAll(
createPinotTaskConfigs(partitionedSegments, tableConfig, maxNumRecordsPerTask, mergeLevel,
partition, mergeConfigs, taskConfigs));
}
if (!outlierSegments.isEmpty()) {
pinotTaskConfigsForTable.addAll(
createPinotTaskConfigs(outlierSegments, tableConfig, maxNumRecordsPerTask, mergeLevel,
null, mergeConfigs, taskConfigs));
}
}
}
}
// Write updated watermark map to zookeeper
if (!processAll) {
try {
_clusterInfoAccessor
.setMinionTaskMetadata(mergeRollupTaskMetadata, MinionConstants.MergeRollupTask.TASK_TYPE,
expectedVersion);
} catch (ZkException e) {
LOGGER.error(
"Version changed while updating merge/rollup task metadata for table: {}, skip scheduling. There are "
+ "multiple task schedulers for the same table, need to investigate!", tableNameWithType);
continue;
}
}
pinotTaskConfigs.addAll(pinotTaskConfigsForTable);
LOGGER.info("Finished generating task configs for table: {} for task: {}, numTasks: {}", tableNameWithType,
taskType, pinotTaskConfigsForTable.size());
}
// Clean up metrics
cleanUpDelayMetrics(tableConfigs);
return pinotTaskConfigs;
}
|
@Test
public void testPartitionedTable() {
Map<String, Map<String, String>> taskConfigsMap = new HashMap<>();
Map<String, String> tableTaskConfigs = new HashMap<>();
tableTaskConfigs.put("daily.mergeType", "concat");
tableTaskConfigs.put("daily.bufferTimePeriod", "2d");
tableTaskConfigs.put("daily.bucketTimePeriod", "1d");
tableTaskConfigs.put("daily.maxNumRecordsPerSegment", "1000000");
taskConfigsMap.put(MinionConstants.MergeRollupTask.TASK_TYPE, tableTaskConfigs);
TableConfig offlineTableConfig =
new TableConfigBuilder(TableType.OFFLINE).setTableName(RAW_TABLE_NAME).setTimeColumnName(TIME_COLUMN_NAME)
.setSegmentPartitionConfig(new SegmentPartitionConfig(
Collections.singletonMap("memberId", new ColumnPartitionConfig("murmur", 10))))
.setTaskConfig(new TableTaskConfig(taskConfigsMap)).build();
String segmentName1 = "testTable__1";
String segmentName2 = "testTable__2";
String segmentName3 = "testTable__3";
String segmentName4 = "testTable__4";
SegmentZKMetadata metadata1 =
getSegmentZKMetadata(segmentName1, 86_400_000L, 90_000_000L, TimeUnit.MILLISECONDS, null);
metadata1.setPartitionMetadata(new SegmentPartitionMetadata(Collections.singletonMap("memberId",
new ColumnPartitionMetadata("murmur", 10, Collections.singleton(0), null))));
SegmentZKMetadata metadata2 =
getSegmentZKMetadata(segmentName2, 86_400_000L, 100_000_000L, TimeUnit.MILLISECONDS, null);
metadata2.setPartitionMetadata(new SegmentPartitionMetadata(Collections.singletonMap("memberId",
new ColumnPartitionMetadata("murmur", 10, Collections.singleton(0), null))));
SegmentZKMetadata metadata3 =
getSegmentZKMetadata(segmentName3, 86_400_000L, 110_000_000L, TimeUnit.MILLISECONDS, null);
metadata3.setPartitionMetadata(new SegmentPartitionMetadata(Collections.singletonMap("memberId",
new ColumnPartitionMetadata("murmur", 10, Collections.singleton(1), null))));
SegmentZKMetadata metadata4 =
getSegmentZKMetadata(segmentName4, 90_000_000L, 110_000_000L, TimeUnit.MILLISECONDS, null);
metadata4.setPartitionMetadata(new SegmentPartitionMetadata(Collections.singletonMap("memberId",
new ColumnPartitionMetadata("murmur", 10, Collections.singleton(1), null))));
ClusterInfoAccessor mockClusterInfoProvide = mock(ClusterInfoAccessor.class);
when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(
Lists.newArrayList(metadata1, metadata2, metadata3, metadata4));
when(mockClusterInfoProvide.getIdealState(OFFLINE_TABLE_NAME)).thenReturn(
getIdealState(OFFLINE_TABLE_NAME, Lists.newArrayList(segmentName1, segmentName2, segmentName3, segmentName4)));
MergeRollupTaskGenerator generator = new MergeRollupTaskGenerator();
generator.init(mockClusterInfoProvide);
List<PinotTaskConfig> pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
assertEquals(pinotTaskConfigs.size(), 2);
String partitionedSegmentsGroup1 = segmentName1 + "," + segmentName2;
String partitionedSegmentsGroup2 = segmentName3 + "," + segmentName4;
boolean isPartitionedSegmentsGroup1Seen = false;
boolean isPartitionedSegmentsGroup2Seen = false;
for (PinotTaskConfig pinotTaskConfig : pinotTaskConfigs) {
if (!isPartitionedSegmentsGroup1Seen) {
isPartitionedSegmentsGroup1Seen =
pinotTaskConfig.getConfigs().get(MinionConstants.SEGMENT_NAME_KEY).equals(partitionedSegmentsGroup1);
}
if (!isPartitionedSegmentsGroup2Seen) {
isPartitionedSegmentsGroup2Seen =
pinotTaskConfig.getConfigs().get(MinionConstants.SEGMENT_NAME_KEY).equals(partitionedSegmentsGroup2);
}
assertTrue(isPartitionedSegmentsGroup1Seen || isPartitionedSegmentsGroup2Seen);
checkPinotTaskConfig(pinotTaskConfig.getConfigs(), DAILY, "concat", "1d", null, "1000000");
}
assertTrue(isPartitionedSegmentsGroup1Seen && isPartitionedSegmentsGroup2Seen);
// With numMaxRecordsPerTask constraints
tableTaskConfigs.put("daily.maxNumRecordsPerTask", "5000000");
metadata1.setTotalDocs(2000000L);
metadata2.setTotalDocs(4000000L);
metadata3.setTotalDocs(5000000L);
metadata4.setTotalDocs(6000000L);
pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
assertEquals(pinotTaskConfigs.size(), 3);
isPartitionedSegmentsGroup1Seen = false;
isPartitionedSegmentsGroup2Seen = false;
boolean isPartitionedSegmentsGroup3Seen = false;
for (PinotTaskConfig pinotTaskConfig : pinotTaskConfigs) {
if (!isPartitionedSegmentsGroup1Seen) {
isPartitionedSegmentsGroup1Seen =
pinotTaskConfig.getConfigs().get(MinionConstants.SEGMENT_NAME_KEY).equals(partitionedSegmentsGroup1);
}
if (!isPartitionedSegmentsGroup2Seen) {
isPartitionedSegmentsGroup2Seen =
pinotTaskConfig.getConfigs().get(MinionConstants.SEGMENT_NAME_KEY).equals(segmentName3);
}
if (!isPartitionedSegmentsGroup3Seen) {
isPartitionedSegmentsGroup3Seen =
pinotTaskConfig.getConfigs().get(MinionConstants.SEGMENT_NAME_KEY).equals(segmentName4);
}
assertTrue(isPartitionedSegmentsGroup1Seen || isPartitionedSegmentsGroup2Seen || isPartitionedSegmentsGroup3Seen);
checkPinotTaskConfig(pinotTaskConfig.getConfigs(), DAILY, "concat", "1d", null, "1000000");
}
assertTrue(isPartitionedSegmentsGroup1Seen && isPartitionedSegmentsGroup2Seen && isPartitionedSegmentsGroup3Seen);
}
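A worked example of the bucket-boundary truncation this generator relies on, using the 1d bucket configured in the test above (values are the test's own segment times):
long bucketMs = 86_400_000L; // "daily.bucketTimePeriod" = "1d"
long startTimeMs = 90_000_000L; // segmentName4's start time
long bucketStartMs = startTimeMs / bucketMs * bucketMs; // 86_400_000, truncated to the day boundary
long bucketEndMs = bucketStartMs + bucketMs; // 172_800_000
// All four segments start at or after 86_400_000 and end before 172_800_000, so they share one
// daily bucket and are then grouped per partitionId (segments 1,2 -> partition 0; 3,4 -> partition 1).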
|
@Override
public void submit(VplsOperation vplsOperation) {
if (isLeader) {
// Only the leader can execute operations
addVplsOperation(vplsOperation);
}
}
|
@Test
public void testDoNothingOperation() {
VplsData vplsData = VplsData.of(VPLS1);
vplsData.addInterfaces(ImmutableSet.of(V100H1, V100H2));
VplsOperation vplsOperation = VplsOperation.of(vplsData,
VplsOperation.Operation.ADD);
vplsOperationManager.submit(vplsOperation);
vplsOperation = VplsOperation.of(vplsData,
VplsOperation.Operation.REMOVE);
vplsOperationManager.submit(vplsOperation);
assertAfter(OPERATION_DELAY, OPERATION_DURATION, () -> {
assertEquals(0, vplsOperationManager.pendingVplsOperations.size());
// Should not have any running operation
assertEquals(0, vplsOperationManager.runningOperations.size());
});
}
|
@Override
public State waitUntilFinish() {
return State.DONE;
}
|
@Test
public void testWaitUntilFinishReturnsDone() {
FlinkRunnerResult result = new FlinkRunnerResult(Collections.emptyMap(), 100);
assertThat(result.waitUntilFinish(), is(PipelineResult.State.DONE));
assertThat(result.waitUntilFinish(Duration.millis(100)), is(PipelineResult.State.DONE));
}
|
HasRuleEngineProfile getRuleEngineProfileForEntityOrElseNull(TenantId tenantId, EntityId entityId, TbMsg tbMsg) {
if (entityId.getEntityType().equals(EntityType.DEVICE)) {
if (TbMsgType.ENTITY_DELETED.equals(tbMsg.getInternalType())) {
try {
Device deletedDevice = JacksonUtil.fromString(tbMsg.getData(), Device.class);
if (deletedDevice == null) {
return null;
}
return deviceProfileCache.get(tenantId, deletedDevice.getDeviceProfileId());
} catch (Exception e) {
log.warn("[{}][{}] Failed to deserialize device: {}", tenantId, entityId, tbMsg, e);
return null;
}
} else {
return deviceProfileCache.get(tenantId, new DeviceId(entityId.getId()));
}
} else if (entityId.getEntityType().equals(EntityType.DEVICE_PROFILE)) {
return deviceProfileCache.get(tenantId, new DeviceProfileId(entityId.getId()));
} else if (entityId.getEntityType().equals(EntityType.ASSET)) {
if (TbMsgType.ENTITY_DELETED.equals(tbMsg.getInternalType())) {
try {
Asset deletedAsset = JacksonUtil.fromString(tbMsg.getData(), Asset.class);
if (deletedAsset == null) {
return null;
}
return assetProfileCache.get(tenantId, deletedAsset.getAssetProfileId());
} catch (Exception e) {
log.warn("[{}][{}] Failed to deserialize asset: {}", tenantId, entityId, tbMsg, e);
return null;
}
} else {
return assetProfileCache.get(tenantId, new AssetId(entityId.getId()));
}
} else if (entityId.getEntityType().equals(EntityType.ASSET_PROFILE)) {
return assetProfileCache.get(tenantId, new AssetProfileId(entityId.getId()));
}
return null;
}
|
@Test
public void testGetRuleEngineProfileForUpdatedAndDeletedDevice() {
DeviceId deviceId = new DeviceId(UUID.randomUUID());
TenantId tenantId = new TenantId(UUID.randomUUID());
DeviceProfileId deviceProfileId = new DeviceProfileId(UUID.randomUUID());
Device device = new Device(deviceId);
device.setDeviceProfileId(deviceProfileId);
// device updated
TbMsg tbMsg = TbMsg.builder().internalType(TbMsgType.ENTITY_UPDATED).build();
((DefaultTbClusterService) clusterService).getRuleEngineProfileForEntityOrElseNull(tenantId, deviceId, tbMsg);
verify(deviceProfileCache, times(1)).get(tenantId, deviceId);
// device deleted
tbMsg = TbMsg.builder().internalType(TbMsgType.ENTITY_DELETED).data(JacksonUtil.toString(device)).build();
((DefaultTbClusterService) clusterService).getRuleEngineProfileForEntityOrElseNull(tenantId, deviceId, tbMsg);
verify(deviceProfileCache, times(1)).get(tenantId, deviceProfileId);
}
|
public static GaussianProcessRegression<double[]> fit(double[][] x, double[] y, Properties params) {
MercerKernel<double[]> kernel = MercerKernel.of(params.getProperty("smile.gaussian_process.kernel", "linear"));
double noise = Double.parseDouble(params.getProperty("smile.gaussian_process.noise", "1E-10"));
boolean normalize = Boolean.parseBoolean(params.getProperty("smile.gaussian_process.normalize", "true"));
double tol = Double.parseDouble(params.getProperty("smile.gaussian_process.tolerance", "1E-5"));
int maxIter = Integer.parseInt(params.getProperty("smile.gaussian_process.iterations", "0"));
return fit(x, y, kernel, noise, normalize, tol, maxIter);
}
|
@Test
public void testOutOfBoundsException() throws Exception {
double[][] X = {
{4.543, 3.135, 0.86},
{5.159, 5.043, 1.53},
{5.366, 5.438, 1.57},
{5.759, 7.496, 1.81},
{4.663, 3.807, 0.99},
{5.697, 7.601, 1.09},
{5.892, 8.726, 1.29},
{6.078, 7.966, 1.78},
{4.898, 3.85, 1.29},
{5.242, 4.174, 1.58},
{5.74 , 6.142, 1.68},
{6.446, 7.908, 1.9 },
{4.477, 2.996, 1.06},
{5.236, 4.942, 1.3 },
{6.151, 6.752, 1.52},
{6.365, 9.588, 1.74},
{4.787, 3.912, 1.16},
{5.412, 4.7 , 1.49},
{5.247, 6.174, 1.63},
{5.438, 9.064, 1.99},
{4.564, 4.949, 1.15},
{5.298, 5.22, 1.33},
{5.455, 9.242, 1.44},
{5.855, 10.199, 2 },
{5.366, 3.664, 1.31},
{6.043, 3.219, 1.46},
{6.458, 6.962, 1.72},
{5.328, 3.912, 1.25},
{5.802, 6.685, 1.08},
{6.176, 4.787, 1.25}
};
double[] y = {
12.3, 20.9, 39, 47.9, 5.6, 25.9, 37.3, 21.9, 18.1, 21, 34.9, 57.2, 0.7, 25.9, 54.9,
40.9, 15.9, 6.4, 18, 38.9, 14, 15.2, 32, 56.71, 16.8, 11.6, 26.5, 0.7, 13.4, 5.5
};
GaussianProcessRegression model = GaussianProcessRegression.fit(
X, y, new GaussianKernel(3),
1e-5, false, 1e-5, 1024
);
System.out.println(model);
}
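A usage sketch for the Properties overload above; the keys are the ones the method reads, and the values shown are illustrative overrides of the defaults it documents (X and y as in the test):
Properties params = new Properties();
params.setProperty("smile.gaussian_process.noise", "1E-5"); // default "1E-10"
params.setProperty("smile.gaussian_process.normalize", "false"); // default "true"
GaussianProcessRegression<double[]> model = GaussianProcessRegression.fit(X, y, params);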
|
public static <T> RestResponse<T> fail() {
return new RestResponse<T>(-1);
}
|
@Test
public void testFail() {
Assert.assertFalse(RestResponse.fail().isSuccess());
Assert.assertEquals("error", RestResponse.fail("error").getMsg());
Assert.assertEquals("error", RestResponse.fail(500, "error").getMsg());
}
|
public void validate(Map<String, NewDocumentType> documentDefinitions,
Set<NewDocumentType> globallyDistributedDocuments) {
verifyReferredDocumentsArePresent(documentDefinitions);
verifyReferredDocumentsAreGlobal(documentDefinitions, globallyDistributedDocuments);
}
|
@Test
void validation_succeeds_if_referenced_document_is_global() {
NewDocumentType parent = createDocumentType("parent");
Fixture fixture = new Fixture()
.addGlobalDocument(parent)
.addNonGlobalDocument(createDocumentType("child", parent));
validate(fixture);
}
|
@Override
public Mono<Boolean> confirmPassword(String username, String rawPassword) {
return getUser(username)
.filter(user -> {
if (!StringUtils.hasText(user.getSpec().getPassword())) {
// If the password is not set, return true directly.
return true;
}
if (!StringUtils.hasText(rawPassword)) {
return false;
}
return passwordEncoder.matches(rawPassword, user.getSpec().getPassword());
})
.hasElement();
}
|
@Test
void confirmPasswordWhenPasswordNotSet() {
var user = new User();
user.setSpec(new User.UserSpec());
when(client.get(User.class, "fake-user")).thenReturn(Mono.just(user));
userService.confirmPassword("fake-user", "fake-password")
.as(StepVerifier::create)
.expectNext(true)
.verifyComplete();
user.getSpec().setPassword("");
userService.confirmPassword("fake-user", "fake-password")
.as(StepVerifier::create)
.expectNext(true)
.verifyComplete();
}
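The filter/hasElement combination in the focal method reads as "true iff the user exists and the check passed": a failed filter empties the Mono, and hasElement maps an empty Mono to false. A minimal Reactor illustration:
Mono.just("user").filter(u -> true).hasElement(); // emits true
Mono.just("user").filter(u -> false).hasElement(); // emits false (filter emptied the Mono)
Mono.<String>empty().hasElement(); // emits false (user not found)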
|
@Udf(description = "Converts a TIMESTAMP value into the"
+ " string representation of the timestamp in the given format. Single quotes in the"
+ " timestamp format can be escaped with '', for example: 'yyyy-MM-dd''T''HH:mm:ssX'"
+ " The system default time zone is used when no time zone is explicitly provided."
+ " The format pattern should be in the format expected"
+ " by java.time.format.DateTimeFormatter")
public String formatTimestamp(
@UdfParameter(
description = "TIMESTAMP value.") final Timestamp timestamp,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.time.format.DateTimeFormatter.") final String formatPattern) {
return formatTimestamp(timestamp, formatPattern, ZoneId.of("GMT").getId());
}
|
@Test
public void testTimeZoneInPacificTime() {
// Given:
final Timestamp timestamp = new Timestamp(1534353043000L);
// When:
final String pacificTime = udf.formatTimestamp(timestamp,
"yyyy-MM-dd HH:mm:ss zz", "America/Los_Angeles");
// Then:
assertThat(pacificTime,
either(is("2018-08-15 10:10:43 PDT")) // Java 8 and below.
.or(is("2018-08-15 10:10:43 GMT-07:00"))); // Java 9 and above.
}
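A minimal java.time sketch of the same formatting path, assuming the UDF delegates to DateTimeFormatter as its description states (epoch value taken from the test):
DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss zz")
.withZone(ZoneId.of("America/Los_Angeles"));
String pacificTime = formatter.format(Instant.ofEpochMilli(1534353043000L));
// "2018-08-15 10:10:43 PDT" on Java 8, "2018-08-15 10:10:43 GMT-07:00" on newer JDKs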
|
public ConvertedTime getConvertedTime(long duration) {
Set<Seconds> keys = RULES.keySet();
for (Seconds seconds : keys) {
if (duration <= seconds.getSeconds()) {
return RULES.get(seconds).getConvertedTime(duration);
}
}
return new TimeConverter.OverTwoYears().getConvertedTime(duration);
}
|
@Test
public void testShouldReport12MonthsFor364Days23Hours59Minutes29Seconds() throws Exception {
assertEquals(TimeConverter.ABOUT_X_MONTHS_AGO.argument(12), timeConverter
.getConvertedTime(365 * TimeConverter.DAY_IN_SECONDS - 31));
}
|
public static void checkArgument(final boolean condition, final String errorMessage) {
if (!condition) {
throw new IllegalArgumentException(errorMessage);
}
}
|
@Test
void assertCheckArgumentFailed() {
assertThrows(IllegalArgumentException.class, () -> PluginPreconditions.checkArgument(false, "Port `-3306` of MySQL Service must be a positive number."));
}
|
@Override
public void onComplete(final Request request) {
final RequestInfo requestInfo = getRequestInfo(request);
final List<Tag> tags = new ArrayList<>(5);
tags.add(Tag.of(PATH_TAG, requestInfo.path()));
tags.add(Tag.of(METHOD_TAG, requestInfo.method()));
tags.add(Tag.of(STATUS_CODE_TAG, String.valueOf(requestInfo.statusCode())));
tags.add(Tag.of(TRAFFIC_SOURCE_TAG, TrafficSource.HTTP.name().toLowerCase()));
final Tag platformTag = UserAgentTagUtil.getPlatformTag(requestInfo.userAgent());
tags.add(platformTag);
meterRegistry.counter(REQUEST_COUNTER_NAME, tags).increment();
UserAgentTagUtil.getClientVersionTag(requestInfo.userAgent(), clientReleaseManager).ifPresent(
clientVersionTag -> meterRegistry.counter(REQUESTS_BY_VERSION_COUNTER_NAME,
Tags.of(clientVersionTag, platformTag)).increment());
}
|
@Test
@SuppressWarnings("unchecked")
void testRequests() {
final String path = "/test";
final String method = "GET";
final int statusCode = 200;
final HttpURI httpUri = mock(HttpURI.class);
when(httpUri.getPath()).thenReturn(path);
final Request request = mock(Request.class);
when(request.getMethod()).thenReturn(method);
when(request.getHeader(HttpHeaders.USER_AGENT)).thenReturn("Signal-Android/4.53.7 (Android 8.1)");
when(request.getHttpURI()).thenReturn(httpUri);
final Response response = mock(Response.class);
when(response.getStatus()).thenReturn(statusCode);
when(request.getResponse()).thenReturn(response);
final ExtendedUriInfo extendedUriInfo = mock(ExtendedUriInfo.class);
when(request.getAttribute(MetricsHttpChannelListener.URI_INFO_PROPERTY_NAME)).thenReturn(extendedUriInfo);
when(extendedUriInfo.getMatchedTemplates()).thenReturn(List.of(new UriTemplate(path)));
final ArgumentCaptor<Iterable<Tag>> tagCaptor = ArgumentCaptor.forClass(Iterable.class);
listener.onComplete(request);
verify(requestCounter).increment();
verify(meterRegistry).counter(eq(MetricsHttpChannelListener.REQUEST_COUNTER_NAME), tagCaptor.capture());
final Set<Tag> tags = new HashSet<>();
for (final Tag tag : tagCaptor.getValue()) {
tags.add(tag);
}
assertEquals(5, tags.size());
assertTrue(tags.contains(Tag.of(MetricsHttpChannelListener.PATH_TAG, path)));
assertTrue(tags.contains(Tag.of(MetricsHttpChannelListener.METHOD_TAG, method)));
assertTrue(tags.contains(Tag.of(MetricsHttpChannelListener.STATUS_CODE_TAG, String.valueOf(statusCode))));
assertTrue(
tags.contains(Tag.of(MetricsHttpChannelListener.TRAFFIC_SOURCE_TAG, TrafficSource.HTTP.name().toLowerCase())));
assertTrue(tags.contains(Tag.of(UserAgentTagUtil.PLATFORM_TAG, "android")));
}
|
@Udf
public String concatWS(
@UdfParameter(description = "Separator string and values to join") final String... inputs) {
if (inputs == null || inputs.length < 2) {
throw new KsqlFunctionException("Function Concat_WS expects at least two input arguments.");
}
final String separator = inputs[0];
if (separator == null) {
return null;
}
return Arrays.stream(inputs, 1,
inputs.length)
.filter(Objects::nonNull)
.collect(Collectors.joining(separator));
}
|
@Test
public void shouldFailIfOnlySeparatorBytesInput() {
// When:
final KsqlException e = assertThrows(KsqlFunctionException.class, () -> udf.concatWS(ByteBuffer.wrap(new byte[] {3})));
// Then:
assertThat(e.getMessage(), containsString("expects at least two input arguments"));
}
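Usage examples for the varargs String overload above; each outcome follows directly from the code (separator first, null values filtered, null separator short-circuits):
udf.concatWS("-", "a", null, "b"); // "a-b" (null values are filtered out)
udf.concatWS(null, "a", "b"); // null (null separator)
udf.concatWS("-"); // throws KsqlFunctionException: fewer than two arguments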
|
public int[] mappings() {
int[] cleansed = new int[mapping.length];
for (int i = 0; i < mapping.length; i++) {
cleansed[i] = unmask(mapping[i]);
}
return cleansed;
}
|
@Test
public void testMappings() {
instance.setMapping(0, 2 | VariableMapper.REMAP_FLAG, 1);
instance.setMapping(1, VariableMapper.REMAP_FLAG | VariableMapper.DOUBLE_SLOT_FLAG, 2);
assertEquals(2, instance.map(0));
assertEquals(0, instance.map(1));
}
|
public static Map<String, Object> replaceKeyCharacter(Map<String, Object> map, char oldChar, char newChar) {
final Map<String, Object> result = new HashMap<>(map.size());
for (Map.Entry<String, Object> entry : map.entrySet()) {
final String key = entry.getKey().replace(oldChar, newChar);
final Object value = entry.getValue();
result.put(key, value);
}
return result;
}
|
@Test
public void replaceKeyCharacterHandlesEmptyMap() {
assertThat(MapUtils.replaceKeyCharacter(Collections.emptyMap(), '.', '_')).isEmpty();
}
|
@SuppressWarnings("unchecked")
public static <T> T convert(Class<T> klass, String value) {
if (Strings.isNullOrEmpty(value)) {
throw new IllegalArgumentException("Value must not be empty.");
}
if (Objects.isNull(converters.get(klass))) {
throw new IllegalArgumentException("No conversion supported for given class.");
}
return (T)converters.get(klass).apply(value);
}
|
@Test(expected = IllegalArgumentException.class)
public void testEmptyValue() {
PasswordParamConverter.convert(Double.class, "");
}
|
public static <K, InputT> GroupIntoBatches<K, InputT> ofSize(long batchSize) {
Preconditions.checkState(batchSize < Long.MAX_VALUE);
return new GroupIntoBatches<K, InputT>(BatchingParams.createDefault()).withSize(batchSize);
}
|
@Test
@Category({
ValidatesRunner.class,
NeedsRunner.class,
UsesTimersInParDo.class,
UsesTestStream.class,
UsesStatefulParDo.class,
UsesOnWindowExpiration.class
})
public void testInStreamingMode() {
int timestampInterval = 1;
Instant startInstant = new Instant(0L);
TestStream.Builder<KV<String, String>> streamBuilder =
TestStream.create(KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of()))
.advanceWatermarkTo(startInstant);
long offset = 0L;
for (KV<String, String> element : data) {
streamBuilder =
streamBuilder.addElements(
TimestampedValue.of(
element,
startInstant.plus(Duration.standardSeconds(offset * timestampInterval))));
offset++;
}
final long windowDuration = 6;
TestStream<KV<String, String>> stream =
streamBuilder
.advanceWatermarkTo(startInstant.plus(Duration.standardSeconds(windowDuration - 1)))
.advanceWatermarkTo(startInstant.plus(Duration.standardSeconds(windowDuration + 1)))
.advanceWatermarkTo(startInstant.plus(Duration.standardSeconds(EVEN_NUM_ELEMENTS)))
.advanceWatermarkToInfinity();
PCollection<KV<String, String>> inputCollection =
pipeline
.apply(stream)
.apply(
Window.<KV<String, String>>into(
FixedWindows.of(Duration.standardSeconds(windowDuration)))
.withAllowedLateness(Duration.millis(ALLOWED_LATENESS)));
inputCollection.apply(
ParDo.of(
new DoFn<KV<String, String>, Void>() {
@ProcessElement
public void processElement(ProcessContext c, BoundedWindow window) {
LOG.debug(
"*** ELEMENT: ({},{}) *** with timestamp {} in window {}",
c.element().getKey(),
c.element().getValue(),
c.timestamp(),
window);
}
}));
PCollection<KV<String, Iterable<String>>> outputCollection =
inputCollection
.apply(GroupIntoBatches.ofSize(BATCH_SIZE))
.setCoder(KvCoder.of(StringUtf8Coder.of(), IterableCoder.of(StringUtf8Coder.of())));
// elements have the same key and collection is divided into windows,
// so Count.perKey values are the number of elements in windows
PCollection<KV<String, Long>> countOutput =
outputCollection.apply(
"Count elements in windows after applying GroupIntoBatches", Count.perKey());
PAssert.that("Wrong number of elements in windows after GroupIntoBatches", countOutput)
.satisfies(
input -> {
Iterator<KV<String, Long>> inputIterator = input.iterator();
// first element
long count0 = inputIterator.next().getValue();
// window duration is 6 and batch size is 5, so there should be 2 elements in the
// window (one flush because batchSize was reached, one because the end of the window was reached)
assertEquals("Wrong number of elements in first window", 2, count0);
// second element
long count1 = inputIterator.next().getValue();
// the collection has 10 elements with only 4 left here, so there should be only
// one element in the window (flush because the end of the window/collection was reached)
assertEquals("Wrong number of elements in second window", 1, count1);
return null;
});
PAssert.that("Incorrect output collection after GroupIntoBatches", outputCollection)
.satisfies(
input -> {
Iterator<KV<String, Iterable<String>>> inputIterator = input.iterator();
// first element
int size0 = Iterables.size(inputIterator.next().getValue());
// window duration is 6 and batch size is 5, so the output batch size should be 5
// (flush because of batchSize reached)
assertEquals("Wrong first element batch Size", 5, size0);
// second element
int size1 = Iterables.size(inputIterator.next().getValue());
// there is only one element left in the window so batch size should be 1
// (flush because of end of window reached)
assertEquals("Wrong second element batch Size", 1, size1);
// third element
int size2 = Iterables.size(inputIterator.next().getValue());
// the collection is 10 elements and there are only 4 left, so the batch size should be 4
// (flush because end of collection reached)
assertEquals("Wrong third element batch Size", 4, size2);
return null;
});
pipeline.run().waitUntilFinish();
}
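A quick tally behind the assertions above, assuming data holds 10 values under a single key spaced one second apart: the [0s, 6s) window receives elements 0-5, so a batch size of 5 produces batches of 5 and 1 (count 2), while the [6s, 12s) window receives elements 6-9 and produces a single batch of 4 (count 1).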
|
void checkUrlPattern(String url, String message, Object... messageArguments) {
try {
HttpUrl okUrl = HttpUrl.parse(url);
if (okUrl == null) {
throw new IllegalArgumentException(String.format(message, messageArguments));
}
InetAddress address = InetAddress.getByName(okUrl.host());
if (configuration.getBoolean(SONAR_VALIDATE_WEBHOOKS_PROPERTY)
.orElse(SONAR_VALIDATE_WEBHOOKS_DEFAULT_VALUE)
&& (address.isLoopbackAddress() || address.isAnyLocalAddress() || isLocalAddress(address))) {
throw new IllegalArgumentException("Invalid URL: loopback and wildcard addresses are not allowed for webhooks.");
}
} catch (UnknownHostException e) {
// if a host cannot be resolved the deliveries will fail - no need to block it from being set
// this will only happen for public URLs
} catch (SocketException e) {
throw new IllegalStateException("Can not retrieve a network interfaces", e);
}
}
|
@Test
@UseDataProvider("loopbackUrls")
public void checkUrlPatternSuccessfulForLoopbackAddressWhenSonarValidateWebhooksPropertyDisabled(String url) {
when(configuration.getBoolean("sonar.validateWebhooks")).thenReturn(of(false));
assertThatCode(() -> underTest.checkUrlPattern(url, "msg")).doesNotThrowAnyException();
}
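A small sketch of the address classification the validator uses; it deliberately omits the interface-walking isLocalAddress(...) check, whose implementation is not shown here:
// Hypothetical helper mirroring the loopback/wildcard checks above.
static boolean isForbiddenWebhookHost(String host) throws UnknownHostException {
InetAddress address = InetAddress.getByName(host);
return address.isLoopbackAddress() || address.isAnyLocalAddress();
}
// isForbiddenWebhookHost("127.0.0.1") -> true (loopback)
// isForbiddenWebhookHost("0.0.0.0") -> true (wildcard)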
|
@Override
public List<SnowflakeIdentifier> listDatabases() {
List<SnowflakeIdentifier> databases;
try {
databases =
connectionPool.run(
conn ->
queryHarness.query(
conn, "SHOW DATABASES IN ACCOUNT", DATABASE_RESULT_SET_HANDLER));
} catch (SQLException e) {
throw snowflakeExceptionToIcebergException(
SnowflakeIdentifier.ofRoot(), e, "Failed to list databases");
} catch (InterruptedException e) {
throw new UncheckedInterruptedException(e, "Interrupted while listing databases");
}
databases.forEach(
db ->
Preconditions.checkState(
db.type() == SnowflakeIdentifier.Type.DATABASE,
"Expected DATABASE, got identifier '%s'",
db));
return databases;
}
|
@SuppressWarnings("unchecked")
@Test
public void testListDatabasesInterruptedException() throws SQLException, InterruptedException {
Exception injectedException = new InterruptedException("Fake interrupted exception");
when(mockClientPool.run(any(ClientPool.Action.class))).thenThrow(injectedException);
assertThatExceptionOfType(UncheckedInterruptedException.class)
.isThrownBy(() -> snowflakeClient.listDatabases())
.withMessageContaining("Interrupted while listing databases")
.withCause(injectedException);
}
|
@Override
public void resetLocal() {
this.min = Long.MAX_VALUE;
}
|
@Test
void testResetLocal() {
LongMinimum min = new LongMinimum();
long value = 9876543210L;
min.add(value);
assertThat(min.getLocalValue().longValue()).isEqualTo(value);
min.resetLocal();
assertThat(min.getLocalValue().longValue()).isEqualTo(Long.MAX_VALUE);
}
|
@VisibleForTesting
static boolean isRichConsole(ConsoleOutput consoleOutput, HttpTraceLevel httpTraceLevel) {
if (httpTraceLevel != HttpTraceLevel.off) {
return false;
}
switch (consoleOutput) {
case plain:
return false;
case auto:
// Enables progress footer when ANSI is supported (Windows or TERM not 'dumb').
return System.getProperty("os.name").startsWith("windows")
|| (System.console() != null && !"dumb".equals(System.getenv("TERM")));
case rich:
default:
return true;
}
}
|
@Test
public void testIsRichConsole_autoWindowsTrue() {
System.setProperty("os.name", "windows");
assertThat(CliLogger.isRichConsole(ConsoleOutput.auto, HttpTraceLevel.off)).isTrue();
}
|
public void startThreads() throws KettleException {
// Now prepare to start all the threads...
//
nrOfFinishedSteps = 0;
nrOfActiveSteps = 0;
ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationStartThreads.id, this );
fireTransStartedListeners();
for ( int i = 0; i < steps.size(); i++ ) {
final StepMetaDataCombi sid = steps.get( i );
sid.step.markStart();
sid.step.initBeforeStart();
// also attach a Step Listener to detect when we're done...
//
StepListener stepListener = new StepListener() {
@Override
public void stepActive( Trans trans, StepMeta stepMeta, StepInterface step ) {
nrOfActiveSteps++;
if ( nrOfActiveSteps == 1 ) {
// Transformation goes from in-active to active...
// PDI-5229 sync added
synchronized ( transListeners ) {
for ( TransListener listener : transListeners ) {
listener.transActive( Trans.this );
}
}
}
}
@Override
public void stepFinished( Trans trans, StepMeta stepMeta, StepInterface step ) {
synchronized ( Trans.this ) {
nrOfFinishedSteps++;
if ( nrOfFinishedSteps >= steps.size() ) {
// Set the finished flag
//
setFinished( true );
// Grab the performance statistics one last time (if enabled)
//
addStepPerformanceSnapShot();
try {
fireTransFinishedListeners();
} catch ( Exception e ) {
step.setErrors( step.getErrors() + 1L );
log.logError( getName() + " : " + BaseMessages.getString( PKG,
"Trans.Log.UnexpectedErrorAtTransformationEnd" ), e );
}
}
// If a step fails with an error, we want to kill/stop the others
// too...
//
if ( step.getErrors() > 0 ) {
log.logMinimal( BaseMessages.getString( PKG, "Trans.Log.TransformationDetectedErrors" ) );
log.logMinimal( BaseMessages.getString( PKG, "Trans.Log.TransformationIsKillingTheOtherSteps" ) );
killAllNoWait();
}
}
}
};
// Make sure this is called first!
//
if ( sid.step instanceof BaseStep ) {
( (BaseStep) sid.step ).getStepListeners().add( 0, stepListener );
} else {
sid.step.addStepListener( stepListener );
}
}
if ( transMeta.isCapturingStepPerformanceSnapShots() ) {
stepPerformanceSnapshotSeqNr = new AtomicInteger( 0 );
stepPerformanceSnapShots = new ConcurrentHashMap<>();
// Calculate the maximum number of snapshots to be kept in memory
//
String limitString = environmentSubstitute( transMeta.getStepPerformanceCapturingSizeLimit() );
if ( Utils.isEmpty( limitString ) ) {
limitString = EnvUtil.getSystemProperty( Const.KETTLE_STEP_PERFORMANCE_SNAPSHOT_LIMIT );
}
stepPerformanceSnapshotSizeLimit = Const.toInt( limitString, 0 );
// Set a timer to collect the performance data from the running threads...
//
stepPerformanceSnapShotTimer = new Timer( "stepPerformanceSnapShot Timer: " + transMeta.getName() );
TimerTask timerTask = new TimerTask() {
@Override
public void run() {
if ( !isFinished() ) {
addStepPerformanceSnapShot();
}
}
};
stepPerformanceSnapShotTimer.schedule( timerTask, 100, transMeta.getStepPerformanceCapturingDelay() );
}
// Now start a thread to monitor the running transformation...
//
setFinished( false );
setPaused( false );
setStopped( false );
transFinishedBlockingQueue = new ArrayBlockingQueue<>( TRANS_FINISHED_BLOCKING_QUEUE_SIZE );
TransListener transListener = new TransAdapter() {
@Override
public void transFinished( Trans trans ) {
try {
shutdownHeartbeat( trans != null ? trans.heartbeat : null );
if ( trans != null && transMeta.getParent() == null && trans.parentJob == null && trans.parentTrans == null ) {
if ( log.isDetailed() && transMeta.getMetaFileCache() != null ) {
transMeta.getMetaFileCache().logCacheSummary( log );
}
transMeta.setMetaFileCache( null );
}
ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationFinish.id, trans );
} catch ( KettleException e ) {
throw new RuntimeException( "Error calling extension point at end of transformation", e );
}
// First of all, stop the performance snapshot timer if there is
// one...
//
if ( transMeta.isCapturingStepPerformanceSnapShots() && stepPerformanceSnapShotTimer != null ) {
stepPerformanceSnapShotTimer.cancel();
}
transMeta.disposeEmbeddedMetastoreProvider();
setFinished( true );
setRunning( false ); // no longer running
log.snap( Metrics.METRIC_TRANSFORMATION_EXECUTION_STOP );
// If the user ran with metrics gathering enabled and a metrics logging table is configured, add another
// listener...
//
MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable();
if ( metricsLogTable.isDefined() ) {
try {
writeMetricsInformation();
} catch ( Exception e ) {
log.logError( "Error writing metrics information", e );
errors.incrementAndGet();
}
}
// Close the unique connections when running database transactionally.
// This will commit or roll back the transaction based on the result of this transformation.
//
if ( transMeta.isUsingUniqueConnections() ) {
trans.closeUniqueDatabaseConnections( getResult() );
}
// release unused vfs connections
KettleVFS.freeUnusedResources();
}
};
// This should always be done first so that the other listeners achieve a clean state to start from (setFinished and
// so on)
//
transListeners.add( 0, transListener );
setRunning( true );
switch ( transMeta.getTransformationType() ) {
case Normal:
// Now start all the threads...
//
for ( int i = 0; i < steps.size(); i++ ) {
final StepMetaDataCombi combi = steps.get( i );
RunThread runThread = new RunThread( combi );
Thread thread = new Thread( runThread );
thread.setName( getName() + " - " + combi.stepname );
ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepBeforeStart.id, combi );
// Call an extension point at the end of the step
//
combi.step.addStepListener( new StepAdapter() {
@Override
public void stepFinished( Trans trans, StepMeta stepMeta, StepInterface step ) {
try {
ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepFinished.id, combi );
} catch ( KettleException e ) {
throw new RuntimeException( "Unexpected error in calling extension point upon step finish", e );
}
}
} );
thread.start();
}
break;
case SerialSingleThreaded:
new Thread( new Runnable() {
@Override
public void run() {
try {
// Always disable thread priority management, it will always slow us
// down...
//
for ( StepMetaDataCombi combi : steps ) {
combi.step.setUsingThreadPriorityManagment( false );
}
//
// This is a single threaded version...
//
// Sort the steps from start to finish...
//
Collections.sort( steps, new Comparator<StepMetaDataCombi>() {
@Override
public int compare( StepMetaDataCombi c1, StepMetaDataCombi c2 ) {
boolean c1BeforeC2 = transMeta.findPrevious( c2.stepMeta, c1.stepMeta );
if ( c1BeforeC2 ) {
return -1;
} else {
return 1;
}
}
} );
boolean[] stepDone = new boolean[ steps.size() ];
int nrDone = 0;
while ( nrDone < steps.size() && !isStopped() ) {
for ( int i = 0; i < steps.size() && !isStopped(); i++ ) {
StepMetaDataCombi combi = steps.get( i );
if ( !stepDone[ i ] ) {
boolean cont = combi.step.processRow( combi.meta, combi.data );
if ( !cont ) {
stepDone[ i ] = true;
nrDone++;
}
}
}
}
} catch ( Exception e ) {
errors.addAndGet( 1 );
log.logError( "Error executing single threaded", e );
} finally {
for ( StepMetaDataCombi combi : steps ) {
combi.step.dispose( combi.meta, combi.data );
combi.step.markStop();
}
}
}
} ).start();
break;
case SingleThreaded:
// Don't do anything, this needs to be handled by the transformation
// executor!
//
break;
default:
break;
}
ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationStart.id, this );
heartbeat = startHeartbeat( getHeartbeatIntervalInSeconds() );
if ( steps.isEmpty() ) {
fireTransFinishedListeners();
}
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationHasAllocated", String.valueOf( steps
.size() ), String.valueOf( rowsets.size() ) ) );
}
}
|
@Test
public void testTransStartListenersConcurrentModification() throws Exception {
CountDownLatch start = new CountDownLatch( 1 );
TransFinishListenerAdder add = new TransFinishListenerAdder( trans, start );
TransStartListenerFirer starter = new TransStartListenerFirer( trans, start );
startThreads( add, starter, start );
assertEquals( "All listeners are added: no ConcurrentModificationException", count, add.c );
assertEquals( "All Start listeners are iterated over: no ConcurrentModificationException", count, starter.c );
}
|
public void analyze(ConnectContext context) {
dbName = AnalyzerUtils.getOrDefaultDatabase(dbName, context);
FeNameFormat.checkLabel(labelName);
}
|
@Test(expected = SemanticException.class)
public void testNoLabel() throws SemanticException {
new Expectations() {
{
analyzer.getDefaultDb();
minTimes = 0;
result = "testDb";
}
};
LabelName label = new LabelName("", "");
label.analyze(new ConnectContext());
Assert.fail("No exception thrown");
}
|
@Override
public Optional<ComputationConfig> fetchConfig(String computationId) {
Preconditions.checkArgument(
!computationId.isEmpty(),
"computationId is empty. Cannot fetch computation config without a computationId.");
GetConfigResponse response =
applianceComputationConfigFetcher.fetchConfig(
GetConfigRequest.newBuilder().addComputations(computationId).build());
if (response == null) {
return Optional.empty();
}
for (Windmill.GetConfigResponse.SystemNameToComputationIdMapEntry entry :
response.getSystemNameToComputationIdMapList()) {
systemNameToComputationIdMap.put(entry.getSystemName(), entry.getComputationId());
}
return createComputationConfig(
// We are only fetching the config for 1 computation, so we should only be getting that
// computation back.
Iterables.getOnlyElement(response.getCloudWorksList()),
transformUserNameToStateFamilyByComputationId(response),
response.getNameMapList().stream()
.collect(toImmutableMap(NameMapEntry::getUserName, NameMapEntry::getSystemName)));
}
|
@Test
public void testGetComputationConfig() throws IOException {
List<Windmill.GetConfigResponse.NameMapEntry> nameMapEntries =
ImmutableList.of(
Windmill.GetConfigResponse.NameMapEntry.newBuilder()
.setUserName("userName1")
.setSystemName("systemName1")
.build(),
Windmill.GetConfigResponse.NameMapEntry.newBuilder()
.setUserName("userName2")
.setSystemName("userName2")
.build());
String serializedMapTask =
Transport.getJsonFactory()
.toString(
new MapTask()
.setSystemName("systemName")
.setStageName("stageName")
.setInstructions(ImmutableList.of()));
Windmill.GetConfigResponse getConfigResponse =
Windmill.GetConfigResponse.newBuilder()
.addAllNameMap(nameMapEntries)
.addComputationConfigMap(
Windmill.GetConfigResponse.ComputationConfigMapEntry.newBuilder()
.setComputationId("systemName")
.setComputationConfig(
Windmill.ComputationConfig.newBuilder()
.addTransformUserNameToStateFamily(
Windmill.ComputationConfig.TransformUserNameToStateFamilyEntry
.newBuilder()
.setStateFamily("stateFamilyName")
.setTransformUserName("transformUserName")
.build())
.build())
.build())
.addCloudWorks(serializedMapTask)
.build();
ComputationConfig expectedConfig =
ComputationConfig.create(
Transport.getJsonFactory().fromString(serializedMapTask, MapTask.class),
getConfigResponse.getComputationConfigMapList().stream()
.map(Windmill.GetConfigResponse.ComputationConfigMapEntry::getComputationConfig)
.flatMap(
computationConfig ->
computationConfig.getTransformUserNameToStateFamilyList().stream())
.collect(
toMap(
Windmill.ComputationConfig.TransformUserNameToStateFamilyEntry
::getTransformUserName,
Windmill.ComputationConfig.TransformUserNameToStateFamilyEntry
::getStateFamily)),
nameMapEntries.stream()
.collect(
toMap(
Windmill.GetConfigResponse.NameMapEntry::getUserName,
Windmill.GetConfigResponse.NameMapEntry::getSystemName)));
StreamingApplianceComputationConfigFetcher configLoader =
createStreamingApplianceConfigLoader();
when(mockWindmillServer.getConfig(any())).thenReturn(getConfigResponse);
Optional<ComputationConfig> config = configLoader.fetchConfig("someComputationId");
assertTrue(config.isPresent());
assertThat(config.get()).isEqualTo(expectedConfig);
}
|
@SneakyThrows
public static String generatePolicy(Set<CredentialContext.Privilege> privileges, List<String> locations) {
JsonNode policyRoot = loadYaml(POLICY_STATEMENT);
ArrayNode policyStatement = (ArrayNode) policyRoot.findPath("Statement");
JsonNode operationsStatement = loadYaml(OPERATION_STATEMENT);
policyStatement.add(operationsStatement);
// Add the appropriate S3 operations for the privileges requested
ArrayNode actions = (ArrayNode) operationsStatement.findPath("Action");
if (privileges.contains(CredentialContext.Privilege.UPDATE)) {
UPDATE_ACTIONS.forEach(actions::add);
} else if (privileges.contains(CredentialContext.Privilege.SELECT)) {
SELECT_ACTIONS.forEach(actions::add);
} else {
throw new NotAuthorizedException(
"Can't generate policy for unknown privileges '%s' for locations: '%s'"
.formatted(privileges, locations));
}
// Group each location by s3 bucket it's located in, then for each
// bucket, add the bucket arn for the listBucket and operations statements,
// then add each path as a conditional prefix
getBucketToPathsMap(locations).forEach(
(bucketName, paths) -> {
JsonNode listStatement = loadYaml(BUCKET_STATEMENT);
policyStatement.add(listStatement);
ArrayNode bucketResource = (ArrayNode) listStatement.findPath("Resource");
ArrayNode operationsResource = (ArrayNode) operationsStatement.findPath("Resource");
bucketResource.add("arn:aws:s3:::%s".formatted(bucketName));
ArrayNode conditionalPrefixes = (ArrayNode) listStatement.findPath("s3:prefix");
paths.forEach(
path -> {
// remove any preceding forward slashes
// TODO: potentially sanitize/encode the whole path to deal with problematic chars
String sanitizedPath = path.replaceAll("^/+", "");
if (sanitizedPath.isEmpty()) {
conditionalPrefixes.add("*");
operationsResource.add("arn:aws:s3:::%s/*".formatted(bucketName));
} else {
conditionalPrefixes.add(sanitizedPath);
conditionalPrefixes.add(sanitizedPath + "/");
conditionalPrefixes.add(sanitizedPath + "/*");
operationsResource.add("arn:aws:s3:::%s/%s/*".formatted(bucketName, sanitizedPath));
operationsResource.add("arn:aws:s3:::%s/%s".formatted(bucketName, sanitizedPath));
}
});
});
return JSON_MAPPER.writeValueAsString(policyRoot);
}
|
@Test
public void testPolicyWithStorageProfileLocations() {
String updatePolicy =
AwsPolicyGenerator.generatePolicy(
Set.of(SELECT, UPDATE),
List.of(
"s3://my-bucket1/path1/table1", "s3://profile-bucket2/", "s3://profile-bucket3"));
assertThat(updatePolicy)
.contains("s3:PutO*")
.contains("s3:GetO*")
.contains("s3:DeleteO*")
.contains("arn:aws:s3:::profile-bucket2/*")
.contains("arn:aws:s3:::profile-bucket3/*");
String selectPolicy =
AwsPolicyGenerator.generatePolicy(
Set.of(SELECT),
List.of(
"s3://my-bucket1/path1/table1",
"s3://my-bucket2/path2/table2",
"s3://my-bucket1/path3/table3"));
assertThat(selectPolicy)
.doesNotContain("s3:PutO*")
.doesNotContain("s3:DeleteO*")
.contains("s3:GetO*");
}
|
public static boolean isContentType(String contentType, Message message) {
if (contentType == null) {
return message.getContentType() == null;
} else {
return contentType.equals(message.getContentType());
}
}
|
@Test
public void testIsContentTypeWithNullStringValueAndNonNullMessageContentType() {
Message message = Proton.message();
message.setContentType("test");
assertFalse(AmqpMessageSupport.isContentType(null, message));
}
|
public Map<String, MetaProperties> logDirProps() {
return logDirProps;
}
|
@Test
public void testCopierGenerateValidDirectoryId() {
MetaPropertiesMockRandom random = new MetaPropertiesMockRandom();
MetaPropertiesEnsemble.Copier copier = new MetaPropertiesEnsemble.Copier(EMPTY);
copier.setRandom(random);
copier.logDirProps().put("/tmp/dir1",
new MetaProperties.Builder().
setVersion(MetaPropertiesVersion.V1).
setClusterId("PpYMbsoRQV-589isZzNzEw").
setNodeId(0).
setDirectoryId(new Uuid(2336837413447398698L, 1758400403264101670L)).
build());
copier.logDirProps().put("/tmp/dir2",
new MetaProperties.Builder().
setVersion(MetaPropertiesVersion.V1).
setClusterId("PpYMbsoRQV-589isZzNzEw").
setNodeId(0).
setDirectoryId(new Uuid(4341931186263415792L, 6389410885970711333L)).
build());
        // Verify that we ignore non-safe IDs and IDs that have already been used
// when invoking generateValidDirectoryId.
assertEquals(new Uuid(7265008559332826740L, 3478747443029687715L),
copier.generateValidDirectoryId());
}
|
@Nullable
public byte[] getValue() {
return mValue;
}
|
@Test
public void setValue_FLOAT_basic() {
final MutableData data = new MutableData(new byte[4]);
data.setValue(1.0f, Data.FORMAT_FLOAT, 0);
assertArrayEquals(new byte[] { 1, 0, 0, 0 }, data.getValue());
}
|
@Override
public Map<String, StepTransition> translate(WorkflowInstance workflowInstance) {
WorkflowInstance instance = objectMapper.convertValue(workflowInstance, WorkflowInstance.class);
if (instance.getRunConfig() != null) {
if (instance.getRunConfig().getPolicy() == RunPolicy.RESTART_FROM_INCOMPLETE
|| instance.getRunConfig().getPolicy() == RunPolicy.RESTART_FROM_SPECIFIC) {
Map<String, StepInstance.Status> statusMap =
instance.getAggregatedInfo().getStepAggregatedViews().entrySet().stream()
.collect(
Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getStatus()));
if (!statusMap.isEmpty()) {
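          // restart from every step that ended in a non-complete terminal state or was never created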
instance
.getRunConfig()
.setStartStepIds(
statusMap.entrySet().stream()
.filter(
entry ->
!entry.getValue().isComplete()
&& (entry.getValue().isTerminal()
|| entry.getValue() == StepInstance.Status.NOT_CREATED))
.map(Map.Entry::getKey)
.collect(Collectors.toList()));
}
// handle the special case of restarting from a completed step
if (instance.getRunConfig().getPolicy() == RunPolicy.RESTART_FROM_SPECIFIC) {
String restartStepId =
RunRequest.getCurrentNode(instance.getRunConfig().getRestartConfig()).getStepId();
if (!instance.getRunConfig().getStartStepIds().contains(restartStepId)) {
instance.getRunConfig().getStartStepIds().add(restartStepId);
}
}
} else {
if (workflowInstance.getRunConfig().getStartStepIds() != null) {
instance
.getRunConfig()
.setStartStepIds(new ArrayList<>(workflowInstance.getRunConfig().getStartStepIds()));
}
if (workflowInstance.getRunConfig().getEndStepIds() != null) {
instance
.getRunConfig()
.setEndStepIds(new ArrayList<>(workflowInstance.getRunConfig().getEndStepIds()));
}
}
}
List<String> startStepIds =
instance.getRunConfig() != null && instance.getRunConfig().getStartStepIds() != null
? instance.getRunConfig().getStartStepIds()
: null;
List<String> endStepIds =
instance.getRunConfig() != null && instance.getRunConfig().getEndStepIds() != null
? instance.getRunConfig().getEndStepIds()
: null;
return WorkflowGraph.computeDag(instance.getRuntimeWorkflow(), startStepIds, endStepIds);
}
|
@Test
public void testTranslateIncludingAllSteps() {
Map<String, StepTransition> dag = translator.translate(instance);
Assert.assertEquals(
new HashSet<>(Arrays.asList("job1", "job.2", "job3", "job4")), dag.keySet());
}
|
@Override
public OAuth2AccessTokenDO checkAccessToken(String accessToken) {
OAuth2AccessTokenDO accessTokenDO = getAccessToken(accessToken);
if (accessTokenDO == null) {
            throw exception0(GlobalErrorCodeConstants.UNAUTHORIZED.getCode(), "Access token does not exist");
}
if (DateUtils.isExpired(accessTokenDO.getExpiresTime())) {
            throw exception0(GlobalErrorCodeConstants.UNAUTHORIZED.getCode(), "Access token has expired");
}
return accessTokenDO;
}
|
@Test
public void testCheckAccessToken_null() {
        // invoke and assert
        assertServiceException(() -> oauth2TokenService.checkAccessToken(randomString()),
                new ErrorCode(401, "Access token does not exist"));
}
|
@ConstantFunction(name = "bitShiftRightLogical", argTypes = {LARGEINT, BIGINT}, returnType = LARGEINT)
public static ConstantOperator bitShiftRightLogicalLargeInt(ConstantOperator first, ConstantOperator second) {
return ConstantOperator.createLargeInt(
bitShiftRightLogicalForInt128(first.getLargeInt(), (int) second.getBigint()));
}
|
@Test
public void bitShiftRightLogicalLargeInt() {
assertEquals("12",
ScalarOperatorFunctions.bitShiftRightLogicalLargeInt(O_LI_100, O_BI_3).getLargeInt().toString());
assertEquals("800",
ScalarOperatorFunctions.bitShiftRightLogicalLargeInt(O_LI_100, O_BI_NEG_3).getLargeInt().toString());
assertEquals("12",
ScalarOperatorFunctions.bitShiftRightLogicalLargeInt(O_LI_100, O_BI_131).getLargeInt().toString());
assertEquals("42535295865117307932921825928971026419",
ScalarOperatorFunctions.bitShiftRightLogicalLargeInt(O_LI_NEG_100, O_BI_3).getLargeInt().toString());
}
|
public ScheduledExecutorServiceBuilder scheduledExecutorService(String nameFormat) {
return scheduledExecutorService(nameFormat, false);
}
|
@Test
void scheduledExecutorServiceBuildsUserThreadsByDefault() {
final ScheduledExecutorService executorService = environment.scheduledExecutorService("user-%d").build();
assertThat(executorService.submit(() -> Thread.currentThread().isDaemon()))
.succeedsWithin(1, TimeUnit.SECONDS, as(BOOLEAN))
.isFalse();
}
|
@Override
public String getProperty(String name) {
Queue<Driver> queue = new LinkedList<>();
queue.add(this);
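        // breadth-first search up the driver hierarchy: the nearest driver that defines the property wins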
while (!queue.isEmpty()) {
Driver driver = queue.remove();
String property = driver.properties().get(name);
if (property != null) {
return property;
} else if (driver.parents() != null) {
queue.addAll(driver.parents());
}
}
return null;
}
|
@Test
public void testGetProperty() throws Exception {
DefaultDriver root = new DefaultDriver(ROOT, Lists.newArrayList(), MFR, HW, SW,
ImmutableMap.of(), ImmutableMap.of());
DefaultDriver child = new DefaultDriver(CHILD, Lists.newArrayList(root), MFR, HW, SW,
ImmutableMap.of(), ImmutableMap.of(KEY, VALUE));
DefaultDriver grandChild = new DefaultDriver(GRAND_CHILD, Lists.newArrayList(child),
MFR, HW, SW, ImmutableMap.of(), ImmutableMap.of());
assertNull(root.getProperty(KEY));
assertEquals(VALUE, child.getProperty(KEY));
assertEquals(VALUE, grandChild.getProperty(KEY));
}
|
@Override
public DescribeTopicsResult describeTopics(final TopicCollection topics, DescribeTopicsOptions options) {
if (topics instanceof TopicIdCollection)
return DescribeTopicsResult.ofTopicIds(handleDescribeTopicsByIds(((TopicIdCollection) topics).topicIds(), options));
else if (topics instanceof TopicNameCollection)
return DescribeTopicsResult.ofTopicNames(handleDescribeTopicsByNamesWithDescribeTopicPartitionsApi(((TopicNameCollection) topics).topicNames(), options));
else
throw new IllegalArgumentException("The TopicCollection: " + topics + " provided did not match any supported classes for describeTopics.");
}
|
@Test
public void testDescribeTopicsByIds() throws ExecutionException, InterruptedException {
try (AdminClientUnitTestEnv env = mockClientEnv()) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
// Valid ID
Uuid topicId = Uuid.randomUuid();
String topicName = "test-topic";
Node leader = env.cluster().nodes().get(0);
MetadataResponse.PartitionMetadata partitionMetadata = new MetadataResponse.PartitionMetadata(
Errors.NONE,
new TopicPartition(topicName, 0),
Optional.of(leader.id()),
Optional.of(10),
singletonList(leader.id()),
singletonList(leader.id()),
singletonList(leader.id()));
env.kafkaClient().prepareResponse(RequestTestUtils
.metadataResponse(
env.cluster().nodes(),
env.cluster().clusterResource().clusterId(),
env.cluster().controller().id(),
singletonList(new MetadataResponse.TopicMetadata(Errors.NONE, topicName, topicId, false,
singletonList(partitionMetadata), MetadataResponse.AUTHORIZED_OPERATIONS_OMITTED))));
TopicCollection.TopicIdCollection topicIds = TopicCollection.ofTopicIds(
singletonList(topicId));
            DescribeTopicsResult describeTopicsResult = env.adminClient().describeTopics(topicIds);
            Map<Uuid, TopicDescription> allTopicIds = describeTopicsResult.allTopicIds().get();
assertEquals(topicName, allTopicIds.get(topicId).name());
// ID not exist in brokers
Uuid nonExistID = Uuid.randomUuid();
env.kafkaClient().prepareResponse(RequestTestUtils
.metadataResponse(
env.cluster().nodes(),
env.cluster().clusterResource().clusterId(),
env.cluster().controller().id(),
emptyList()));
DescribeTopicsResult result1 = env.adminClient().describeTopics(
TopicCollection.ofTopicIds(singletonList(nonExistID)));
TestUtils.assertFutureError(result1.allTopicIds(), UnknownTopicIdException.class);
            Exception e = assertThrows(Exception.class, () -> result1.allTopicIds().get(), "describe with a non-existent topic ID should throw an exception");
assertEquals(String.format("org.apache.kafka.common.errors.UnknownTopicIdException: TopicId %s not found.", nonExistID), e.getMessage());
DescribeTopicsResult result2 = env.adminClient().describeTopics(
TopicCollection.ofTopicIds(singletonList(Uuid.ZERO_UUID)));
TestUtils.assertFutureError(result2.allTopicIds(), InvalidTopicException.class);
            e = assertThrows(Exception.class, () -> result2.allTopicIds().get(), "describe with an invalid topic ID should throw an exception");
assertEquals("The given topic id 'AAAAAAAAAAAAAAAAAAAAAA' cannot be represented in a request.", e.getCause().getMessage());
}
}
|
    public void createPipe(CreatePipeStmt stmt) throws DdlException {
        lock.writeLock().lock();
        try {
Pair<Long, String> dbIdAndName = resolvePipeNameUnlock(stmt.getPipeName());
boolean existed = nameToId.containsKey(dbIdAndName);
if (existed) {
if (!stmt.isIfNotExists() && !stmt.isReplace()) {
ErrorReport.reportSemanticException(ErrorCode.ERR_PIPE_EXISTS);
}
if (stmt.isIfNotExists()) {
return;
} else if (stmt.isReplace()) {
LOG.info("Pipe {} already exist, replace it with a new one", stmt.getPipeName());
Pipe pipe = pipeMap.get(nameToId.get(dbIdAndName));
dropPipeImpl(pipe);
}
}
// Add pipe
long id = GlobalStateMgr.getCurrentState().getNextId();
Pipe pipe = Pipe.fromStatement(id, stmt);
putPipe(pipe);
repo.addPipe(pipe);
} finally {
lock.writeLock().unlock();
}
}
|
@Test
public void showPipes() throws Exception {
PipeManager pm = ctx.getGlobalStateMgr().getPipeManager();
String createSql =
"create pipe show_1 as insert into tbl1 select * from files('path'='fake://pipe', 'format'='parquet')";
CreatePipeStmt createStmt = (CreatePipeStmt) UtFrameUtils.parseStmtWithNewParser(createSql, ctx);
pm.createPipe(createStmt);
createSql =
"create pipe show_2 as insert into tbl1 select * from files('path'='fake://pipe', 'format'='parquet')";
createStmt = (CreatePipeStmt) UtFrameUtils.parseStmtWithNewParser(createSql, ctx);
pm.createPipe(createStmt);
// show
String sql = "show pipes";
ShowPipeStmt showPipeStmt = (ShowPipeStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx);
ShowResultSet result = ShowExecutor.execute(showPipeStmt, ctx);
Assert.assertEquals(
Arrays.asList("show_1", "RUNNING", "pipe_test_db.tbl1",
"{\"loadedFiles\":0,\"loadedBytes\":0,\"loadingFiles\":0}", null),
result.getResultRows().get(0).subList(2, result.numColumns() - 1));
Assert.assertEquals(
Arrays.asList("show_2", "RUNNING", "pipe_test_db.tbl1",
"{\"loadedFiles\":0,\"loadedBytes\":0,\"loadingFiles\":0}", null),
result.getResultRows().get(1).subList(2, result.numColumns() - 1));
// desc
sql = "desc pipe show_1";
DescPipeStmt descPipeStmt = (DescPipeStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx);
result = ShowExecutor.execute(descPipeStmt, ctx);
Assert.assertEquals(
Arrays.asList("show_1", "FILE", "pipe_test_db.tbl1", "FILE_SOURCE(path=fake://pipe)",
"insert into tbl1 select * from files('path'='fake://pipe', 'format'='parquet')", ""),
result.getResultRows().get(0).subList(2, result.numColumns())
);
}
|
public static Class<?> name2class(String name) throws ClassNotFoundException {
return name2class(ClassUtils.getClassLoader(), name);
}
|
@Test
void testName2Class() throws Exception {
assertEquals(boolean.class, ReflectUtils.name2class("boolean"));
assertEquals(boolean[].class, ReflectUtils.name2class("boolean[]"));
assertEquals(int[][].class, ReflectUtils.name2class(ReflectUtils.getName(int[][].class)));
assertEquals(ReflectUtilsTest[].class, ReflectUtils.name2class(ReflectUtils.getName(ReflectUtilsTest[].class)));
}
|
public static UserAgent parse(String userAgentString) {
return UserAgentParser.parse(userAgentString);
}
|
@Test
public void parseIE11OnWindowsServer2008R2Test() {
final String uaStr = "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko";
final UserAgent ua = UserAgentUtil.parse(uaStr);
assertEquals("MSIE11", ua.getBrowser().toString());
assertEquals("11.0", ua.getVersion());
assertEquals("Trident", ua.getEngine().toString());
assertEquals("7.0", ua.getEngineVersion());
assertEquals("Windows 7 or Windows Server 2008R2", ua.getOs().toString());
assertEquals("6.1", ua.getOsVersion());
assertEquals("Windows", ua.getPlatform().toString());
assertFalse(ua.isMobile());
}
|
static String resolveLocalRepoPath(String localRepoPath) {
// todo decouple home folder resolution
// find homedir
String home = System.getenv("ZEPPELIN_HOME");
if (home == null) {
home = System.getProperty("zeppelin.home");
}
if (home == null) {
home = "..";
}
return Paths.get(home).resolve(localRepoPath).toAbsolutePath().toString();
}
|
@Test
void should_not_change_absolute_path() {
String absolutePath
= Paths.get("first", "second").toAbsolutePath().toString();
String resolvedPath = Booter.resolveLocalRepoPath(absolutePath);
assertEquals(absolutePath, resolvedPath);
}
|
@Override
public void requestPermission(ApplicationId appId, Permission permission) {
states.computeIf(appId, securityInfo -> (securityInfo == null || securityInfo.getState() != POLICY_VIOLATED),
(id, securityInfo) -> new SecurityInfo(securityInfo.getPermissions(), POLICY_VIOLATED));
violations.compute(appId, (k, v) -> v == null ? Sets.newHashSet(permission) : addAndGet(v, permission));
}
|
@Test
public void testRequestPermission() {
states.compute(appId, (id, securityInfo) -> new SecurityInfo(securityInfo.getPermissions(), POLICY_VIOLATED));
assertEquals(POLICY_VIOLATED, states.get(appId).getState());
Permission testPermissionB = new Permission("testClassB", "testNameB");
violations.compute(appId,
(k, v) -> v == null ? Sets.newHashSet(testPermissionB) : addAndGet(v, testPermissionB));
assertTrue(violations.get(appId).contains(testPermissionB));
}
|
public DateTimeStamp minus(double offsetInDecimalSeconds) {
return add(-offsetInDecimalSeconds);
}
|
@Test
void minus() {
DateTimeStamp a = new DateTimeStamp(.586);
double a_ts = a.getTimeStamp();
ZonedDateTime a_dt = a.getDateTime();
DateTimeStamp b = new DateTimeStamp(.586 - .587);
assertEquals(b.getTimeStamp(), a.minus(.587).getTimeStamp(), .001);
assertEquals(a_ts, a.getTimeStamp(), .001); // test that a is unmodified
assertEquals(a_dt, a.getDateTime()); // test that a is unmodified
a = new DateTimeStamp("2018-04-04T09:10:00.586-0100");
a_ts = a.getTimeStamp();
a_dt = a.getDateTime();
b = new DateTimeStamp("2018-04-04T09:09:59.999-0100");
assertEquals(b.getDateTime(), a.minus(.587).getDateTime());
assertEquals(a_ts, a.getTimeStamp(), .001); // test that a is unmodified
assertEquals(a_dt, a.getDateTime()); // test that a is unmodified
a = new DateTimeStamp("2018-04-04T09:10:00.586-0100", 0.18);
a_ts = a.getTimeStamp();
a_dt = a.getDateTime();
b = new DateTimeStamp("2018-04-04T09:09:59.999-0100", 0.18 - .587);
DateTimeStamp c = a.minus(.587);
assertEquals(b.getDateTime(), c.getDateTime());
assertEquals(b.getTimeStamp(), c.getTimeStamp(), 0.001);
assertEquals(a_ts, a.getTimeStamp(), .001); // test that a is unmodified
assertEquals(a_dt, a.getDateTime()); // test that a is unmodified
}
|
@Override
public List<URI> getGlue() {
return configurationParameters
.get(GLUE_PROPERTY_NAME, s -> Arrays.asList(s.split(",")))
.orElse(Collections.singletonList(CLASSPATH_SCHEME_PREFIX))
.stream()
.map(String::trim)
.map(GluePath::parse)
.collect(Collectors.toList());
}
|
@Test
void getGlue() {
ConfigurationParameters config = new MapConfigurationParameters(
Constants.GLUE_PROPERTY_NAME,
"com.example.app, com.example.glue");
assertThat(new CucumberEngineOptions(config).getGlue(),
contains(
URI.create("classpath:/com/example/app"),
URI.create("classpath:/com/example/glue")));
}
|
public <K> KTableHolder<K> build(
final KTableHolder<K> table,
final TableSuppress<K> step,
final RuntimeBuildContext buildContext,
final ExecutionKeyFactory<K> executionKeyFactory
) {
return build(
table,
step,
buildContext,
executionKeyFactory,
PhysicalSchema::from,
buildContext.getMaterializedFactory()
);
}
|
@Test
@SuppressWarnings({"unchecked", "rawtypes"})
public void shouldSuppressSourceTable() {
// When:
final KTableHolder<Struct> result = builder.build(
tableHolder,
tableSuppress,
buildContext,
executionKeyFactory,
physicalSchemaFactory,
new MaterializedFactory()
);
// Then:
assertThat(result, is(suppressedtable));
verify(sourceKTable).transformValues(any(),any(Materialized.class));
verify(preKTable).suppress(suppressionCaptor.capture());
final FinalResultsSuppressionBuilder suppression = suppressionCaptor.getValue();
assertThat(suppression, isA(FinalResultsSuppressionBuilder.class));
}
|
public static HiveConf create(Configuration conf) {
HiveConf hiveConf = new HiveConf(conf, HiveConf.class);
// to make sure Hive configuration properties in conf not be overridden
hiveConf.addResource(conf);
return hiveConf;
}
|
@Test
public void testCreateHiveConf() {
HiveConf hiveConf = createHiveConf();
assertThat(hiveConf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL)).isTrue();
        // constructing a plain new HiveConf(hiveConf, ...) overrides configurations from `hiveConf`
        // whose Hive default value is null or the empty string
assertThat(
new HiveConf(hiveConf, HiveConf.class)
.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL))
.isFalse();
assertThat(
HiveConfUtils.create(hiveConf)
.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL))
.isTrue();
}
|
public <T> void resolve(T resolvable) {
ParamResolver resolver = this;
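        // if the resolvable introduces its own parameter scope, layer it over the current resolver before descending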
if (ParamScope.class.isAssignableFrom(resolvable.getClass())) {
ParamScope newScope = (ParamScope) resolvable;
resolver = newScope.applyOver(resolver);
}
resolveStringLeaves(resolvable, resolver);
resolveNonStringLeaves(resolvable, resolver);
resolveNodes(resolvable, resolver);
}
|
@Test
public void shouldResolveInMergePipelineConfigs() {
PipelineConfig pipelineConfig = PipelineConfigMother.createPipelineConfig("cruise", "dev", "ant");
pipelineConfig.setLabelTemplate("2.1-${COUNT}-#{foo}-bar-#{bar}");
HgMaterialConfig materialConfig = MaterialConfigsMother.hgMaterialConfig("http://#{foo}.com/#{bar}");
pipelineConfig.addMaterialConfig(materialConfig);
MergePipelineConfigs merge = new MergePipelineConfigs(new BasicPipelineConfigs(), new BasicPipelineConfigs(pipelineConfig));
new ParamResolver(new ParamSubstitutionHandlerFactory(params(param("foo", "pavan"), param("bar", "jj"))), fieldCache).resolve(merge);
assertThat(pipelineConfig.getLabelTemplate(), is("2.1-${COUNT}-pavan-bar-jj"));
assertThat(pipelineConfig.materialConfigs().get(1).getUriForDisplay(), is("http://pavan.com/jj"));
}
|
@SuppressWarnings("unchecked")
@Override
public void configure(final Map<String, ?> configs, final boolean isKey) {
final String windowedInnerClassSerdeConfig = (String) configs.get(StreamsConfig.WINDOWED_INNER_CLASS_SERDE);
Serde<T> windowInnerClassSerde = null;
if (windowedInnerClassSerdeConfig != null) {
try {
windowInnerClassSerde = Utils.newInstance(windowedInnerClassSerdeConfig, Serde.class);
} catch (final ClassNotFoundException e) {
throw new ConfigException(StreamsConfig.WINDOWED_INNER_CLASS_SERDE, windowedInnerClassSerdeConfig,
"Serde class " + windowedInnerClassSerdeConfig + " could not be found.");
}
}
if (inner != null && windowedInnerClassSerdeConfig != null) {
if (!inner.getClass().getName().equals(windowInnerClassSerde.serializer().getClass().getName())) {
throw new IllegalArgumentException("Inner class serializer set using constructor "
+ "(" + inner.getClass().getName() + ")" +
" is different from the one set in windowed.inner.class.serde config " +
"(" + windowInnerClassSerde.serializer().getClass().getName() + ").");
}
} else if (inner == null && windowedInnerClassSerdeConfig == null) {
throw new IllegalArgumentException("Inner class serializer should be set either via constructor " +
"or via the windowed.inner.class.serde config");
} else if (inner == null)
inner = windowInnerClassSerde.serializer();
}
|
@Test
public void shouldThrowConfigExceptionWhenInvalidWindowedInnerClassSerialiserSupplied() {
props.put(StreamsConfig.WINDOWED_INNER_CLASS_SERDE, "some.non.existent.class");
assertThrows(ConfigException.class, () -> timeWindowedSerializer.configure(props, false));
}
|
public String getMethod() {
return method;
}
|
@Test
public void testGetMethod() {
shenyuRequestLog.setMethod("test");
        Assertions.assertEquals("test", shenyuRequestLog.getMethod());
}
|
public static WriteStreams writeStreams() {
return new AutoValue_RedisIO_WriteStreams.Builder()
.setConnectionConfiguration(RedisConnectionConfiguration.create())
.setMaxLen(0L)
.setApproximateTrim(true)
.build();
}
|
@Test
public void testWriteStreamsWithTruncation() {
        /* test data is 10 keys (stream IDs), each with two entries, each entry carrying two k/v pairs of data */
List<String> redisKeys =
IntStream.range(0, 10).boxed().map(idx -> UUID.randomUUID().toString()).collect(toList());
Map<String, String> fooValues = ImmutableMap.of("sensor-id", "1234", "temperature", "19.8");
Map<String, String> barValues = ImmutableMap.of("sensor-id", "9999", "temperature", "18.2");
List<KV<String, Map<String, String>>> allData =
redisKeys.stream()
.flatMap(id -> Stream.of(KV.of(id, fooValues), KV.of(id, barValues)))
.collect(toList());
PCollection<KV<String, Map<String, String>>> write =
p.apply(
Create.of(allData)
.withCoder(
KvCoder.of(
StringUtf8Coder.of(),
MapCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of()))));
write.apply(
RedisIO.writeStreams()
.withEndpoint(REDIS_HOST, port)
.withMaxLen(1)
.withApproximateTrim(false));
p.run();
for (String stream : redisKeys) {
long count = client.xlen(stream);
assertEquals(1, count);
}
}
|
@Override
public void doFilter(ServletRequest req, ServletResponse resp, FilterChain chain) throws IOException, ServletException {
doHttpFilter((HttpServletRequest) req, (HttpServletResponse) resp, chain);
}
|
@Test
public void ifRequestUriIsNull_returnBadRequest() throws ServletException, IOException {
HttpServletRequest request = newRequest("GET", "/");
when(request.getRequestURI()).thenReturn(null);
underTest.doFilter(request, response, chain);
verify(response).setStatus(HttpServletResponse.SC_BAD_REQUEST);
}
|
@Override
public V get(HollowMap<K, V> map, int ordinal, Object key) {
if(getSchema().getHashKey() != null) {
for(int i=0;i<ordinals.length;i+=2) {
if(ordinals[i] != -1 && map.equalsKey(ordinals[i], key))
return map.instantiateValue(ordinals[i+1]);
}
} else {
int hashCode = dataAccess.getDataAccess().getHashCodeFinder().hashCode(key);
int bucket = (HashCodes.hashInt(hashCode) & hashMask) * 2;
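            // open addressing over interleaved key/value ordinals: probe in steps of two, wrapping with the power-of-two mask, until an empty slot (-1) is hit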
while(ordinals[bucket] != -1) {
if(map.equalsKey(ordinals[bucket], key)) {
return map.instantiateValue(ordinals[bucket + 1]);
}
bucket += 2;
bucket &= ordinals.length - 1;
}
}
return null;
}
|
@Test
public void testGetOnEmptyMap() throws Exception {
addRecord();
addRecord(10, 20);
roundTripSnapshot();
HollowMapCachedDelegate<Integer, Integer> delegate = new HollowMapCachedDelegate<Integer, Integer>((HollowMapTypeReadState)readStateEngine.getTypeState("TestMap"), 0);
HollowMap<Integer, Integer> map = new HollowMap<Integer, Integer>(delegate, 0) {
public Integer instantiateKey(int keyOrdinal) {
return keyOrdinal;
}
public Integer instantiateValue(int valueOrdinal) {
return valueOrdinal;
}
public boolean equalsKey(int keyOrdinal, Object testObject) {
return keyOrdinal == (Integer)testObject;
}
public boolean equalsValue(int valueOrdinal, Object testObject) {
return valueOrdinal == (Integer)testObject;
}
};
Assert.assertNull(delegate.get(map, 0, 10));
}
|
public static StatementExecutorResponse execute(
final ConfiguredStatement<AssertSchema> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
return AssertExecutor.execute(
statement.getMaskedStatementText(),
statement.getStatement(),
executionContext.getKsqlConfig().getInt(KSQL_ASSERT_SCHEMA_DEFAULT_TIMEOUT_MS),
serviceContext,
(stmt, sc) -> assertSchema(
sc.getSchemaRegistryClient(),
((AssertSchema) stmt).getSubject(),
((AssertSchema) stmt).getId(),
stmt.checkExists()),
(str, stmt) -> new AssertSchemaEntity(
str,
((AssertSchema) stmt).getSubject(),
((AssertSchema) stmt).getId(),
stmt.checkExists())
);
}
|
@Test
public void shouldFailToAssertNotExistSchemaBySubjectAndId() {
// Given
final AssertSchema assertSchema = new AssertSchema(Optional.empty(), Optional.of("abc"), Optional.of(500), Optional.empty(), false);
final ConfiguredStatement<AssertSchema> statement = ConfiguredStatement
.of(KsqlParser.PreparedStatement.of("", assertSchema),
SessionConfig.of(ksqlConfig, ImmutableMap.of()));
// When:
final KsqlRestException e = assertThrows(KsqlRestException.class, () ->
AssertSchemaExecutor.execute(statement, mock(SessionProperties.class), engine, serviceContext));
// Then:
assertThat(e.getResponse().getStatus(), is(417));
assertThat(((KsqlErrorMessage) e.getResponse().getEntity()).getMessage(), is("Schema with subject name abc id 500 exists"));
}
|
@Override
protected void processOptions(LinkedList<String> args)
throws IOException {
CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE,
OPTION_PATHONLY, OPTION_DIRECTORY, OPTION_HUMAN,
OPTION_HIDENONPRINTABLE, OPTION_RECURSIVE, OPTION_REVERSE,
OPTION_MTIME, OPTION_SIZE, OPTION_ATIME, OPTION_ECPOLICY);
cf.parse(args);
pathOnly = cf.getOpt(OPTION_PATHONLY);
dirRecurse = !cf.getOpt(OPTION_DIRECTORY);
setRecursive(cf.getOpt(OPTION_RECURSIVE) && dirRecurse);
humanReadable = cf.getOpt(OPTION_HUMAN);
hideNonPrintable = cf.getOpt(OPTION_HIDENONPRINTABLE);
orderReverse = cf.getOpt(OPTION_REVERSE);
orderTime = cf.getOpt(OPTION_MTIME);
orderSize = !orderTime && cf.getOpt(OPTION_SIZE);
useAtime = cf.getOpt(OPTION_ATIME);
displayECPolicy = cf.getOpt(OPTION_ECPOLICY);
if (args.isEmpty()) args.add(Path.CUR_DIR);
initialiseOrderComparator();
}
|
@Test
public void processPathDirOrderDefault() throws IOException {
TestFile testfile01 = new TestFile("testDirectory", "testFile01");
TestFile testfile02 = new TestFile("testDirectory", "testFile02");
TestFile testfile03 = new TestFile("testDirectory", "testFile03");
TestFile testfile04 = new TestFile("testDirectory", "testFile04");
TestFile testfile05 = new TestFile("testDirectory", "testFile05");
TestFile testfile06 = new TestFile("testDirectory", "testFile06");
TestFile testDir = new TestFile("", "testDirectory");
testDir.setIsDir(true);
    // add contents in non-lexicographic order to show they get sorted
testDir.addContents(testfile01, testfile03, testfile05, testfile02,
testfile04, testfile06);
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testDir.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println("Found 6 items");
inOrder.verify(out).println(testfile01.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile02.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile04.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile06.formatLineMtime(lineFormat));
verifyNoMoreInteractions(out);
}
|
public void handleAssignment(final Map<TaskId, Set<TopicPartition>> activeTasks,
final Map<TaskId, Set<TopicPartition>> standbyTasks) {
log.info("Handle new assignment with:\n" +
"\tNew active tasks: {}\n" +
"\tNew standby tasks: {}\n" +
"\tExisting active tasks: {}\n" +
"\tExisting standby tasks: {}",
activeTasks.keySet(), standbyTasks.keySet(), activeTaskIds(), standbyTaskIds());
topologyMetadata.addSubscribedTopicsFromAssignment(
activeTasks.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()),
logPrefix
);
final Map<TaskId, Set<TopicPartition>> activeTasksToCreate = new HashMap<>(activeTasks);
final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate = new HashMap<>(standbyTasks);
final Map<Task, Set<TopicPartition>> tasksToRecycle = new HashMap<>();
final Set<Task> tasksToCloseClean = new TreeSet<>(Comparator.comparing(Task::id));
final Set<TaskId> tasksToLock =
tasks.allTaskIds().stream()
.filter(x -> activeTasksToCreate.containsKey(x) || standbyTasksToCreate.containsKey(x))
.collect(Collectors.toSet());
maybeLockTasks(tasksToLock);
        // first put aside tasks that are unrecognized because they belong to unknown named topologies
tasks.clearPendingTasksToCreate();
tasks.addPendingActiveTasksToCreate(pendingTasksToCreate(activeTasksToCreate));
tasks.addPendingStandbyTasksToCreate(pendingTasksToCreate(standbyTasksToCreate));
        // then rectify all existing tasks:
// 1. for tasks that are already owned, just update input partitions / resume and skip re-creating them
// 2. for tasks that have changed active/standby status, just recycle and skip re-creating them
// 3. otherwise, close them since they are no longer owned
final Map<TaskId, RuntimeException> failedTasks = new LinkedHashMap<>();
if (stateUpdater == null) {
handleTasksWithoutStateUpdater(activeTasksToCreate, standbyTasksToCreate, tasksToRecycle, tasksToCloseClean);
} else {
handleTasksWithStateUpdater(
activeTasksToCreate,
standbyTasksToCreate,
tasksToRecycle,
tasksToCloseClean,
failedTasks
);
failedTasks.putAll(collectExceptionsAndFailedTasksFromStateUpdater());
}
final Map<TaskId, RuntimeException> taskCloseExceptions = closeAndRecycleTasks(tasksToRecycle, tasksToCloseClean);
maybeUnlockTasks(tasksToLock);
failedTasks.putAll(taskCloseExceptions);
maybeThrowTaskExceptions(failedTasks);
createNewTasks(activeTasksToCreate, standbyTasksToCreate);
}
|
@Test
public void shouldRecycleActiveTaskInStateUpdater() {
final StreamTask activeTaskToRecycle = statefulTask(taskId03, taskId03ChangelogPartitions)
.inState(State.RESTORING)
.withInputPartitions(taskId03Partitions).build();
final StandbyTask recycledStandbyTask = standbyTask(taskId03, taskId03ChangelogPartitions)
.inState(State.CREATED)
.withInputPartitions(taskId03Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManager(ProcessingMode.AT_LEAST_ONCE, tasks, true);
when(stateUpdater.getTasks()).thenReturn(mkSet(activeTaskToRecycle));
when(standbyTaskCreator.createStandbyTaskFromActive(activeTaskToRecycle, taskId03Partitions))
.thenReturn(recycledStandbyTask);
final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
when(stateUpdater.remove(taskId03)).thenReturn(future);
future.complete(new StateUpdater.RemovedTaskResult(activeTaskToRecycle));
taskManager.handleAssignment(
Collections.emptyMap(),
mkMap(mkEntry(activeTaskToRecycle.id(), activeTaskToRecycle.inputPartitions()))
);
verify(tasks).addPendingTasksToInit(Collections.singleton(recycledStandbyTask));
verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
|
@Override
public void send(Object message) throws RemotingException {
send(message, false);
}
|
@Test
void sendTest01() throws RemotingException {
boolean sent = true;
String message = "this is a test message";
header.send(message, sent);
List<Object> objects = channel.getSentObjects();
Assertions.assertEquals(objects.get(0), "this is a test message");
}
|
public List<ConnectorMetadataUpdateHandle> getMetadataUpdateResults(List<ConnectorMetadataUpdateHandle> metadataUpdateRequests, QueryId queryId)
{
ImmutableList.Builder<ConnectorMetadataUpdateHandle> metadataUpdateResults = ImmutableList.builder();
for (ConnectorMetadataUpdateHandle connectorMetadataUpdateHandle : metadataUpdateRequests) {
HiveMetadataUpdateHandle request = (HiveMetadataUpdateHandle) connectorMetadataUpdateHandle;
String fileName = getFileName(request, queryId);
metadataUpdateResults.add(new HiveMetadataUpdateHandle(request.getRequestId(), request.getSchemaTableName(), request.getPartitionName(), Optional.of(fileName)));
}
return metadataUpdateResults.build();
}
|
@Test
public void testHiveFileRenamer()
{
HiveFileRenamer hiveFileRenamer = new HiveFileRenamer();
List<ConnectorMetadataUpdateHandle> requests = ImmutableList.of(TEST_HIVE_METADATA_UPDATE_REQUEST);
List<ConnectorMetadataUpdateHandle> results = hiveFileRenamer.getMetadataUpdateResults(requests, TEST_QUERY_ID);
// Assert # of requests is equal to # of results
assertEquals(requests.size(), results.size());
HiveMetadataUpdateHandle result = (HiveMetadataUpdateHandle) results.get(0);
assertEquals(result.getRequestId(), TEST_REQUEST_ID);
assertEquals(result.getSchemaTableName(), TEST_SCHEMA_TABLE_NAME);
assertEquals(result.getPartitionName(), Optional.of(TEST_PARTITION_NAME));
        // Assert file name returned is "0"
assertEquals(result.getMetadataUpdate(), Optional.of("0"));
}
|
@Override
public ConfigOperateResult insertOrUpdateBeta(final ConfigInfo configInfo, final String betaIps, final String srcIp,
final String srcUser) {
ConfigInfoStateWrapper configInfo4BetaState = this.findConfigInfo4BetaState(configInfo.getDataId(),
configInfo.getGroup(), configInfo.getTenant());
if (configInfo4BetaState == null) {
return addConfigInfo4Beta(configInfo, betaIps, srcIp, srcUser);
} else {
return updateConfigInfo4Beta(configInfo, betaIps, srcIp, srcUser);
}
}
|
@Test
void testInsertOrUpdateBetaOfAdd() {
String dataId = "betaDataId113";
String group = "group113";
String tenant = "tenant113";
        // mock: beta config is not found on the first query (so the add path is taken), then found afterwards
ConfigInfoStateWrapper mockedConfigInfoStateWrapper = new ConfigInfoStateWrapper();
mockedConfigInfoStateWrapper.setDataId(dataId);
mockedConfigInfoStateWrapper.setGroup(group);
mockedConfigInfoStateWrapper.setTenant(tenant);
mockedConfigInfoStateWrapper.setId(123456L);
mockedConfigInfoStateWrapper.setLastModified(System.currentTimeMillis());
when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}),
eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))).thenThrow(new EmptyResultDataAccessException(1))
.thenReturn(mockedConfigInfoStateWrapper);
String betaIps = "betaips...";
String srcIp = "srcUp...";
String srcUser = "srcUser...";
String appName = "appname";
String content = "content111";
ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, appName, content);
configInfo.setEncryptedDataKey("key34567");
//execute
ConfigOperateResult configOperateResult = externalConfigInfoBetaPersistService.insertOrUpdateBeta(configInfo, betaIps, srcIp,
srcUser);
        // expect the returned object to carry the stored id and lastModified
assertEquals(mockedConfigInfoStateWrapper.getId(), configOperateResult.getId());
assertEquals(mockedConfigInfoStateWrapper.getLastModified(), configOperateResult.getLastModified());
        // verify that the add (insert) path is invoked
Mockito.verify(jdbcTemplate, times(1))
.update(anyString(), eq(dataId), eq(group), eq(tenant), eq(configInfo.getAppName()), eq(configInfo.getContent()),
eq(configInfo.getMd5()), eq(betaIps), eq(srcIp), eq(srcUser), eq(configInfo.getEncryptedDataKey()));
}
|
@Override
public void close() throws Exception {
committer.stopAsync().awaitTerminated(1, TimeUnit.MINUTES);
}
|
@Test
public void close() throws Exception {
committer.close();
verify(fakeCommitter).stopAsync();
verify(fakeCommitter).awaitTerminated(1, TimeUnit.MINUTES);
}
|
public static Schema schemaFromPojoClass(
TypeDescriptor<?> typeDescriptor, FieldValueTypeSupplier fieldValueTypeSupplier) {
return StaticSchemaInference.schemaFromClass(typeDescriptor, fieldValueTypeSupplier);
}
|
@Test
public void testNestedPOJO() {
Schema schema =
POJOUtils.schemaFromPojoClass(
new TypeDescriptor<NestedPOJO>() {}, JavaFieldTypeSupplier.INSTANCE);
SchemaTestUtils.assertSchemaEquivalent(NESTED_POJO_SCHEMA, schema);
}
|
public long iterateAtLeastFrom(int index, Iterator<E> iterator) {
return storage.iterateAtLeastFrom(index, iterator);
}
|
@Test
public void testIterateAtLeastFrom() {
SparseIntArray.Iterator<Integer> iterator = new SparseIntArray.Iterator<>();
// test empty array
verifyIterateAtLeastFrom(0, iterator);
// try dense
for (int i = 0; i < ARRAY_STORAGE_32_MAX_SPARSE_SIZE / 2; ++i) {
set(i);
verifyIterateAtLeastFrom(i, iterator);
}
// go sparse
for (int i = 1000000; i < 1000000 + ARRAY_STORAGE_32_MAX_SPARSE_SIZE; ++i) {
set(i);
verifyIterateAtLeastFrom(i, iterator);
}
// clear everything we have added
for (int i = 0; i < ARRAY_STORAGE_32_MAX_SPARSE_SIZE / 2; ++i) {
clear(i);
verifyIterateAtLeastFrom(i, iterator);
}
for (int i = 1000000; i < 1000000 + ARRAY_STORAGE_32_MAX_SPARSE_SIZE; ++i) {
clear(i);
verifyIterateAtLeastFrom(i, iterator);
}
// test empty again
verifyIterateAtLeastFrom(100, iterator);
// try gaps
for (int i = 0; i < 1000; ++i) {
set(i * i);
verifyIterateAtLeastFrom(i, iterator);
}
// try larger gaps
for (int i = (int) Math.sqrt(Integer.MAX_VALUE) - 1000; i < (int) Math.sqrt(Integer.MAX_VALUE); ++i) {
set(i * i);
verifyIterateAtLeastFrom(i, iterator);
}
// try some edge cases
for (int i = -2; i <= 2; ++i) {
set(i);
verifyIterateAtLeastFrom(i, iterator);
}
for (int i = Short.MAX_VALUE - 2; i <= Short.MAX_VALUE + 2; ++i) {
set(i);
verifyIterateAtLeastFrom(i, iterator);
}
for (int i = Short.MIN_VALUE - 2; i <= Short.MIN_VALUE + 2; ++i) {
set(i);
verifyIterateAtLeastFrom(i, iterator);
}
for (long i = (long) Integer.MAX_VALUE - 2; i <= (long) Integer.MAX_VALUE + 2; ++i) {
set((int) i);
verifyIterateAtLeastFrom((int) i, iterator);
}
for (long i = (long) Integer.MIN_VALUE - 2; i <= (long) Integer.MIN_VALUE + 2; ++i) {
set((int) i);
verifyIterateAtLeastFrom((int) i, iterator);
}
}
|
public static FusedPipeline fuse(Pipeline p) {
return new GreedyPipelineFuser(p).fusedPipeline;
}
|
@Test
public void flattenWithHeterogeneousInputsSingleEnvOutputPartiallyMaterialized() {
Components components =
Components.newBuilder()
.putCoders("coder", Coder.newBuilder().build())
.putCoders("windowCoder", Coder.newBuilder().build())
.putWindowingStrategies(
"ws", WindowingStrategy.newBuilder().setWindowCoderId("windowCoder").build())
.putTransforms(
"pyImpulse",
PTransform.newBuilder()
.setUniqueName("PyImpulse")
.putOutputs("output", "pyImpulse.out")
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.IMPULSE_TRANSFORM_URN))
.build())
.putPcollections("pyImpulse.out", pc("pyImpulse.out"))
.putTransforms(
"pyRead",
PTransform.newBuilder()
.setUniqueName("PyRead")
.putInputs("input", "pyImpulse.out")
.putOutputs("output", "pyRead.out")
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
.setPayload(
ParDoPayload.newBuilder()
.setDoFn(FunctionSpec.newBuilder())
.build()
.toByteString()))
.setEnvironmentId("py")
.build())
.putPcollections("pyRead.out", pc("pyRead.out"))
.putTransforms(
"goImpulse",
PTransform.newBuilder()
.setUniqueName("GoImpulse")
.putOutputs("output", "goImpulse.out")
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.IMPULSE_TRANSFORM_URN))
.build())
.putPcollections("goImpulse.out", pc("goImpulse.out"))
.putTransforms(
"goRead",
PTransform.newBuilder()
.setUniqueName("GoRead")
.putInputs("input", "goImpulse.out")
.putOutputs("output", "goRead.out")
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
.setPayload(
ParDoPayload.newBuilder()
.setDoFn(FunctionSpec.newBuilder())
.build()
.toByteString()))
.setEnvironmentId("go")
.build())
.putPcollections("goRead.out", pc("goRead.out"))
.putTransforms(
"flatten",
PTransform.newBuilder()
.setUniqueName("Flatten")
.putInputs("goReadInput", "goRead.out")
.putInputs("pyReadInput", "pyRead.out")
.putOutputs("output", "flatten.out")
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.FLATTEN_TRANSFORM_URN))
.build())
.putPcollections("flatten.out", pc("flatten.out"))
.putTransforms(
"goParDo",
PTransform.newBuilder()
.setUniqueName("GoParDo")
.putInputs("input", "flatten.out")
.putOutputs("output", "goParDo.out")
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
.setPayload(
ParDoPayload.newBuilder()
.setDoFn(FunctionSpec.newBuilder())
.build()
.toByteString()))
.setEnvironmentId("go")
.build())
.putPcollections("goParDo.out", pc("goParDo.out"))
.putEnvironments("go", Environments.createDockerEnvironment("go"))
.putEnvironments("py", Environments.createDockerEnvironment("py"))
.build();
FusedPipeline fused =
GreedyPipelineFuser.fuse(Pipeline.newBuilder().setComponents(components).build());
assertThat(
fused.getRunnerExecutedTransforms(),
containsInAnyOrder(
PipelineNode.pTransform("pyImpulse", components.getTransformsOrThrow("pyImpulse")),
PipelineNode.pTransform("goImpulse", components.getTransformsOrThrow("goImpulse"))));
assertThat(
fused.getFusedStages(),
containsInAnyOrder(
ExecutableStageMatcher.withInput("goImpulse.out")
.withNoOutputs()
.withTransforms("goRead", "flatten", "goParDo"),
ExecutableStageMatcher.withInput("pyImpulse.out")
.withOutputs("flatten.out")
.withTransforms("pyRead", "flatten"),
ExecutableStageMatcher.withInput("flatten.out")
.withNoOutputs()
.withTransforms("goParDo")));
}
|
@Override
protected void doStart() throws Exception {
super.doStart();
LOG.debug("Creating connection to Azure ServiceBus");
client = getEndpoint().getServiceBusClientFactory().createServiceBusProcessorClient(getConfiguration(),
this::processMessage, this::processError);
client.start();
}
|
@Test
void synchronizationDeadLettersMessageWithoutOptionsWhenExceptionNotPresent() throws Exception {
try (ServiceBusConsumer consumer = new ServiceBusConsumer(endpoint, processor)) {
when(configuration.getServiceBusReceiveMode()).thenReturn(ServiceBusReceiveMode.PEEK_LOCK);
when(configuration.isEnableDeadLettering()).thenReturn(true);
consumer.doStart();
verify(client).start();
verify(clientFactory).createServiceBusProcessorClient(any(), any(), any());
when(messageContext.getMessage()).thenReturn(message);
processMessageCaptor.getValue().accept(messageContext);
verify(messageContext).getMessage();
verify(processor).process(any(Exchange.class), any(AsyncCallback.class));
Exchange exchange = exchangeCaptor.getValue();
assertThat(exchange).isNotNull();
Synchronization synchronization = exchange.getExchangeExtension().handoverCompletions().get(0);
synchronization.onFailure(exchange);
verify(messageContext).deadLetter();
verifyNoMoreInteractions(messageContext);
}
}
|
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
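        // both callbacks synchronize on the fetcher so response handling cannot race with other fetcher operations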
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
}
|
@Test
public void testPartialFetchWithPausedPartitions() {
        // this test creates a completed fetch with 3 records and a max poll of 2 records to assert
        // that a fetch that must be returned over at least 2 polls can be cached successfully while its partition is
        // paused, then returned successfully after it has been resumed later
buildFetcher(2);
Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords;
assignFromUser(mkSet(tp0, tp1));
subscriptions.seek(tp0, 1);
assertEquals(1, sendFetches());
client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));
consumerClient.poll(time.timer(0));
fetchedRecords = fetchRecords();
assertEquals(2, fetchedRecords.get(tp0).size(), "Should return 2 records from fetch with 3 records");
assertFalse(fetcher.hasCompletedFetches(), "Should have no completed fetches");
subscriptions.pause(tp0);
consumerClient.poll(time.timer(0));
assertEmptyFetch("Should not return records or advance position for paused partitions");
assertTrue(fetcher.hasCompletedFetches(), "Should have 1 entry in completed fetches");
assertFalse(fetcher.hasAvailableFetches(), "Should not have any available (non-paused) completed fetches");
subscriptions.resume(tp0);
consumerClient.poll(time.timer(0));
fetchedRecords = fetchRecords();
assertEquals(1, fetchedRecords.get(tp0).size(), "Should return last remaining record");
assertFalse(fetcher.hasCompletedFetches(), "Should have no completed fetches");
}
|
public URLNormalizer removeWWW() {
String host = toURL().getHost();
String newHost = StringUtils.removeStartIgnoreCase(host, "www.");
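        // only a leading "www." is removed, so hosts like "wwwexample.com" are left untouched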
url = StringUtils.replaceOnce(url, host, newHost);
return this;
}
|
@Test
public void testRemoveWWW() {
s = "http://www.example.com/foo.html";
t = "http://example.com/foo.html";
assertEquals(t, n(s).removeWWW().toString());
s = "http://wwwexample.com/foo.html";
t = "http://wwwexample.com/foo.html";
assertEquals(t, n(s).removeWWW().toString());
}
|
    @SuppressWarnings("unchecked")
    public static <E> E findStaticFieldValue(Class<?> clazz, String fieldName) {
        try {
            Field field = clazz.getField(fieldName);
            return (E) field.get(null);
        } catch (Exception ignore) {
            return null;
        }
    }
|
@Test
public void test_whenClassExist_butFieldDoesNot() {
Integer value = findStaticFieldValue(ClassWithStaticField.class, "nonexisting");
assertNull(value);
}
|
public static IntArrayList constant(int size, int value) {
IntArrayList result = new IntArrayList(size);
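        // fill the backing buffer directly and set the logical size, avoiding per-element add() calls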
Arrays.fill(result.buffer, value);
result.elementsCount = size;
return result;
}
|
@Test
public void testConstant() {
IntArrayList list = ArrayUtil.constant(10, 3);
assertEquals(10, list.size());
assertEquals(3, list.get(5));
assertEquals(3, list.get(9));
assertEquals(10, list.buffer.length);
}
|
public Optional<ProjectAlmSettingDto> findProjectBindingByUuid(String uuid) {
try (DbSession session = dbClient.openSession(false)) {
return dbClient.projectAlmSettingDao().selectByUuid(session, uuid);
}
}
|
@Test
void findProjectBindingByUuid_whenNoResult_returnsOptionalEmpty() {
when(dbClient.projectAlmSettingDao().selectByUuid(dbSession, UUID)).thenReturn(Optional.empty());
assertThat(underTest.findProjectBindingByUuid(UUID)).isEmpty();
}
|
@Override
public GlobalRollbackRequestProto convert2Proto(GlobalRollbackRequest globalRollbackRequest) {
final short typeCode = globalRollbackRequest.getTypeCode();
final AbstractMessageProto abstractMessage = AbstractMessageProto.newBuilder().setMessageType(
MessageTypeProto.forNumber(typeCode)).build();
final AbstractTransactionRequestProto abstractTransactionRequestProto = AbstractTransactionRequestProto
.newBuilder().setAbstractMessage(abstractMessage).build();
final String extraData = globalRollbackRequest.getExtraData();
AbstractGlobalEndRequestProto abstractGlobalEndRequestProto = AbstractGlobalEndRequestProto.newBuilder()
.setAbstractTransactionRequest(abstractTransactionRequestProto).setXid(globalRollbackRequest.getXid())
.setExtraData(extraData == null ? "" : extraData).build();
GlobalRollbackRequestProto result = GlobalRollbackRequestProto.newBuilder().setAbstractGlobalEndRequest(
abstractGlobalEndRequestProto).build();
return result;
}
|
@Test
public void convert2Proto() {
GlobalRollbackRequest globalRollbackRequest = new GlobalRollbackRequest();
globalRollbackRequest.setExtraData("extraData");
globalRollbackRequest.setXid("xid");
GlobalRollbackRequestConvertor convertor = new GlobalRollbackRequestConvertor();
GlobalRollbackRequestProto proto = convertor.convert2Proto(
globalRollbackRequest);
GlobalRollbackRequest real = convertor.convert2Model(proto);
assertThat((real.getTypeCode())).isEqualTo(globalRollbackRequest.getTypeCode());
assertThat((real.getXid())).isEqualTo(globalRollbackRequest.getXid());
assertThat((real.getExtraData())).isEqualTo(globalRollbackRequest.getExtraData());
}
|
public static <T> Stream<T> stream(Enumeration<T> e) {
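        // adapt the Enumeration to an Iterator and expose it as a sequential, ordered Stream of unknown size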
return StreamSupport.stream(
Spliterators.spliteratorUnknownSize(
new Iterator<T>() {
public T next() {
return e.nextElement();
}
public boolean hasNext() {
return e.hasMoreElements();
}
},
Spliterator.ORDERED), false);
}
|
@Test
public void test_stream_from_enumeration() {
Enumeration<Integer> someEnumeration = Collections.enumeration(Arrays.asList(1, 2, 3));
assertThat(EnumerationUtil.stream(someEnumeration).collect(Collectors.toList())).containsOnlyOnce(1, 2, 3);
}
|
public AclInfo getAcl(String addr, String subject, long millis) throws RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException, InterruptedException, MQBrokerException {
GetAclRequestHeader requestHeader = new GetAclRequestHeader(subject);
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.AUTH_GET_ACL, requestHeader);
RemotingCommand response = this.remotingClient.invokeSync(addr, request, millis);
assert response != null;
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
return RemotingSerializable.decode(response.getBody(), AclInfo.class);
}
default:
break;
}
throw new MQBrokerException(response.getCode(), response.getRemark());
}
|
@Test
public void assertGetAcl() throws RemotingException, InterruptedException, MQBrokerException {
mockInvokeSync();
setResponseBody(createAclInfo());
AclInfo actual = mqClientAPI.getAcl(defaultBrokerAddr, "", defaultTimeout);
assertNotNull(actual);
assertEquals("subject", actual.getSubject());
assertEquals(1, actual.getPolicies().size());
}
|
public static int validateValidHeaderValue(CharSequence value) {
int length = value.length();
if (length == 0) {
return -1;
}
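        // dispatch to a type-specialized scan that returns the index of the first invalid character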
if (value instanceof AsciiString) {
return verifyValidHeaderValueAsciiString((AsciiString) value);
}
return verifyValidHeaderValueCharSequence(value);
}
|
@Test
void headerValuesCannotEndWithNewlinesCharSequence() {
assertEquals(1, validateValidHeaderValue("a\n"));
assertEquals(1, validateValidHeaderValue("a\r"));
}
|
@Override
@MethodNotAvailable
public boolean putIfAbsent(K key, V value) {
throw new MethodNotAvailableException();
}
|
@Test(expected = MethodNotAvailableException.class)
public void testPutIfAbsent() {
adapter.putIfAbsent(23, "value");
}
|
@Override
public void getBytes(int index, ChannelBuffer dst, int dstIndex, int length) {
if (dst instanceof HeapChannelBuffer) {
getBytes(index, ((HeapChannelBuffer) dst).array, dstIndex, length);
} else {
dst.setBytes(dstIndex, array, index, length);
}
}
|
@Test
void testEqualsAndHashcode() {
HeapChannelBuffer b1 = new HeapChannelBuffer("hello-world".getBytes());
HeapChannelBuffer b2 = new HeapChannelBuffer("hello-world".getBytes());
MatcherAssert.assertThat(b1.equals(b2), is(true));
MatcherAssert.assertThat(b1.hashCode(), is(b2.hashCode()));
b1 = new HeapChannelBuffer("hello-world".getBytes());
b2 = new HeapChannelBuffer("hello-worldd".getBytes());
MatcherAssert.assertThat(b1.equals(b2), is(false));
MatcherAssert.assertThat(b1.hashCode(), not(b2.hashCode()));
}
|
public static List<File> recursiveListLocalDir(File dir) {
File[] files = dir.listFiles();
// File#listFiles can return null when the path is invalid
if (files == null) {
return Collections.emptyList();
}
List<File> result = new ArrayList<>(files.length);
for (File f : files) {
if (f.isDirectory()) {
result.addAll(recursiveListLocalDir(f));
continue;
}
result.add(f);
}
return result;
}
|
@Test
public void recursiveList() throws Exception {
File tmpDirFile = Files.createTempDir();
tmpDirFile.deleteOnExit();
Set<File> allFiles = new HashSet<>();
// Create 10 files at randomly deep level in the directory
for (int i = 0; i < 10; i++) {
createFileOrDir(tmpDirFile, i, new Random(), allFiles);
}
List<File> listedFiles = CommonUtils.recursiveListLocalDir(tmpDirFile);
assertEquals(allFiles, new HashSet<>(listedFiles));
}
|
public Map<String, Map<InetSocketAddress, ChannelInitializer<SocketChannel>>> newChannelInitializers() {
Map<String, Map<InetSocketAddress, ChannelInitializer<SocketChannel>>> channelInitializers = new HashMap<>();
Set<InetSocketAddress> addresses = new HashSet<>();
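        // track every claimed listen address so that no two extensions can bind the same port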
for (Map.Entry<String, ProxyExtensionWithClassLoader> extension : extensions.entrySet()) {
Map<InetSocketAddress, ChannelInitializer<SocketChannel>> initializers =
extension.getValue().newChannelInitializers();
initializers.forEach((address, initializer) -> {
if (!addresses.add(address)) {
log.error("extension for `{}` attempts to use {} for its listening port."
+ " But it is already occupied by other extensions.",
extension.getKey(), address);
throw new RuntimeException("extension for `" + extension.getKey()
+ "` attempts to use " + address + " for its listening port. But it is"
+ " already occupied by other messaging extensions");
}
endpoints.put(address, extension.getKey());
channelInitializers.put(extension.getKey(), initializers);
});
}
return channelInitializers;
}
|
@Test
public void testNewChannelInitializersSuccess() {
ChannelInitializer<SocketChannel> i1 = mock(ChannelInitializer.class);
ChannelInitializer<SocketChannel> i2 = mock(ChannelInitializer.class);
Map<InetSocketAddress, ChannelInitializer<SocketChannel>> p1Initializers = new HashMap<>();
p1Initializers.put(new InetSocketAddress("127.0.0.1", 6650), i1);
p1Initializers.put(new InetSocketAddress("127.0.0.2", 6651), i2);
ChannelInitializer<SocketChannel> i3 = mock(ChannelInitializer.class);
ChannelInitializer<SocketChannel> i4 = mock(ChannelInitializer.class);
Map<InetSocketAddress, ChannelInitializer<SocketChannel>> p2Initializers = new HashMap<>();
p2Initializers.put(new InetSocketAddress("127.0.0.3", 6650), i3);
p2Initializers.put(new InetSocketAddress("127.0.0.4", 6651), i4);
when(extension1.newChannelInitializers()).thenReturn(p1Initializers);
when(extension2.newChannelInitializers()).thenReturn(p2Initializers);
Map<String, Map<InetSocketAddress, ChannelInitializer<SocketChannel>>> initializers =
extensions.newChannelInitializers();
assertEquals(2, initializers.size());
assertSame(p1Initializers, initializers.get(protocol1));
assertSame(p2Initializers, initializers.get(protocol2));
}
|
@Override
public void upload(UploadTask uploadTask) throws IOException {
Throwable error = getErrorSafe();
if (error != null) {
LOG.debug("don't persist {} changesets, already failed", uploadTask.changeSets.size());
uploadTask.fail(error);
return;
}
LOG.debug("persist {} changeSets", uploadTask.changeSets.size());
try {
long size = uploadTask.getSize();
synchronized (lock) {
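            // block until the throttle has spare capacity, then seize it for this task's bytes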
while (!uploadThrottle.hasCapacity()) {
lock.wait();
}
uploadThrottle.seizeCapacity(size);
if (!uploadThrottle.hasCapacity()) {
availabilityHelper.resetUnavailable();
}
scheduledBytesCounter += size;
scheduled.add(wrapWithSizeUpdate(uploadTask, size));
scheduleUploadIfNeeded();
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
uploadTask.fail(e);
throw new IOException(e);
} catch (Exception e) {
uploadTask.fail(e);
throw e;
}
}
|
@Test
void testErrorHandling() throws Exception {
TestingStateChangeUploader probe = new TestingStateChangeUploader();
DirectScheduledExecutorService scheduler = new DirectScheduledExecutorService();
ChangelogStorageMetricGroup metrics = createUnregisteredChangelogStorageMetricGroup();
try (BatchingStateChangeUploadScheduler store =
new BatchingStateChangeUploadScheduler(
Integer.MAX_VALUE,
MAX_BYTES_IN_FLIGHT,
MAX_BYTES_IN_FLIGHT,
RetryPolicy.NONE,
probe,
scheduler,
new RetryingExecutor(
5,
metrics.getAttemptsPerUpload(),
metrics.getTotalAttemptsPerUpload()),
metrics)) {
scheduler.shutdown();
assertThatThrownBy(() -> upload(store, getChanges(4)))
.isInstanceOf(RejectedExecutionException.class);
}
}
|
@Override
public String execute(CommandContext commandContext, String[] args) {
if (commandContext.isHttp()) {
Map<String, Object> result = new HashMap<>();
result.put("warnedClasses", serializeCheckUtils.getWarnedClasses());
return JsonUtils.toJson(result);
} else {
return "WarnedClasses: \n"
+ serializeCheckUtils.getWarnedClasses().stream().sorted().collect(Collectors.joining("\n"))
+ "\n\n";
}
}
|
@Test
void test() {
FrameworkModel frameworkModel = new FrameworkModel();
SerializeSecurityManager ssm = frameworkModel.getBeanFactory().getBean(SerializeSecurityManager.class);
SerializeWarnedClasses serializeWarnedClasses = new SerializeWarnedClasses(frameworkModel);
CommandContext commandContext1 = Mockito.mock(CommandContext.class);
Mockito.when(commandContext1.isHttp()).thenReturn(false);
CommandContext commandContext2 = Mockito.mock(CommandContext.class);
Mockito.when(commandContext2.isHttp()).thenReturn(true);
Assertions.assertFalse(
serializeWarnedClasses.execute(commandContext1, null).contains("Test1234"));
Assertions.assertFalse(
serializeWarnedClasses.execute(commandContext2, null).contains("Test1234"));
ssm.getWarnedClasses().add("Test1234");
Assertions.assertTrue(
serializeWarnedClasses.execute(commandContext1, null).contains("Test1234"));
Assertions.assertTrue(
serializeWarnedClasses.execute(commandContext2, null).contains("Test1234"));
Assertions.assertFalse(
serializeWarnedClasses.execute(commandContext1, null).contains("Test4321"));
Assertions.assertFalse(
serializeWarnedClasses.execute(commandContext2, null).contains("Test4321"));
ssm.getWarnedClasses().add("Test4321");
Assertions.assertTrue(
serializeWarnedClasses.execute(commandContext1, null).contains("Test4321"));
Assertions.assertTrue(
serializeWarnedClasses.execute(commandContext2, null).contains("Test4321"));
frameworkModel.destroy();
}
|
public boolean canRunAppAM(Resource amResource) {
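// A maxAMShare of -1.0f means "no limit", so the AM can always run;
// the comparison uses an epsilon because the value is a float.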
if (Math.abs(maxAMShare - -1.0f) < 0.0001) {
return true;
}
Resource maxAMResource = computeMaxAMResource();
getMetrics().setMaxAMShare(maxAMResource);
Resource ifRunAMResource = Resources.add(amResourceUsage, amResource);
return Resources.fitsIn(ifRunAMResource, maxAMResource);
}
|
@Test
public void testCanRunAppAMReturnsFalse() {
conf.set(YarnConfiguration.RESOURCE_TYPES, CUSTOM_RESOURCE);
ResourceUtils.resetResourceTypes(conf);
resourceManager = new MockRM(conf);
resourceManager.start();
scheduler = (FairScheduler) resourceManager.getResourceScheduler();
Resource maxShare = Resource.newInstance(1024 * 8, 4,
ImmutableMap.of(CUSTOM_RESOURCE, 10L));
// Add a node to increase available memory and vcores in scheduler's
// root queue metrics
addNodeToScheduler(Resource.newInstance(4096, 10,
ImmutableMap.of(CUSTOM_RESOURCE, 25L)));
FSLeafQueue queue = setupQueue(maxShare);
//Min(availableMemory, maxShareMemory (maxResourceOverridden))
// --> Min(4096, 8192) = 4096
//Min(availableVCores, maxShareVCores (maxResourceOverridden))
// --> Min(10, 4) = 4
//Min(available test1, maxShare test1 (maxResourceOverridden))
// --> Min(25, 10) = 10
//MaxAMResource: (4096 MB memory, 4 vcores, 10 test1) * MAX_AM_SHARE
// --> 2048 MB memory, 2 vcores, 5 test1
Resource expectedAMShare = Resource.newInstance(2048, 2,
ImmutableMap.of(CUSTOM_RESOURCE, 5L));
Resource appAMResource = Resource.newInstance(2048, 2,
ImmutableMap.of(CUSTOM_RESOURCE, 6L));
Map<String, Long> customResourceValues =
verifyQueueMetricsForCustomResources(queue);
boolean result = queue.canRunAppAM(appAMResource);
assertFalse("AM should not have been allocated!", result);
verifyAMShare(queue, expectedAMShare, customResourceValues);
}
|
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
List<String> partNames, boolean areAllPartsFound) throws MetaException {
checkStatisticsList(colStatsWithSourceInfo);
ColumnStatisticsObj statsObj = null;
String colType;
String colName = null;
// check if all the ColumnStatisticsObjs contain stats and all the ndv are
// bitvectors
boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
NumDistinctValueEstimator ndvEstimator = null;
boolean areAllNDVEstimatorsMergeable = true;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
if (statsObj == null) {
colName = cso.getColName();
colType = cso.getColType();
statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
cso.getStatsData().getSetField());
LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName, doAllPartitionContainStats);
}
LongColumnStatsDataInspector columnStatsData = longInspectorFromStats(cso);
// check if we can merge NDV estimators
if (columnStatsData.getNdvEstimator() == null) {
areAllNDVEstimatorsMergeable = false;
break;
} else {
NumDistinctValueEstimator estimator = columnStatsData.getNdvEstimator();
if (ndvEstimator == null) {
ndvEstimator = estimator;
} else {
if (!ndvEstimator.canMerge(estimator)) {
areAllNDVEstimatorsMergeable = false;
break;
}
}
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
LOG.debug("all of the bit vectors can merge for {} is {}", colName, areAllNDVEstimatorsMergeable);
ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
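// Aggregate directly when stats are present for every partition (or there
// is at most one stats object); otherwise fall through to extrapolation below.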
if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
LongColumnStatsDataInspector aggregateData = null;
long lowerBound = 0;
long higherBound = 0;
double densityAvgSum = 0.0;
LongColumnStatsMerger merger = new LongColumnStatsMerger();
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
LongColumnStatsDataInspector newData = longInspectorFromStats(cso);
lowerBound = Math.max(lowerBound, newData.getNumDVs());
higherBound += newData.getNumDVs();
densityAvgSum += ((double) (newData.getHighValue() - newData.getLowValue())) / newData.getNumDVs();
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(merger.mergeLowValue(
merger.getLowValue(aggregateData), merger.getLowValue(newData)));
aggregateData.setHighValue(merger.mergeHighValue(
merger.getHighValue(aggregateData), merger.getHighValue(newData)));
aggregateData.setNumNulls(merger.mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
aggregateData.setNumDVs(merger.mergeNumDVs(aggregateData.getNumDVs(), newData.getNumDVs()));
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
// if all the ColumnStatisticsObjs contain bitvectors, we do not need to
// use uniform distribution assumption because we can merge bitvectors
// to get a good estimation.
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
} else {
long estimation;
if (useDensityFunctionForNDVEstimation) {
// We have estimation, lowerbound and higherbound. We use estimation
// if it is between lowerbound and higherbound.
double densityAvg = densityAvgSum / partNames.size();
estimation = (long) ((aggregateData.getHighValue() - aggregateData.getLowValue()) / densityAvg);
if (estimation < lowerBound) {
estimation = lowerBound;
} else if (estimation > higherBound) {
estimation = higherBound;
}
} else {
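// ndvTuner in [0, 1] linearly interpolates between the lower bound
// (the largest per-partition NDV) and the higher bound (the sum of all
// per-partition NDVs).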
estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
}
aggregateData.setNumDVs(estimation);
}
columnStatisticsData.setLongStats(aggregateData);
} else {
// TODO: bail out if missing stats are over a certain threshold
// we need extrapolation
LOG.debug("start extrapolation for {}", colName);
Map<String, Integer> indexMap = new HashMap<>();
for (int index = 0; index < partNames.size(); index++) {
indexMap.put(partNames.get(index), index);
}
Map<String, Double> adjustedIndexMap = new HashMap<>();
Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
// while we scan the css, we also get the densityAvg, lowerbound and
// higherbound when useDensityFunctionForNDVEstimation is true.
double densityAvgSum = 0.0;
if (!areAllNDVEstimatorsMergeable) {
// if not every partition uses bitvector for ndv, we just fall back to
// the traditional extrapolation methods.
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
LongColumnStatsData newData = cso.getStatsData().getLongStats();
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += ((double) (newData.getHighValue() - newData.getLowValue())) / newData.getNumDVs();
}
adjustedIndexMap.put(partName, (double) indexMap.get(partName));
adjustedStatsMap.put(partName, cso.getStatsData());
}
} else {
// we first merge all the adjacent bitvectors that we could merge and
// derive new partition names and index.
StringBuilder pseudoPartName = new StringBuilder();
double pseudoIndexSum = 0;
int length = 0;
int curIndex = -1;
LongColumnStatsDataInspector aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
LongColumnStatsDataInspector newData = longInspectorFromStats(cso);
// newData.isSetBitVectors() should be true for sure because we
// already checked it before.
if (indexMap.get(partName) != curIndex) {
// There is bitvector, but it is not adjacent to the previous ones.
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setLongStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += ((double) (aggregateData.getHighValue() - aggregateData.getLowValue()))
/ aggregateData.getNumDVs();
}
// reset everything
pseudoPartName = new StringBuilder();
pseudoIndexSum = 0;
length = 0;
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
aggregateData = null;
}
curIndex = indexMap.get(partName);
pseudoPartName.append(partName);
pseudoIndexSum += curIndex;
length++;
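// Advance the expected index: the next partition is adjacent only if
// indexMap.get(partName) equals this incremented value (checked above).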
curIndex++;
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue()));
aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue()));
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
}
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setLongStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += ((double) (aggregateData.getHighValue() - aggregateData.getLowValue()))
/ aggregateData.getNumDVs();
}
}
}
extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
}
LOG.debug(
"Ndv estimation for {} is {}. # of partitions requested: {}. # of partitions found: {}",
colName, columnStatisticsData.getLongStats().getNumDVs(), partNames.size(),
colStatsWithSourceInfo.size());
KllHistogramEstimator mergedKllHistogramEstimator = mergeHistograms(colStatsWithSourceInfo);
if (mergedKllHistogramEstimator != null) {
columnStatisticsData.getLongStats().setHistogram(mergedKllHistogramEstimator.serialize());
}
statsObj.setStatsData(columnStatisticsData);
return statsObj;
}
|
@Test
public void testAggregateSingleStatWhenNullValues() throws MetaException {
List<String> partitions = Collections.singletonList("part1");
ColumnStatisticsData data1 = new ColStatsBuilder<>(long.class).numNulls(1).numDVs(2).build();
List<ColStatsObjWithSourceInfo> statsList =
Collections.singletonList(createStatsWithInfo(data1, TABLE, COL, partitions.get(0)));
LongColumnStatsAggregator aggregator = new LongColumnStatsAggregator();
ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true);
assertEqualStatistics(data1, computedStatsObj.getStatsData());
aggregator.useDensityFunctionForNDVEstimation = true;
computedStatsObj = aggregator.aggregate(statsList, partitions, true);
assertEqualStatistics(data1, computedStatsObj.getStatsData());
aggregator.useDensityFunctionForNDVEstimation = false;
aggregator.ndvTuner = 1;
// ndv tuner does not have any effect because min numDVs and max numDVs coincide (we have a single stats)
computedStatsObj = aggregator.aggregate(statsList, partitions, true);
assertEqualStatistics(data1, computedStatsObj.getStatsData());
}
|
public DoubleArrayAsIterable usingExactEquality() {
return new DoubleArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
}
|
@Test
public void usingExactEquality_containsAtLeast_primitiveDoubleArray_failure() {
expectFailureWhenTestingThat(array(1.1, 2.2, 3.3))
.usingExactEquality()
.containsAtLeast(array(2.2, 99.99));
assertFailureKeys(
"value of",
"missing (1)",
"---",
"expected to contain at least",
"testing whether",
"but was");
assertFailureValue("missing (1)", "99.99");
}
|
@Override
public void getConfig(ZookeeperServerConfig.Builder builder) {
ConfigServer[] configServers = getConfigServers();
int[] zookeeperIds = getConfigServerZookeeperIds();
if (configServers.length != zookeeperIds.length) {
throw new IllegalArgumentException(String.format("Number of provided config server hosts (%d) must be the " +
"same as number of provided config server zookeeper ids (%d)",
configServers.length, zookeeperIds.length));
}
String myhostname = HostName.getLocalhost();
// TODO: Server index should be in interval [1, 254] according to doc,
// however, we cannot change this id for an existing server
for (int i = 0; i < configServers.length; i++) {
if (zookeeperIds[i] < 0) {
throw new IllegalArgumentException(String.format("Zookeeper ids cannot be negative, was %d for %s",
zookeeperIds[i], configServers[i].hostName));
}
if (configServers[i].hostName.equals(myhostname)) {
builder.myid(zookeeperIds[i]);
}
builder.server(getZkServer(configServers[i], zookeeperIds[i]));
}
if (options.zookeeperClientPort().isPresent()) {
builder.clientPort(options.zookeeperClientPort().get());
}
boolean isHostedVespa = options.hostedVespa().orElse(false);
if (isHostedVespa) {
builder.vespaTlsConfigFile(Defaults.getDefaults().underVespaHome("var/zookeeper/conf/tls.conf.json"));
}
builder.dynamicReconfiguration(isHostedVespa);
builder.reconfigureEnsemble(!isHostedVespa);
builder.snapshotMethod(options.zooKeeperSnapshotMethod());
builder.juteMaxBuffer(options.zookeeperJuteMaxBuffer());
}
|
@Test
void zookeeperConfig_negative_zk_id() {
assertThrows(IllegalArgumentException.class, () -> {
TestOptions testOptions = createTestOptions(List.of("cfg1", "localhost", "cfg3"), List.of(1, 2, -1));
getConfig(ZookeeperServerConfig.class, testOptions);
});
}
|
public static Ip6Address valueOf(byte[] value) {
return new Ip6Address(value);
}
|
@Test
public void testAddressToOctetsIPv6() {
Ip6Address ipAddress;
byte[] value;
value = new byte[] {0x11, 0x11, 0x22, 0x22,
0x33, 0x33, 0x44, 0x44,
0x55, 0x55, 0x66, 0x66,
0x77, 0x77,
(byte) 0x88, (byte) 0x88};
ipAddress =
Ip6Address.valueOf("1111:2222:3333:4444:5555:6666:7777:8888");
assertThat(ipAddress.toOctets(), is(value));
value = new byte[] {0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00};
ipAddress = Ip6Address.valueOf("::");
assertThat(ipAddress.toOctets(), is(value));
value = new byte[] {(byte) 0xff, (byte) 0xff,
(byte) 0xff, (byte) 0xff,
(byte) 0xff, (byte) 0xff,
(byte) 0xff, (byte) 0xff,
(byte) 0xff, (byte) 0xff,
(byte) 0xff, (byte) 0xff,
(byte) 0xff, (byte) 0xff,
(byte) 0xff, (byte) 0xff};
ipAddress =
Ip6Address.valueOf("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff");
assertThat(ipAddress.toOctets(), is(value));
}
|
@Override
public CompletableFuture<ConsumerGroupHeartbeatResponseData> consumerGroupHeartbeat(
RequestContext context,
ConsumerGroupHeartbeatRequestData request
) {
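// Reject the heartbeat while the coordinator service is not active
// (not started yet or shutting down).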
if (!isActive.get()) {
return CompletableFuture.completedFuture(new ConsumerGroupHeartbeatResponseData()
.setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code())
);
}
return runtime.scheduleWriteOperation(
"consumer-group-heartbeat",
topicPartitionFor(request.groupId()),
Duration.ofMillis(config.offsetCommitTimeoutMs()),
coordinator -> coordinator.consumerGroupHeartbeat(context, request)
).exceptionally(exception -> handleOperationException(
"consumer-group-heartbeat",
request,
exception,
(error, message) -> new ConsumerGroupHeartbeatResponseData()
.setErrorCode(error.code())
.setErrorMessage(message)
));
}
|
@Test
public void testConsumerGroupHeartbeat() throws ExecutionException, InterruptedException, TimeoutException {
CoordinatorRuntime<GroupCoordinatorShard, CoordinatorRecord> runtime = mockRuntime();
GroupCoordinatorService service = new GroupCoordinatorService(
new LogContext(),
createConfig(),
runtime,
new GroupCoordinatorMetrics(),
createConfigManager()
);
ConsumerGroupHeartbeatRequestData request = new ConsumerGroupHeartbeatRequestData()
.setGroupId("foo");
service.startup(() -> 1);
when(runtime.scheduleWriteOperation(
ArgumentMatchers.eq("consumer-group-heartbeat"),
ArgumentMatchers.eq(new TopicPartition("__consumer_offsets", 0)),
ArgumentMatchers.eq(Duration.ofMillis(5000)),
ArgumentMatchers.any()
)).thenReturn(CompletableFuture.completedFuture(
new ConsumerGroupHeartbeatResponseData()
));
CompletableFuture<ConsumerGroupHeartbeatResponseData> future = service.consumerGroupHeartbeat(
requestContext(ApiKeys.CONSUMER_GROUP_HEARTBEAT),
request
);
assertEquals(new ConsumerGroupHeartbeatResponseData(), future.get(5, TimeUnit.SECONDS));
}
|
@Override
public <T> ResponseFuture<T> sendRequest(Request<T> request, RequestContext requestContext)
{
FutureCallback<Response<T>> callback = new FutureCallback<>();
sendRequest(request, requestContext, callback);
return new ResponseFutureImpl<>(callback);
}
|
@SuppressWarnings("deprecation")
@Test(dataProvider = TestConstants.RESTLI_PROTOCOL_1_2_PREFIX + "sendRequestOptions")
public void testRestLiResponseExceptionCallback(SendRequestOption option,
TimeoutOption timeoutOption,
ProtocolVersionOption versionOption,
ProtocolVersion protocolVersion,
String errorResponseHeaderName,
ContentType contentType)
throws ExecutionException, TimeoutException, InterruptedException, RestLiDecodingException
{
final String ERR_KEY = "someErr";
final String ERR_VALUE = "WHOOPS!";
final String ERR_MSG = "whoops2";
final int HTTP_CODE = 400;
final int APP_CODE = 666;
final String CODE = "INVALID_INPUT";
final String DOC_URL = "https://example.com/errors/invalid-input";
final String REQUEST_ID = "abc123";
RestClient client = mockClient(ERR_KEY, ERR_VALUE, ERR_MSG, HTTP_CODE, APP_CODE, CODE, DOC_URL, REQUEST_ID,
protocolVersion, errorResponseHeaderName);
Request<EmptyRecord> request = mockRequest(EmptyRecord.class, versionOption, contentType);
RequestBuilder<Request<EmptyRecord>> requestBuilder = mockRequestBuilder(request);
FutureCallback<Response<EmptyRecord>> callback = new FutureCallback<>();
try
{
sendRequest(option, client, request, requestBuilder, callback);
Long l = timeoutOption._l;
TimeUnit timeUnit = timeoutOption._timeUnit;
Response<EmptyRecord> response = l == null ? callback.get() : callback.get(l, timeUnit);
Assert.fail("Should have thrown");
}
catch (ExecutionException e)
{
// New error-response checks (RestLiResponseException)
Throwable cause = e.getCause();
Assert.assertTrue(cause instanceof RestLiResponseException, "Expected RestLiResponseException not " + cause.getClass().getName());
RestLiResponseException rlre = (RestLiResponseException)cause;
Assert.assertEquals(HTTP_CODE, rlre.getStatus());
Assert.assertEquals(ERR_VALUE, rlre.getErrorDetails().get(ERR_KEY));
Assert.assertEquals(APP_CODE, rlre.getServiceErrorCode());
Assert.assertEquals(ERR_MSG, rlre.getServiceErrorMessage());
Assert.assertEquals(CODE, rlre.getCode());
Assert.assertEquals(DOC_URL, rlre.getDocUrl());
Assert.assertEquals(REQUEST_ID, rlre.getRequestId());
Assert.assertEquals(EmptyRecord.class.getCanonicalName(), rlre.getErrorDetailType());
Assert.assertNotNull(rlre.getErrorDetailsRecord());
Assert.assertTrue(rlre.getErrorDetailsRecord() instanceof EmptyRecord);
// Old error-response checks (RestException)
Assert.assertTrue(cause instanceof RestException, "Expected RestException not " + cause.getClass().getName());
RestException re = (RestException)cause;
RestResponse r = re.getResponse();
ErrorResponse er = new EntityResponseDecoder<>(ErrorResponse.class).decodeResponse(r).getEntity();
Assert.assertEquals(HTTP_CODE, r.getStatus());
Assert.assertEquals(ERR_VALUE, er.getErrorDetails().data().getString(ERR_KEY));
Assert.assertEquals(APP_CODE, er.getServiceErrorCode().intValue());
Assert.assertEquals(ERR_MSG, er.getMessage());
}
}
|
@Override
public void setAttemptCount(JobVertexID jobVertexId, int subtaskIndex, int attemptNumber) {
Preconditions.checkArgument(subtaskIndex >= 0);
Preconditions.checkArgument(attemptNumber >= 0);
final List<Integer> attemptCounts =
vertexSubtaskToAttemptCounts.computeIfAbsent(
jobVertexId, ignored -> new ArrayList<>(32));
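// Pad the per-subtask list with zeros up to subtaskIndex before setting it.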
while (subtaskIndex >= attemptCounts.size()) {
attemptCounts.add(0);
}
attemptCounts.set(subtaskIndex, attemptNumber);
}
|
@Test
void testSetAttemptCount() {
final DefaultVertexAttemptNumberStore vertexAttemptNumberStore =
new DefaultVertexAttemptNumberStore();
final JobVertexID jobVertexId = new JobVertexID();
final int subtaskIndex = 4;
final int attemptCount = 2;
vertexAttemptNumberStore.setAttemptCount(jobVertexId, subtaskIndex, attemptCount);
assertThat(
vertexAttemptNumberStore
.getAttemptCounts(jobVertexId)
.getAttemptCount(subtaskIndex))
.isEqualTo(attemptCount);
}
|
public Integer doCall() throws Exception {
String projectName = getProjectName();
String workingDir = RUN_PLATFORM_DIR + "/" + projectName;
printer().println("Exporting application ...");
// Cache export output in String for later usage in case of error
Printer runPrinter = printer();
StringPrinter exportPrinter = new StringPrinter();
getMain().withPrinter(exportPrinter);
KubernetesExport export = new KubernetesExport(
getMain(), new KubernetesExport.ExportConfigurer(
runtime,
quarkusVersion,
true,
true,
false,
workingDir,
List.of(filePaths),
gav,
openApi,
true,
true,
false,
false,
"off"));
export.image = image;
export.imageRegistry = imageRegistry;
export.imageGroup = imageGroup;
export.imageBuilder = imageBuilder;
export.clusterType = clusterType;
export.traitProfile = traitProfile;
export.serviceAccount = serviceAccount;
export.properties = properties;
export.configs = configs;
export.resources = resources;
export.envVars = envVars;
export.volumes = volumes;
export.connects = connects;
export.annotations = annotations;
export.labels = labels;
export.traits = traits;
int exit = export.export();
// Revert printer to this run command's printer
getMain().withPrinter(runPrinter);
if (exit != 0) {
// print export command output with error details
printer().println(exportPrinter.getOutput());
return exit;
}
if (output != null) {
if (RuntimeType.quarkus == runtime) {
exit = buildQuarkus(workingDir);
} else if (RuntimeType.springBoot == runtime) {
exit = buildSpringBoot(workingDir);
}
if (exit != 0) {
printer().println("Project build failed!");
return exit;
}
File manifest;
switch (output) {
case "yaml" -> manifest = KubernetesHelper.resolveKubernetesManifest(workingDir + "/target/kubernetes");
case "json" ->
manifest = KubernetesHelper.resolveKubernetesManifest(workingDir + "/target/kubernetes", "json");
default -> {
printer().printf("Unsupported output format '%s' (supported: yaml, json)%n", output);
return 1;
}
}
try (FileInputStream fis = new FileInputStream(manifest)) {
printer().println(IOHelper.loadText(fis));
}
return 0;
}
if (RuntimeType.quarkus == runtime) {
exit = deployQuarkus(workingDir);
} else if (RuntimeType.springBoot == runtime) {
exit = deploySpringBoot(workingDir);
}
if (exit != 0) {
printer().println("Deployment to %s failed!".formatted(Optional.ofNullable(clusterType)
.map(StringHelper::capitalize).orElse("Kubernetes")));
return exit;
}
if (dev) {
DefaultCamelContext reloadContext = new DefaultCamelContext(false);
configureFileWatch(reloadContext, export, workingDir);
reloadContext.start();
if (cleanup) {
installShutdownInterceptor(projectName, workingDir);
}
}
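// Wait (up to 10 minutes) for the integration's pod to reach the Running
// phase before tailing logs or entering dev mode.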
if (dev || wait || logs) {
client(Pod.class).withLabel(BaseTrait.INTEGRATION_LABEL, projectName)
.waitUntilCondition(it -> "Running".equals(it.getStatus().getPhase()), 10, TimeUnit.MINUTES);
}
if (dev || logs) {
PodLogs logsCommand = new PodLogs(getMain());
logsCommand.withClient(client());
logsCommand.label = "%s=%s".formatted(BaseTrait.INTEGRATION_LABEL, projectName);
logsCommand.doCall();
}
return 0;
}
|
@Test
public void shouldGenerateKubernetesManifest() throws Exception {
KubernetesRun command = createCommand();
command.filePaths = new String[] { "classpath:route.yaml" };
int exit = command.doCall();
Assertions.assertEquals(0, exit);
List<HasMetadata> resources = kubernetesClient.load(getKubernetesManifestAsStream(printer.getOutput())).items();
Assertions.assertEquals(3, resources.size());
Deployment deployment = resources.stream()
.filter(it -> Deployment.class.isAssignableFrom(it.getClass()))
.map(Deployment.class::cast)
.findFirst()
.orElseThrow(() -> new RuntimeCamelException("Missing deployment in Kubernetes manifest"));
Assertions.assertEquals("route", deployment.getMetadata().getName());
Assertions.assertEquals(1, deployment.getSpec().getTemplate().getSpec().getContainers().size());
Assertions.assertEquals("route", deployment.getMetadata().getLabels().get(BaseTrait.INTEGRATION_LABEL));
Assertions.assertEquals("route", deployment.getSpec().getTemplate().getSpec().getContainers().get(0).getName());
Assertions.assertEquals(3, deployment.getSpec().getSelector().getMatchLabels().size());
Assertions.assertEquals("route", deployment.getSpec().getSelector().getMatchLabels().get(BaseTrait.INTEGRATION_LABEL));
Assertions.assertEquals("docker.io/camel-test/route:1.0-SNAPSHOT",
deployment.getSpec().getTemplate().getSpec().getContainers().get(0).getImage());
Assertions.assertEquals("Always",
deployment.getSpec().getTemplate().getSpec().getContainers().get(0).getImagePullPolicy());
}
|
public TimestampOffset entry(int n) {
return maybeLock(lock, () -> {
if (n >= entries())
throw new IllegalArgumentException("Attempt to fetch the " + n + "th entry from time index "
+ file().getAbsolutePath() + " which has size " + entries());
return parseEntry(mmap(), n);
});
}
|
@Test
public void testEntry() {
appendEntries(maxEntries - 1);
assertEquals(new TimestampOffset(10L, 55L), idx.entry(0));
assertEquals(new TimestampOffset(20L, 65L), idx.entry(1));
assertEquals(new TimestampOffset(30L, 75L), idx.entry(2));
assertEquals(new TimestampOffset(40L, 85L), idx.entry(3));
}
|
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n) {
return invoke(n, BigDecimal.ZERO);
}
|
@Test
void invokeRoundingOdd() {
FunctionTestUtil.assertResult(roundHalfDownFunction.invoke(BigDecimal.valueOf(10.35)), BigDecimal.valueOf(10));
FunctionTestUtil.assertResult(roundHalfDownFunction.invoke(BigDecimal.valueOf(10.35), BigDecimal.ONE),
BigDecimal.valueOf(10.3));
}
|
public static <T> Predicate<T> instantiatePredicateClass(Class<? extends Predicate<T>> clazz) {
try {
Constructor<? extends Predicate<T>> c = clazz.getConstructor();
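// Class#getConstructor never returns null (it throws NoSuchMethodException
// instead), so the null check below is purely defensive.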
if (c != null) {
return c.newInstance();
} else {
throw new InstantiationException(INSTANTIATION_ERROR_PREFIX + clazz.getName());
}
} catch (Exception e) {
throw new InstantiationException(INSTANTIATION_ERROR_PREFIX + clazz.getName(), e);
}
}
|
@Test
public void shouldInstantiatePredicateClass() {
assertThat(ClassUtils.instantiatePredicateClass(PublicPredicate.class)).isNotNull();
}
|
public PackageDefinition findPackageDefinitionWith(String packageId) {
for (PackageRepository packageRepository : this) {
for (PackageDefinition packageDefinition : packageRepository.getPackages()) {
if (packageDefinition.getId().equals(packageId)) {
return packageDefinition;
}
}
}
return null;
}
|
@Test
void shouldGetPackageDefinitionForGivenPackageId() throws Exception {
PackageRepository repo1 = PackageRepositoryMother.create("repo-id1", "repo1", "plugin-id", "1.0", null);
PackageDefinition packageDefinitionOne = PackageDefinitionMother.create("pid1", repo1);
PackageDefinition packageDefinitionTwo = PackageDefinitionMother.create("pid2", repo1);
repo1.getPackages().addAll(List.of(packageDefinitionOne, packageDefinitionTwo));
PackageRepository repo2 = PackageRepositoryMother.create("repo-id2", "repo2", "plugin-id", "1.0", null);
PackageDefinition packageDefinitionThree = PackageDefinitionMother.create("pid3", repo2);
PackageDefinition packageDefinitionFour = PackageDefinitionMother.create("pid4", repo2);
repo2.getPackages().addAll(List.of(packageDefinitionThree, packageDefinitionFour));
PackageRepositories packageRepositories = new PackageRepositories(repo1, repo2);
assertThat(packageRepositories.findPackageDefinitionWith("pid3")).isEqualTo(packageDefinitionThree);
assertThat(packageRepositories.findPackageDefinitionWith("pid5")).isNull();
}
|
public KsqlTarget target(final URI server) {
return target(server, Collections.emptyMap());
}
|
@Test
public void shouldRequestStatuses() {
// Given:
CommandStatuses commandStatuses = new CommandStatuses(new HashMap<>());
server.setResponseObject(commandStatuses);
// When:
KsqlTarget target = ksqlClient.target(serverUri);
RestResponse<CommandStatuses> response = target.getStatuses();
// Then:
assertThat(server.getHttpMethod(), is(HttpMethod.GET));
assertThat(server.getBody(), nullValue());
assertThat(server.getPath(), is("/status"));
assertThat(server.getHeaders().get("Accept"), is("application/json"));
assertThat(response.get(), is(commandStatuses));
}
|
@Override
public TransformResultMetadata getResultMetadata() {
return new TransformResultMetadata(_lookupColumnFieldSpec.getDataType(),
_lookupColumnFieldSpec.isSingleValueField(), false);
}
|
@Test
public void resultDataTypeTest()
throws Exception {
Map<String, FieldSpec.DataType> testCases = new HashMap<>();
testCases.put("teamName", FieldSpec.DataType.STRING);
testCases.put("teamInteger", FieldSpec.DataType.INT);
testCases.put("teamFloat", FieldSpec.DataType.FLOAT);
testCases.put("teamLong", FieldSpec.DataType.LONG);
testCases.put("teamDouble", FieldSpec.DataType.DOUBLE);
for (Map.Entry<String, FieldSpec.DataType> testCase : testCases.entrySet()) {
ExpressionContext expression = RequestContextUtils.getExpression(
String.format("lookup('baseballTeams','%s','teamID',%s)", testCase.getKey(), STRING_SV_COLUMN));
TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
Assert.assertEquals(transformFunction.getResultMetadata().getDataType(), testCase.getValue(),
String.format("Expecting %s data type for lookup column: '%s'", testCase.getValue(), testCase.getKey()));
}
}
|
@Override
public Region updateRegion(RegionId regionId, String name, Region.Type type,
List<Set<NodeId>> masterNodeIds) {
checkNotNull(regionId, REGION_ID_NULL);
checkNotNull(name, NAME_NULL);
checkNotNull(type, REGION_TYPE_NULL);
return store.updateRegion(regionId, name, type, genAnnots(regionId),
masterNodeIds == null ? of() : masterNodeIds);
}
|
@Test(expected = ItemNotFoundException.class)
public void missingUpdate() {
service.updateRegion(RID1, "R1", METRO, MASTERS);
}
|
@Bean("shiroSecurityManager")
public DefaultWebSecurityManager securityManager(@Lazy @Qualifier("shiroRealm") final AuthorizingRealm shiroRealm) {
DefaultWebSecurityManager securityManager = new DefaultWebSecurityManager();
securityManager.setRealm(shiroRealm);
return securityManager;
}
|
@Test
public void testSecurityManager() {
AuthorizingRealm realm = mock(AuthorizingRealm.class);
DefaultWebSecurityManager securityManager = shiroConfiguration.securityManager(realm);
Object[] realms = securityManager.getRealms().toArray();
assertEquals(1, realms.length);
assertEquals(realm, realms[0]);
}
|
@Override
public Optional<QueryId> chooseQueryToKill(List<QueryMemoryInfo> runningQueries, List<MemoryInfo> nodes)
{
Map<QueryId, Long> memoryReservationOnBlockedNodes = new HashMap<>();
for (MemoryInfo node : nodes) {
MemoryPoolInfo generalPool = node.getPools().get(GENERAL_POOL);
if (generalPool == null) {
continue;
}
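// A node counts as blocked only when its general pool has neither free
// bytes nor revocable reserved bytes left.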
if (generalPool.getFreeBytes() + generalPool.getReservedRevocableBytes() > 0) {
continue;
}
Map<QueryId, Long> queryMemoryReservations = generalPool.getQueryMemoryReservations();
queryMemoryReservations.forEach((queryId, memoryReservation) -> {
memoryReservationOnBlockedNodes.compute(queryId, (id, oldValue) -> oldValue == null ? memoryReservation : oldValue + memoryReservation);
});
}
return memoryReservationOnBlockedNodes.entrySet().stream()
.max(comparingLong(Map.Entry::getValue))
.map(Map.Entry::getKey);
}
|
@Test
public void testGeneralPoolNotBlocked()
{
int reservePool = 10;
int generalPool = 12;
Map<String, Map<String, Long>> queries = ImmutableMap.<String, Map<String, Long>>builder()
.put("q_1", ImmutableMap.of("n1", 0L, "n2", 6L, "n3", 0L, "n4", 0L, "n5", 0L))
.put("q_2", ImmutableMap.of("n1", 3L, "n2", 5L, "n3", 2L, "n4", 4L, "n5", 0L))
.put("q_r", ImmutableMap.of("n1", 6L, "n2", 6L, "n3", 6L, "n4", 6L, "n5", 6L))
.build();
assertEquals(
lowMemoryKiller.chooseQueryToKill(
toQueryMemoryInfoList("q_r", queries),
toNodeMemoryInfoList(reservePool, generalPool, "q_r", queries)),
Optional.empty());
}
|
public static Result<Void> success() {
return new Result<Void>()
.setCode(Result.SUCCESS_CODE);
}
|
@Test
public void testSuccess() {
Object o = new Object();
Assert.isTrue(o.equals(Results.success(o).getData()));
Assert.isTrue(Result.SUCCESS_CODE.equals(Results.success().getCode()));
}
|
@VisibleForTesting
void notifyTokenExpiration() {
try {
// Avoid notification multiple times in case of data center edition
if (!lockManager.tryLock(LOCK_NAME, LOCK_DURATION)) {
return;
}
notificationSender.sendNotifications();
} catch (RuntimeException e) {
LOG.error("Error in sending token expiration notification", e);
}
}
|
@Test
public void log_error_if_exception_in_sending_notification() {
when(lockManager.tryLock(anyString(), anyInt())).thenReturn(true);
doThrow(new IllegalStateException()).when(notificationSender).sendNotifications();
underTest.notifyTokenExpiration();
assertThat(logTester.getLogs(LoggerLevel.ERROR))
.extracting(LogAndArguments::getFormattedMsg)
.containsExactly("Error in sending token expiration notification");
}
|
@Override
public void removeMember(String memberId) {
ConsumerGroupMember oldMember = members.remove(memberId);
maybeUpdateSubscribedTopicNamesAndGroupSubscriptionType(oldMember, null);
maybeUpdateServerAssignors(oldMember, null);
maybeRemovePartitionEpoch(oldMember);
removeStaticMember(oldMember);
maybeUpdateGroupState();
maybeUpdateNumClassicProtocolMembers(oldMember, null);
maybeUpdateClassicProtocolMembersSupportedProtocols(oldMember, null);
}
|
@Test
public void testRemoveMember() {
ConsumerGroup consumerGroup = createConsumerGroup("foo");
ConsumerGroupMember member = consumerGroup.getOrMaybeCreateMember("member", true);
consumerGroup.updateMember(member);
assertTrue(consumerGroup.hasMember("member"));
consumerGroup.removeMember("member");
assertFalse(consumerGroup.hasMember("member"));
}
|
static Builder builder() {
return new AutoValue_PubsubRowToMessage.Builder();
}
|
@Test
public void testSetTargetTimestampAttributeName() {
Instant mockTimestamp = Instant.now();
String mockTimestampString = mockTimestamp.toString();
byte[] bytes = new byte[] {1, 2, 3, 4};
Field payloadBytesField = Field.of(DEFAULT_PAYLOAD_KEY_NAME, FieldType.BYTES);
Schema withPayloadBytesSchema = Schema.of(payloadBytesField);
Row withPayloadBytes = Row.withSchema(withPayloadBytesSchema).attachValues(bytes);
String customTargetTimestampAttributeName = "custom_timestamp_key";
PubsubRowToMessage withoutSetTargetTimestampAttributeName =
PubsubRowToMessage.builder().setMockInstant(mockTimestamp).build();
PubsubRowToMessage withSetTargetTimestampAttributeName =
PubsubRowToMessage.builder()
.setMockInstant(mockTimestamp)
.setTargetTimestampAttributeName(customTargetTimestampAttributeName)
.build();
PCollection<Row> input =
pipeline.apply(Create.of(withPayloadBytes)).setRowSchema(Schema.of(payloadBytesField));
PAssert.that(input.apply(withoutSetTargetTimestampAttributeName).get(OUTPUT))
.containsInAnyOrder(
new PubsubMessage(
bytes, ImmutableMap.of(DEFAULT_EVENT_TIMESTAMP_KEY_NAME, mockTimestampString)));
PAssert.that(input.apply(withSetTargetTimestampAttributeName).get(OUTPUT))
.containsInAnyOrder(
new PubsubMessage(
bytes, ImmutableMap.of(customTargetTimestampAttributeName, mockTimestampString)));
pipeline.run(PIPELINE_OPTIONS);
}
|
public Object extract(Object target, String attributeName, Object metadata) {
return extract(target, attributeName, metadata, true);
}
|
@Test
public void when_extractExtractor_then_correctValue() {
// GIVEN
AttributeConfig config
= new AttributeConfig("gimmePower", "com.hazelcast.query.impl.getters.ExtractorsTest$PowerExtractor");
Extractors extractors = createExtractors(config);
// WHEN
Object power = extractors.extract(bond, "gimmePower", null);
// THEN
assertThat((Integer) power).isEqualTo(550);
}
|
@Override
public WatchKey register(final Watchable folder,
final WatchEvent.Kind<?>[] events,
final WatchEvent.Modifier... modifiers)
throws IOException {
if(log.isInfoEnabled()) {
log.info(String.format("Register file %s for events %s", folder, Arrays.toString(events)));
}
final Pointer[] values = {
CFStringRef.toCFString(folder.toString()).getPointer()};
final MacOSXWatchKey key = new MacOSXWatchKey(folder, this, events);
final double latency = 1.0; // Latency in seconds
final Map<File, Long> timestamps = createLastModifiedMap(new File(folder.toString()));
final FSEvents.FSEventStreamCallback callback = new Callback(key, timestamps);
final FSEventStreamRef stream = library.FSEventStreamCreate(
Pointer.NULL, callback, Pointer.NULL,
library.CFArrayCreate(null, values, CFIndex.valueOf(1), null),
-1, latency,
kFSEventStreamCreateFlagNoDefer);
final CountDownLatch lock = new CountDownLatch(1);
final CFRunLoop loop = new CFRunLoop(lock, stream);
threadFactory.newThread(loop).start();
Uninterruptibles.awaitUninterruptibly(lock);
loops.put(key, loop);
callbacks.put(key, callback);
return key;
}
|
@Test
public void testRegister() throws Exception {
final RegisterWatchService fs = new FSEventWatchService();
final Watchable folder = Paths.get(
File.createTempFile(UUID.randomUUID().toString(), "t").getParent());
final WatchKey key = fs.register(folder, new WatchEvent.Kind[]{ENTRY_CREATE, ENTRY_DELETE, ENTRY_MODIFY});
assertTrue(key.isValid());
fs.close();
assertFalse(key.isValid());
}
|