| focal_method (string, 13–60.9k chars) | test_case (string, 25–109k chars) |
|---|---|
/** Returns the {@link TraceContext} backing this span (the {@code context} field). */
@Override public TraceContext context() {
return context;
}
|
// Verifies the span carries a real trace context: a non-zero span id.
@Test void hasRealContext() {
assertThat(span.context().spanId()).isNotZero();
}
|
/**
 * Logs at WARN level, evaluating the supplier only when WARN is enabled,
 * so callers avoid computing expensive log arguments that would be discarded.
 *
 * @param logger the logger to write to
 * @param format the log message format string
 * @param supplier lazily supplies the single format argument
 */
public static void warn(final Logger logger, final String format, final Supplier<Object> supplier) {
    if (!logger.isWarnEnabled()) {
        return;
    }
    logger.warn(format, supplier.get());
}
|
// With WARN enabled, warn(...) must evaluate the supplier at least once.
// NOTE(review): this calls a two-argument LogUtils.warn overload, not the
// three-argument focal method above — confirm against the LogUtils API.
@Test
public void testAtLeastOnceWarn() {
when(logger.isWarnEnabled()).thenReturn(true);
LogUtils.warn(logger, supplier);
verify(supplier, atLeastOnce()).get();
}
|
/**
 * Records column statistics for one map key.
 * Unconditionally marks that at least one entry was added; the per-key
 * entry itself is retained only when key-level stats collection is enabled.
 *
 * @param key DWRF key info identifying the map key; must not be null
 * @param columnStatistics statistics for the values under this key; must not be null
 */
public void addMapStatistics(DwrfProto.KeyInfo key, ColumnStatistics columnStatistics)
{
requireNonNull(key, "key is null");
requireNonNull(columnStatistics, "columnStatistics is null");
hasEntries = true;
// Only keep per-key entries when the builder was configured to collect them.
if (collectKeyStats) {
entries.add(new MapStatisticsEntry(key, columnStatistics));
}
}
|
// Adds statistics for two keys with key-level collection enabled and verifies
// the built MapColumnStatistics preserves the value count, entry count,
// entry order, keys, and per-key statistics.
@Test(dataProvider = "keySupplier")
public void testAddMapStatistics(KeyInfo[] keys)
{
KeyInfo key1 = keys[0];
KeyInfo key2 = keys[1];
ColumnStatistics columnStatistics1 = new ColumnStatistics(3L, null, null, null);
ColumnStatistics columnStatistics2 = new ColumnStatistics(5L, null, null, null);
MapColumnStatisticsBuilder builder = new MapColumnStatisticsBuilder(true);
builder.addMapStatistics(key1, columnStatistics1);
builder.addMapStatistics(key2, columnStatistics2);
builder.increaseValueCount(10);
MapColumnStatistics columnStatistics = (MapColumnStatistics) builder.buildColumnStatistics();
assertEquals(columnStatistics.getNumberOfValues(), 10L);
MapStatistics mapStatistics = columnStatistics.getMapStatistics();
List<MapStatisticsEntry> entries = mapStatistics.getEntries();
assertEquals(entries.size(), 2);
// Entries must come back in insertion order with their original key/stats pairing.
assertEquals(entries.get(0).getKey(), key1);
assertEquals(entries.get(0).getColumnStatistics(), columnStatistics1);
assertEquals(entries.get(1).getKey(), key2);
assertEquals(entries.get(1).getColumnStatistics(), columnStatistics2);
}
|
/**
 * Handles an OffsetCommit request: validates the group, refreshes the classic
 * member's session where applicable, and emits one offset-commit record per
 * partition with valid metadata, together with the per-partition response.
 *
 * @param context the request context
 * @param request the OffsetCommit request data
 * @return a coordinator result carrying the generated records and the response
 * @throws ApiException if offset-commit validation fails
 */
public CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> commitOffset(
RequestContext context,
OffsetCommitRequestData request
) throws ApiException {
Group group = validateOffsetCommit(context, request);
// In the old consumer group protocol, the offset commits maintain the session if
// the group is in Stable or PreparingRebalance state.
if (group.type() == Group.GroupType.CLASSIC) {
ClassicGroup classicGroup = (ClassicGroup) group;
if (classicGroup.isInState(ClassicGroupState.STABLE) || classicGroup.isInState(ClassicGroupState.PREPARING_REBALANCE)) {
groupMetadataManager.rescheduleClassicGroupMemberHeartbeat(
classicGroup,
classicGroup.member(request.memberId())
);
}
}
final OffsetCommitResponseData response = new OffsetCommitResponseData();
final List<CoordinatorRecord> records = new ArrayList<>();
final long currentTimeMs = time.milliseconds();
final OptionalLong expireTimestampMs = expireTimestampMs(request.retentionTimeMs(), currentTimeMs);
request.topics().forEach(topic -> {
final OffsetCommitResponseTopic topicResponse = new OffsetCommitResponseTopic().setName(topic.name());
response.topics().add(topicResponse);
topic.partitions().forEach(partition -> {
// Oversized metadata is rejected per-partition; the rest of the request proceeds.
if (isMetadataInvalid(partition.committedMetadata())) {
topicResponse.partitions().add(new OffsetCommitResponsePartition()
.setPartitionIndex(partition.partitionIndex())
.setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code()));
} else {
log.debug("[GroupId {}] Committing offsets {} for partition {}-{} from member {} with leader epoch {}.",
request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(),
request.memberId(), partition.committedLeaderEpoch());
topicResponse.partitions().add(new OffsetCommitResponsePartition()
.setPartitionIndex(partition.partitionIndex())
.setErrorCode(Errors.NONE.code()));
final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest(
partition,
currentTimeMs,
expireTimestampMs
);
records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
request.groupId(),
topic.name(),
partition.partitionIndex(),
offsetAndMetadata,
metadataImage.features().metadataVersion()
));
}
});
});
// Record the sensor only when at least one commit record was produced.
if (!records.isEmpty()) {
metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size());
}
return new CoordinatorResult<>(records, response);
}
|
// Commits offsets for two partitions through a stable classic group and verifies
// the offset-commits sensor is recorded with the number of generated records (2).
@Test
public void testOffsetCommitsSensor() {
OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();
// Create an empty group.
ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(
"foo",
true
);
// Add member.
group.add(mkGenericMember("member", Optional.of("new-instance-id")));
// Transition to next generation.
group.transitionTo(ClassicGroupState.PREPARING_REBALANCE);
group.initNextGeneration();
assertEquals(1, group.generationId());
group.transitionTo(ClassicGroupState.STABLE);
CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> result = context.commitOffset(
new OffsetCommitRequestData()
.setGroupId("foo")
.setMemberId("member")
.setGenerationIdOrMemberEpoch(1)
.setRetentionTimeMs(1234L)
.setTopics(Collections.singletonList(
new OffsetCommitRequestData.OffsetCommitRequestTopic()
.setName("bar")
.setPartitions(Arrays.asList(
new OffsetCommitRequestData.OffsetCommitRequestPartition()
.setPartitionIndex(0)
.setCommittedOffset(100L),
new OffsetCommitRequestData.OffsetCommitRequestPartition()
.setPartitionIndex(1)
.setCommittedOffset(150L)
))
))
);
// Two partitions committed -> sensor recorded once with a count of 2.
verify(context.metrics).record(OFFSET_COMMITS_SENSOR_NAME, 2);
}
|
/**
 * Merges {@code paramsToMerge} into {@code params} in place.
 * Literal MAP params are merged recursively (child mode derived from the base
 * param's mode, falling back to the merged param's mode); literal STRING_MAP
 * params are merged with putAll; all other params are replaced via
 * buildMergedParamDefinition using the merged param's value.
 *
 * @param params base params, mutated in place
 * @param paramsToMerge params to merge on top; null is a no-op
 * @param context merge context controlling how definitions are combined
 */
public static void mergeParams(
Map<String, ParamDefinition> params,
Map<String, ParamDefinition> paramsToMerge,
MergeContext context) {
if (paramsToMerge == null) {
return;
}
// Iterate the union of key sets; keys present only in the base map are skipped below.
Stream.concat(params.keySet().stream(), paramsToMerge.keySet().stream())
.forEach(
name -> {
ParamDefinition paramToMerge = paramsToMerge.get(name);
if (paramToMerge == null) {
return;
}
if (paramToMerge.getType() == ParamType.MAP && paramToMerge.isLiteral()) {
Map<String, ParamDefinition> baseMap = mapValueOrEmpty(params, name);
Map<String, ParamDefinition> toMergeMap = mapValueOrEmpty(paramsToMerge, name);
mergeParams(
baseMap,
toMergeMap,
MergeContext.copyWithParentMode(
context, params.getOrDefault(name, paramToMerge).getMode()));
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else if (paramToMerge.getType() == ParamType.STRING_MAP
&& paramToMerge.isLiteral()) {
Map<String, String> baseMap = stringMapValueOrEmpty(params, name);
Map<String, String> toMergeMap = stringMapValueOrEmpty(paramsToMerge, name);
baseMap.putAll(toMergeMap);
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else {
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, paramToMerge.getValue()));
}
});
}
|
// Verifies that LONG, LONG_ARRAY, BOOLEAN and DOUBLE params merge into
// STRING / STRING_ARRAY params with their values cast to string form.
@Test
public void testAllowedTypeCastingIntoString() throws JsonProcessingException {
Map<String, ParamDefinition> allParams =
ParamsMergeHelperTest.this.parseParamDefMap(
"{'tomerge': {'type': 'STRING','value': '', 'name': 'tomerge'}}");
Map<String, ParamDefinition> paramsToMerge =
ParamsMergeHelperTest.this.parseParamDefMap(
"{'tomerge': {'type': 'LONG', 'value': '234', 'name': 'tomerge'}}");
ParamsMergeHelper.mergeParams(allParams, paramsToMerge, definitionContext);
assertEquals(1, allParams.size());
assertEquals("234", allParams.get("tomerge").asStringParamDef().getValue());
// LONG_ARRAY into STRING_ARRAY keeps the element count.
allParams =
ParamsMergeHelperTest.this.parseParamDefMap(
"{'tomerge': {'type': 'STRING_ARRAY', 'value' : [], 'name': 'tomerge'}}");
paramsToMerge =
ParamsMergeHelperTest.this.parseParamDefMap(
"{'tomerge': {'type': 'LONG_ARRAY', 'value': [1, 2, 3], 'name': 'tomerge'}}");
ParamsMergeHelper.mergeParams(allParams, paramsToMerge, definitionContext);
assertEquals(1, allParams.size());
assertEquals(3, allParams.get("tomerge").asStringArrayParamDef().getValue().length);
// BOOLEAN into STRING becomes "true"/"false".
allParams =
ParamsMergeHelperTest.this.parseParamDefMap(
"{'tomerge': {'type': 'STRING', 'value' : false, 'name': 'tomerge'}}");
paramsToMerge =
ParamsMergeHelperTest.this.parseParamDefMap(
"{'tomerge': {'type': 'BOOLEAN', 'value': true, 'name': 'tomerge'}}");
ParamsMergeHelper.mergeParams(allParams, paramsToMerge, definitionContext);
assertEquals(1, allParams.size());
assertEquals("true", allParams.get("tomerge").asStringParamDef().getValue());
// DOUBLE into STRING uses the decimal representation.
allParams =
ParamsMergeHelperTest.this.parseParamDefMap(
"{'tomerge': {'type': 'STRING', 'value' : '', 'name': 'tomerge'}}");
paramsToMerge =
ParamsMergeHelperTest.this.parseParamDefMap(
"{'tomerge': {'type': 'DOUBLE', 'value': 122.12, 'name': 'tomerge'}}");
ParamsMergeHelper.mergeParams(allParams, paramsToMerge, definitionContext);
assertEquals(1, allParams.size());
assertEquals("122.12", allParams.get("tomerge").asStringParamDef().getValue());
}
|
/**
 * Parses a semantic version string (major.minor.patch with optional
 * pre-release and build metadata) using VERSION_PARSE_PATTERN.
 *
 * @param version the version string to parse
 * @return the parsed {@code SemanticVersion}
 * @throws RuntimeException if the string is not a valid semantic version;
 *         the original parse failure is preserved as the cause
 */
public static SemanticVersion parse(String version) {
    try {
        var matcher = VERSION_PARSE_PATTERN.matcher(version);
        if (!matcher.find()) {
            // Fail explicitly instead of relying on group() throwing later.
            throw new IllegalStateException("No match for semantic version pattern: " + version);
        }
        var semver = new SemanticVersion();
        semver.setMajor(Integer.parseInt(matcher.group("major")));
        semver.setMinor(Integer.parseInt(matcher.group("minor")));
        semver.setPatch(Integer.parseInt(matcher.group("patch")));
        semver.setPreRelease(matcher.group("prerelease"));
        semver.setBuildMetadata(matcher.group("buildmetadata"));
        return semver;
    } catch (IllegalArgumentException | IllegalStateException e) {
        // Keep the message stable for callers/tests, but chain the cause
        // instead of discarding it, so diagnostics show what actually failed.
        throw new RuntimeException("Invalid semantic version. See https://semver.org/.", e);
    }
}
|
// "9.2" lacks a patch component, so parse must reject it with the fixed message.
@Test
public void testParseInvalidVersion() {
var exception = assertThrows(RuntimeException.class, () -> SemanticVersion.parse("9.2"));
assertEquals("Invalid semantic version. See https://semver.org/.", exception.getMessage());
}
|
/**
 * Fetches all pending map outputs from a single host over one shuffle connection.
 * On a mid-stream IOException the connection is re-established for the maps
 * still remaining; tasks reported as failed are handed to the scheduler, and
 * anything still unfetched is put back on the host's pending list in finally.
 *
 * @param host the map host to copy from
 * @throws IOException if the server returned fewer map outputs than expected
 */
@VisibleForTesting
protected void copyFromHost(MapHost host) throws IOException {
// reset retryStartTime for a new host
retryStartTime = 0;
// Get completed maps on 'host'
List<TaskAttemptID> maps = scheduler.getMapsForHost(host);
// Sanity check to catch hosts with only 'OBSOLETE' maps,
// especially at the tail of large jobs
if (maps.size() == 0) {
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Fetcher " + id + " going to fetch from " + host + " for: " + maps);
}
// List of maps to be fetched yet
Set<TaskAttemptID> remaining = new HashSet<TaskAttemptID>(maps);
// Construct the url and connect
URL url = getMapOutputURL(host, maps);
DataInputStream input = null;
try {
input = openShuffleUrl(host, remaining, url);
if (input == null) {
// openShuffleUrl already handled the failure; remaining maps are
// put back by the finally block.
return;
}
// Loop through available map-outputs and fetch them
// On any error, failedTasks is not null and we exit
// after putting back the remaining maps to the
// yet_to_be_fetched list and marking the failed tasks.
TaskAttemptID[] failedTasks = null;
while (!remaining.isEmpty() && failedTasks == null) {
try {
failedTasks = copyMapOutput(host, input, remaining, fetchRetryEnabled);
} catch (IOException e) {
IOUtils.cleanupWithLogger(LOG, input);
//
// Setup connection again if disconnected by NM
connection.disconnect();
// Get map output from remaining tasks only.
url = getMapOutputURL(host, remaining);
input = openShuffleUrl(host, remaining, url);
if (input == null) {
return;
}
}
}
if(failedTasks != null && failedTasks.length > 0) {
LOG.warn("copyMapOutput failed for tasks "+Arrays.toString(failedTasks));
// Penalize the host and report each failed copy to the scheduler.
scheduler.hostFailed(host.getHostName());
for(TaskAttemptID left: failedTasks) {
scheduler.copyFailed(left, host, true, false);
}
}
// Sanity check
if (failedTasks == null && !remaining.isEmpty()) {
throw new IOException("server didn't return all expected map outputs: "
+ remaining.size() + " left.");
}
input.close();
input = null;
} finally {
// Close the stream if we bailed out early, then re-queue whatever
// was not fetched so another attempt can pick it up.
if (input != null) {
IOUtils.cleanupWithLogger(LOG, input);
input = null;
}
for (TaskAttemptID left : remaining) {
scheduler.putBackKnownMapOutput(host, left);
}
}
}
|
// With mm.reserve(...) returning null (no shuffle memory available),
// copyFromHost should not record failures and must put both map outputs
// back on the pending list for a later retry.
@Test
public void testCopyFromHostWait() throws Exception {
Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(job, id, ss, mm,
r, metrics, except, key, connection);
String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH))
.thenReturn(replyHash);
ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 10, 10, 1);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
header.write(new DataOutputStream(bout));
ByteArrayInputStream in = new ByteArrayInputStream(bout.toByteArray());
when(connection.getInputStream()).thenReturn(in);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
//Defaults to null, which is what we want to test
when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt()))
.thenReturn(null);
underTest.copyFromHost(host);
verify(connection)
.addRequestProperty(SecureShuffleUtils.HTTP_HEADER_URL_HASH,
encHash);
// No errors counted, no copies failed, both maps re-queued.
verify(allErrs, never()).increment(1);
verify(ss, never()).copyFailed(map1ID, host, true, false);
verify(ss, never()).copyFailed(map2ID, host, true, false);
verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map1ID));
verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map2ID));
}
|
/**
 * Returns a copy of the query with its query string decorated, its filter
 * normalized, and every search type normalized against the parameter provider.
 */
@Override
public Query normalizeQuery(final Query query, final ParameterProvider parameterProvider) {
return query.toBuilder()
.query(ElasticsearchQueryString.of(this.queryStringDecorators.decorate(query.query().queryString(), parameterProvider, query)))
.filter(normalizeFilter(query.filter(), query, parameterProvider))
.searchTypes(query.searchTypes().stream().map(searchType -> normalizeSearchType(searchType, query, parameterProvider)).collect(Collectors.toSet()))
.build();
}
|
// normalizeQuery must replace the raw query string with the decorated value.
// NOTE(review): decorateQueryStringsNormalizer is presumably set up (outside
// this view) with a decorator that returns "Hey there!" — verify fixture setup.
@Test
void decoratesQueryStrings() {
final Query query = Query.builder()
.query(ElasticsearchQueryString.of("action:index"))
.build();
final Query normalizedQuery = decorateQueryStringsNormalizer.normalizeQuery(query, name -> Optional.empty());
final String normalizedQueryString = normalizedQuery.query().queryString();
assertThat(normalizedQueryString).isEqualTo("Hey there!");
}
|
/**
 * Decodes and validates an incoming SAML authentication request.
 * Pipeline: decode the XML request, build the AuthenticationRequest, create and
 * validate the SAML binding context, validate and parse the request, then
 * cross-check against the other domain services.
 *
 * @param httpRequest the servlet request carrying the SAMLRequest parameter
 * @return the fully validated authentication request
 */
public AuthenticationRequest startAuthenticationProcess(HttpServletRequest httpRequest) throws ComponentInitializationException, MessageDecodingException, SamlValidationException, SharedServiceClientException, DienstencatalogusException, SamlSessionException {
BaseHttpServletRequestXMLMessageDecoder decoder = decodeXMLRequest(httpRequest);
AuthenticationRequest authenticationRequest = createAuthenticationRequest(httpRequest, decoder);
SAMLBindingContext bindingContext = createAndValidateBindingContext(decoder);
validateAuthenticationRequest(authenticationRequest);
parseAuthentication(authenticationRequest);
validateWithOtherDomainServices(authenticationRequest, bindingContext);
return authenticationRequest;
}
|
// Runs a recorded entrance AuthnRequest through the full authentication
// pipeline and checks destination and requester ID of the parsed request.
@Test
protected void parseAuthenticationSuccessfulEntranceTest() throws SamlSessionException, SharedServiceClientException, DienstencatalogusException, ComponentInitializationException, SamlValidationException, MessageDecodingException, UnsupportedEncodingException, SamlParseException {
String samlRequest = readXMLFile(authnRequestEntranceFile);
// Despite the variable name, this is the *encoded* form of the request.
String decodeSAMLRequest = encodeAuthnRequest(samlRequest);
httpServletRequestMock.setParameter("SAMLRequest", decodeSAMLRequest);
AuthenticationRequest result = authenticationService.startAuthenticationProcess(httpServletRequestMock);
assertNotNull(result);
assertEquals(frontChannel.concat(ENTRANCE_REQUEST_AUTHENTICATION_URL), result.getAuthnRequest().getDestination());
assertEquals("urn:nl-eid-gdi:1.0:DV:00000009999999999001:entities:0000", result.getAuthnRequest().getScoping().getRequesterIDs().get(0).getRequesterID());
}
|
/**
 * Writes one record per delegation token when the target metadata version
 * supports delegation tokens; otherwise reports the tokens (by id) as lost
 * via {@code options.handleLoss} rather than silently dropping them.
 */
public void write(ImageWriter writer, ImageWriterOptions options) {
if (options.metadataVersion().isDelegationTokenSupported()) {
for (Entry<String, DelegationTokenData> entry : tokens.entrySet()) {
writer.write(0, entry.getValue().toRecord());
}
} else {
if (!tokens.isEmpty()) {
// Build a human-readable description of what cannot be represented.
List<String> tokenIds = new ArrayList<>(tokens.keySet());
String delegationTokenImageString = "DelegationTokenImage(" + String.join(", ", tokenIds) + ")";
options.handleLoss(delegationTokenImageString);
}
}
}
|
// IBP_3_5_IV2 predates delegation-token support, so writing IMAGE1 (which
// contains token data) is expected to fail via the writer's loss handling.
@Test
public void testImage1withInvalidIBP() {
ImageWriterOptions imageWriterOptions = new ImageWriterOptions.Builder().
setMetadataVersion(MetadataVersion.IBP_3_5_IV2).build();
RecordListWriter writer = new RecordListWriter();
try {
IMAGE1.write(writer, imageWriterOptions);
fail("expected exception writing IMAGE with Delegation Token records for MetadataVersion.IBP_3_5_IV2");
} catch (Exception expected) {
// ignore, expected
}
}
|
/**
 * Computes the great-circle distance between two (lat, lon) points using the
 * haversine formula, returning the distance in the requested units.
 *
 * @param lat1 latitude of the first point, decimal degrees
 * @param lon1 longitude of the first point, decimal degrees
 * @param lat2 latitude of the second point, decimal degrees
 * @param lon2 longitude of the second point, decimal degrees
 * @param units output units ("MI"/miles or "KM"/kilometers); resolved by
 *        selectEarthRadiusToUse — default behavior is KM per the Udf description
 * @return the distance between the two points in the chosen units
 */
@SuppressWarnings("MethodMayBeStatic")
@Udf(description = "The 2 input points should be specified as (lat, lon) pairs, measured"
+ " in decimal degrees. An optional fifth parameter allows to specify either \"MI\" (miles)"
+ " or \"KM\" (kilometers) as the desired unit for the output measurement. Default is KM.")
public Double geoDistance(
@UdfParameter(description = "The latitude of the first point in decimal degrees.")
final double lat1,
@UdfParameter(description = "The longitude of the first point in decimal degrees.")
final double lon1,
@UdfParameter(description = "The latitude of the second point in decimal degrees.")
final double lat2,
@UdfParameter(description = "The longitude of the second point in decimal degrees.")
final double lon2,
@UdfParameter(description = "The units for the return value. Either MILES or KM.")
final String units
) {
validateLatLonValues(lat1, lon1, lat2, lon2);
final double chosenRadius = selectEarthRadiusToUse(units);
final double deltaLat = Math.toRadians(lat2 - lat1);
final double deltaLon = Math.toRadians(lon2 - lon1);
final double lat1Radians = Math.toRadians(lat1);
final double lat2Radians = Math.toRadians(lat2);
// Haversine: a = hav(dLat) + hav(dLon) * cos(lat1) * cos(lat2)
final double a =
haversin(deltaLat) + haversin(deltaLon) * Math.cos(lat1Radians) * Math.cos(lat2Radians);
// Central angle, then scale by the earth radius for the chosen units.
final double distanceInRadians = 2 * Math.asin(Math.sqrt(a));
return distanceInRadians * chosenRadius;
}
|
// Null units must fall back to kilometers: Palo Alto -> London is ~8634.65 km.
@Test
public void shouldComputeDistanceBetweenLocationsWithNullUnitsUsingKM() {
assertEquals(8634.6528,
(double) distanceUdf.geoDistance(37.4439, -122.1663, 51.5257, -0.1122, null), 0.5);
}
|
/**
 * Converts a PVFS connection file name into the provider's own FileName by
 * concatenating the connection's provider root URI with the PVFS path, then
 * parsing the result. A trailing separator is appended for folder-typed names
 * so the file type survives the round trip.
 *
 * @param pvfsFileName the PVFS file name to convert
 * @param details connection details used to build the provider root
 * @return the parsed provider file name
 * @throws KettleException if the provider URI cannot be parsed
 */
@NonNull
@Override
public FileName toProviderFileName( @NonNull ConnectionFileName pvfsFileName, @NonNull T details )
throws KettleException {
StringBuilder providerUriBuilder = new StringBuilder();
appendProviderUriConnectionRoot( providerUriBuilder, details );
// Examples:
// providerUriBuilder: "hcp://domain.my:443/root/path" | "local:///C:/root/path" | "s3://"
// getPath(): "/folder/sub-folder" | "/"
appendProviderUriRestPath( providerUriBuilder, pvfsFileName.getPath(), details );
// Examples: "hcp://domain.my:443/root/path/folder/sub-folder" | "s3://folder/sub-folder"
// Preserve file type information.
if ( pvfsFileName.getType().hasChildren() ) {
providerUriBuilder.append( SEPARATOR );
}
return parseUri( providerUriBuilder.toString() );
}
|
// Verifies domain handling in toProviderFileName: the domain is embedded in the
// provider URI, and leading/trailing slashes around the domain are normalized away.
@Test
public void testToProviderFileNameHandlesConnectionsWithDomain() throws Exception {
mockDetailsWithDomain( details1, "my-domain:8080" );
ConnectionFileName pvfsFileName = mockPvfsFileNameWithPath( "/rest/path" );
FileName providerFileName = transformer.toProviderFileName( pvfsFileName, details1 );
assertEquals( "scheme1://my-domain:8080/rest/path", providerFileName.getURI() );
// Should do provider uri normalization.
verify( kettleVFS, times( 1 ) ).resolveURI( any() );
// ---
// Change domain to have leading and trailing slashes
mockDetailsWithDomain( details1, "/my-domain:8080/" );
providerFileName = transformer.toProviderFileName( pvfsFileName, details1 );
assertEquals( "scheme1://my-domain:8080/rest/path", providerFileName.getURI() );
// Should do provider uri normalization.
verify( kettleVFS, times( 2 ) ).resolveURI( any() );
}
|
/**
 * Converts camel/Pascal case text to symbol-separated lower case
 * (e.g. "helloWorld" with '_' becomes "hello_world"). Runs of consecutive
 * upper-case letters (acronyms) are kept upper case, with a symbol inserted
 * before the run and after it when followed by lower-case text.
 *
 * @param str the input text; null returns null
 * @param symbol the separator character to insert at case boundaries
 * @return the converted string, or null if {@code str} is null
 */
public static String toSymbolCase(CharSequence str, char symbol) {
    if (str == null) {
        return null;
    }
    final int len = str.length();
    final StringBuilder result = new StringBuilder(len);
    for (int i = 0; i < len; i++) {
        final char current = str.charAt(i);
        final boolean hasPrev = i > 0;
        final char prev = hasPrev ? str.charAt(i - 1) : 0;
        if (Character.isUpperCase(current)) {
            final boolean hasNext = i < len - 1;
            final char next = hasNext ? str.charAt(i + 1) : 0;
            if (hasPrev && Character.isUpperCase(prev)) {
                // Interior of an acronym run: keep the upper-case letter as-is.
                result.append(current);
            } else if (hasNext && Character.isUpperCase(next)) {
                // First letter of an acronym run: separate it, keep upper case.
                if (hasPrev && prev != symbol) {
                    result.append(symbol);
                }
                result.append(current);
            } else {
                // Start of an ordinary word: separate it and lower-case it.
                if (hasPrev && prev != symbol) {
                    result.append(symbol);
                }
                result.append(Character.toLowerCase(current));
            }
        } else {
            // Lower-case char right after an emitted upper-case char ends an
            // acronym run, so insert the separator first (unless it is the symbol).
            final int last = result.length() - 1;
            if (last >= 0 && Character.isUpperCase(result.charAt(last)) && current != symbol) {
                result.append(symbol);
            }
            result.append(current);
        }
    }
    return result.toString();
}
|
// An all-lower-case word has no case boundaries and must pass through unchanged.
@Test
public void toSymbolCase() {
String string = "str";
String s = StringUtil.toSymbolCase(string, StringUtil.UNDERLINE);
Assert.assertEquals("str", s);
}
|
/**
 * Maps the stored document status to the external {@code Status} value.
 * The document's own status is consulted only when the MU status is ACTIEF
 * or the document type is NI; all other combinations report INACTIVE.
 */
public Status currentStatus(FetchRequest request) {
final DocumentStatus ds = fetchStatus(request);
if (MUStatusType.ACTIEF == ds.getStatusMu() || ds.getDocType() == DocTypeType.NI) {
switch (ds.getStatus()) {
case GEACTIVEERD:
return Status.ACTIVE;
case UITGEREIKT:
return Status.ISSUED;
case GEBLOKKEERD:
return Status.BLOCKED;
default:
// Unmapped statuses fall through to INACTIVE below.
break;
}
}
return Status.INACTIVE;
}
|
// A document with status GEACTIVEERD and MU status ACTIEF must map to ACTIVE.
@Test
public void getActiveStatusWithSuccessTest() {
final DocumentStatus dummyDocumentStatus = new DocumentStatus();
dummyDocumentStatus.setId(1L);
dummyDocumentStatus.setSequenceNo("SSSSSSSSSSSSS");
dummyDocumentStatus.setDocType(DocTypeType.NL_RIJBEWIJS);
dummyDocumentStatus.setStatus(StatusType.GEACTIVEERD);
dummyDocumentStatus.setStatusMu(MUStatusType.ACTIEF);
FetchRequest request = new FetchRequest();
request.setDocType(dummyDocumentStatus.getDocType());
request.setEpsc(encrypted);
request.setSequenceNo(dummyDocumentStatus.getSequenceNo());
// Stub decryption and repository lookup so fetchStatus resolves the dummy document.
when(bsnkPseudonymDecryptorMock.decryptEp(anyString(), anyString(), anyString())).thenReturn(pseudonym);
when(documentStatusRepositoryMock.findByPseudonymAndDocTypeAndSequenceNo(anyString(), any(DocTypeType.class), anyString())).thenReturn(Optional.of(dummyDocumentStatus));
Status result = documentStatusService.currentStatus(request);
assertNotNull(result);
assertEquals(Status.ACTIVE, result);
}
|
/**
 * Returns true only when the projection list is exactly one shorthand
 * projection (e.g. {@code *}) that has no owner qualifier.
 */
public boolean isUnqualifiedShorthandProjection() {
    if (projections.size() != 1) {
        return false;
    }
    Projection onlyProjection = projections.iterator().next();
    if (!(onlyProjection instanceof ShorthandProjection)) {
        return false;
    }
    return !((ShorthandProjection) onlyProjection).getOwner().isPresent();
}
|
// A single shorthand projection with a null owner must be reported as unqualified.
@Test
void assertUnqualifiedShorthandProjection() {
Projection projection = new ShorthandProjection(null, Collections.emptyList());
ProjectionsContext projectionsContext = new ProjectionsContext(0, 0, true, Collections.singleton(projection));
assertTrue(projectionsContext.isUnqualifiedShorthandProjection());
}
|
/**
 * Exports one page of OneDrive photos. Without album/pagination data, the
 * special "photos" folder is listed; otherwise the given album/page is fetched.
 * Folder items become albums (and container resources for sub-processing);
 * photo items become photos. Result is CONTINUE while a next page exists.
 *
 * @param authData OAuth tokens for the photos interface
 * @param albumData optional id of the album (folder) being exported
 * @param paginationData optional token pointing at the next drive-items page
 * @param jobId the transfer job id
 * @return the export result with albums, photos, and continuation data
 * @throws IOException if the drive items request fails
 */
@VisibleForTesting
ExportResult<PhotosContainerResource> exportOneDrivePhotos(
TokensAndUrlAuthData authData,
Optional<IdOnlyContainerResource> albumData,
Optional<PaginationData> paginationData,
UUID jobId)
throws IOException {
Optional<String> albumId = Optional.empty();
if (albumData.isPresent()) {
albumId = Optional.of(albumData.get().getId());
}
Optional<String> paginationUrl = getDrivePaginationToken(paginationData);
MicrosoftDriveItemsResponse driveItemsResponse;
if (paginationData.isPresent() || albumData.isPresent()) {
driveItemsResponse =
getOrCreatePhotosInterface(authData).getDriveItems(albumId, paginationUrl);
} else {
// First call with no album: start from the drive's special photos folder.
driveItemsResponse =
getOrCreatePhotosInterface(authData)
.getDriveItemsFromSpecialFolder(MicrosoftSpecialFolder.FolderType.photos);
}
PaginationData nextPageData = setNextPageToken(driveItemsResponse);
ContinuationData continuationData = new ContinuationData(nextPageData);
PhotosContainerResource containerResource;
MicrosoftDriveItem[] driveItems = driveItemsResponse.getDriveItems();
List<PhotoAlbum> albums = new ArrayList<>();
List<PhotoModel> photos = new ArrayList<>();
if (driveItems != null && driveItems.length > 0) {
for (MicrosoftDriveItem driveItem : driveItems) {
// A drive item may convert to an album (folder) or a photo; each
// album is also queued as a container resource for sub-processing.
PhotoAlbum album = tryConvertDriveItemToPhotoAlbum(driveItem, jobId);
if (album != null) {
albums.add(album);
continuationData.addContainerResource(new IdOnlyContainerResource(driveItem.id));
}
PhotoModel photo = tryConvertDriveItemToPhotoModel(albumId, driveItem, jobId);
if (photo != null) {
photos.add(photo);
}
}
}
ExportResult.ResultType result =
nextPageData == null ? ExportResult.ResultType.END : ExportResult.ResultType.CONTINUE;
containerResource = new PhotosContainerResource(albums, photos);
return new ExportResult<>(result, containerResource, continuationData);
}
|
// A page containing one folder and one photo plus a next-page link must yield:
// the pagination token, one album, one root photo, and one container resource.
@Test
public void exportAlbumAndPhotoWithNextPage() throws IOException {
// Setup
MicrosoftDriveItem folderItem = setUpSingleAlbum();
MicrosoftDriveItem photoItem = setUpSinglePhoto(IMAGE_URI, PHOTO_ID);
when(driveItemsResponse.getDriveItems())
.thenReturn(new MicrosoftDriveItem[] {folderItem, photoItem});
when(driveItemsResponse.getNextPageLink()).thenReturn(DRIVE_PAGE_URL);
// Run
ExportResult<PhotosContainerResource> result =
microsoftPhotosExporter.exportOneDrivePhotos(
null, Optional.empty(), Optional.empty(), uuid);
// Verify method calls
verify(photosInterface)
.getDriveItemsFromSpecialFolder(MicrosoftSpecialFolder.FolderType.photos);
verify(driveItemsResponse).getDriveItems();
// Verify pagination token is set
ContinuationData continuationData = result.getContinuationData();
StringPaginationToken paginationToken =
(StringPaginationToken) continuationData.getPaginationData();
assertThat(paginationToken.getToken()).isEqualTo(DRIVE_TOKEN_PREFIX + DRIVE_PAGE_URL);
// Verify one album is ready for import
Collection<PhotoAlbum> actualAlbums = result.getExportedData().getAlbums();
assertThat(actualAlbums.stream().map(PhotoAlbum::getId).collect(Collectors.toList()))
.containsExactly(FOLDER_ID);
// Verify one photo should be present (in the root Photos special folder)
Collection<PhotoModel> actualPhotos = result.getExportedData().getPhotos();
assertThat(actualPhotos.stream().map(PhotoModel::getFetchableUrl).collect(Collectors.toList()))
.containsExactly(IMAGE_URI);
assertThat(actualPhotos.stream().map(PhotoModel::getAlbumId).collect(Collectors.toList()))
.containsExactly(null);
assertThat(actualPhotos.stream().map(PhotoModel::getTitle).collect(Collectors.toList()))
.containsExactly(FILENAME);
// Verify there is one container ready for sub-processing
List<ContainerResource> actualResources = continuationData.getContainerResources();
assertThat(
actualResources.stream()
.map(a -> ((IdOnlyContainerResource) a).getId())
.collect(Collectors.toList()))
.containsExactly(FOLDER_ID);
}
|
/** Returns a read-only view of this message's headers (backed by the live map). */
@Override
public Map<Headers, String> getHeaders() {
return Collections.unmodifiableMap(headers);
}
|
// getHeaders must never be null, start empty, and reflect headers added later.
@Test
void testGetHeaders() {
final var message = new SimpleMessage();
assertNotNull(message.getHeaders());
assertTrue(message.getHeaders().isEmpty());
final var senderName = "test";
message.addHeader(Message.Headers.SENDER, senderName);
assertNotNull(message.getHeaders());
assertFalse(message.getHeaders().isEmpty());
assertEquals(senderName, message.getHeaders().get(Message.Headers.SENDER));
}
|
/**
 * Validates a scopes claim: the collection must be non-null, each scope must
 * pass validateString, and duplicates are rejected.
 *
 * @param scopeClaimName claim name used in error messages
 * @param scopes the raw scope values to validate
 * @return an unmodifiable set of the validated scopes
 * @throws ValidateException if the collection is null, a scope is invalid,
 *         or a duplicate scope is found
 */
public static Set<String> validateScopes(String scopeClaimName, Collection<String> scopes) throws ValidateException {
    if (scopes == null)
        throw new ValidateException(String.format("%s value must be non-null", scopeClaimName));
    Set<String> validated = new HashSet<>();
    for (String rawScope : scopes) {
        String scope = validateString(scopeClaimName, rawScope);
        // Set.add returns false when the element is already present.
        if (!validated.add(scope))
            throw new ValidateException(String.format("%s value must not contain duplicates - %s already present", scopeClaimName, scope));
    }
    return Collections.unmodifiableSet(validated);
}
|
// The returned set must be unmodifiable: mutating it throws.
@Test
public void testValidateScopesResultThrowsExceptionOnMutation() {
SortedSet<String> callerSet = new TreeSet<>(Arrays.asList("a", "b", "c"));
Set<String> scopes = ClaimValidationUtils.validateScopes("scope", callerSet);
assertThrows(UnsupportedOperationException.class, scopes::clear);
}
|
/** Returns true if any custom mapping exists for the given field name. */
public boolean containsCustomMappingForField(final String fieldName) {
return stream().anyMatch(m -> m.fieldName().equals(fieldName));
}
|
// Mapped field names are found; an unmapped name is not.
@Test
void testContainsMappingForFieldWorksCorrectly() {
CustomFieldMappings mapping = new CustomFieldMappings(List.of(
new CustomFieldMapping("field1", "string"),
new CustomFieldMapping("field2", "long")
));
assertTrue(mapping.containsCustomMappingForField("field1"));
assertTrue(mapping.containsCustomMappingForField("field2"));
assertFalse(mapping.containsCustomMappingForField("bubamara!"));
}
|
/**
 * Ensures the default stream exists: loads it, and creates it only when the
 * lookup fails with NotFoundException.
 */
@Override
public void upgrade() {
try {
streamService.load(Stream.DEFAULT_STREAM_ID);
} catch (NotFoundException ignored) {
// Default stream is missing -> create it.
createDefaultStream();
}
}
|
// When the default stream is missing AND no default index set exists, the
// IllegalStateException from the registry must propagate out of upgrade().
@Test
public void upgradeWithoutDefaultIndexSet() throws Exception {
when(streamService.load("000000000000000000000001")).thenThrow(NotFoundException.class);
when(indexSetRegistry.getDefault()).thenThrow(IllegalStateException.class);
expectedException.expect(IllegalStateException.class);
migration.upgrade();
}
|
/**
 * EJB interceptor that records request statistics (including CPU time) for
 * each intercepted invocation, unless monitoring is disabled.
 */
@AroundInvoke
public Object intercept(InvocationContext context) throws Exception { // NOPMD
// this method is invoked by the EJB container thanks to the AroundInvoke annotation
if (DISABLED || !EJB_COUNTER.isDisplayed()) {
return context.proceed();
}
// name identifying the request
final String requestName = getRequestName(context);
boolean systemError = false;
try {
EJB_COUNTER.bindContextIncludingCpu(requestName);
return context.proceed();
} catch (final Error e) {
// catch Error to capture system errors,
// but not Exception, which are generally functional (business) errors
systemError = true;
throw e;
} finally {
// record the request in the statistics
EJB_COUNTER.addRequestForCurrentContext(systemError);
}
}
|
// With the EJB counter displayed, intercepting one invocation must record
// exactly one request in the counter.
@Test
public void testMonitoringAsynchronousCdi() throws Exception {
final Counter ejbCounter = MonitoringProxy.getEjbCounter();
ejbCounter.clear();
final MonitoringAsynchronousCdiInterceptor interceptor = new MonitoringAsynchronousCdiInterceptor();
ejbCounter.setDisplayed(true);
interceptor.intercept(new InvokeContext(false));
assertSame("requestsCount", 1, ejbCounter.getRequestsCount());
}
|
/**
 * Removes the element from the backing map; returns true only when it was
 * present (i.e. mapped to the PRESENT sentinel value).
 */
@Override
public boolean remove(Object o)
{
return _map.remove(o) == PRESENT;
}
|
// First remove succeeds and empties the set; removing again reports false
// and leaves the set unchanged.
@Test
public void testRemove()
{
final CowSet<String> set = new CowSet<>();
set.add("test");
Assert.assertTrue(set.remove("test"));
Assert.assertFalse(set.contains("test"));
Assert.assertEquals(set.size(), 0);
Assert.assertFalse(set.remove("test"));
Assert.assertFalse(set.contains("test"));
Assert.assertEquals(set.size(), 0);
}
|
/**
 * Deletes a plain-access (ACL) config account, either from a single broker
 * (-b addr) or from every master/slave broker of a cluster (-c name).
 * Prints usage help when neither option is present; the admin client is
 * always shut down in the finally block.
 *
 * @throws SubCommandException wrapping any failure during execution
 */
@Override
public void execute(CommandLine commandLine, Options options,
RPCHook rpcHook) throws SubCommandException {
DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);
defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
try {
String accessKey = commandLine.getOptionValue('a').trim();
if (commandLine.hasOption('b')) {
// Single-broker mode.
String addr = commandLine.getOptionValue('b').trim();
defaultMQAdminExt.start();
defaultMQAdminExt.deletePlainAccessConfig(addr, accessKey);
System.out.printf("delete plain access config account from %s success.%n", addr);
System.out.printf("account's accessKey is:%s", accessKey);
return;
} else if (commandLine.hasOption('c')) {
// Cluster mode: apply to every master and slave broker in the cluster.
String clusterName = commandLine.getOptionValue('c').trim();
defaultMQAdminExt.start();
Set<String> brokerAddrSet =
CommandUtil.fetchMasterAndSlaveAddrByClusterName(defaultMQAdminExt, clusterName);
for (String addr : brokerAddrSet) {
defaultMQAdminExt.deletePlainAccessConfig(addr, accessKey);
System.out.printf("delete plain access config account from %s success.%n", addr);
}
System.out.printf("account's accessKey is:%s", accessKey);
return;
}
ServerUtil.printCommandLineHelp("mqadmin " + this.commandName(), options);
} catch (Exception e) {
throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
} finally {
defaultMQAdminExt.shutdown();
}
}
|
// Only exercises command-line parsing for the subcommand's -a and -c options;
// execute() itself is not invoked here.
@Test
public void testExecute() {
DeleteAccessConfigSubCommand cmd = new DeleteAccessConfigSubCommand();
Options options = ServerUtil.buildCommandlineOptions(new Options());
String[] subargs = new String[] {"-a unit-test", "-c default-cluster"};
final CommandLine commandLine =
ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs,
cmd.buildCommandlineOptions(options), new DefaultParser());
assertThat(commandLine.getOptionValue('a').trim()).isEqualTo("unit-test");
assertThat(commandLine.getOptionValue('c').trim()).isEqualTo("default-cluster");
}
|
public String getDiscriminatingValue(ILoggingEvent event) {
    // The event's MDC map may be absent entirely; see http://jira.qos.ch/browse/LBCLASSIC-213
    final Map<String, String> properties = event.getMDCPropertyMap();
    if (properties != null) {
        final String mdcValue = properties.get(key);
        if (mdcValue != null) {
            return mdcValue;
        }
    }
    // No MDC value available for the configured key: fall back to the default.
    return defaultValue;
}
|
@Test
public void smoke() {
    // An event carrying the MDC entry should discriminate to that entry's value.
    logbackMDCAdapter.put(key, value);
    event = new LoggingEvent("a", logger, Level.DEBUG, "", null, null);
    assertEquals(value, discriminator.getDiscriminatingValue(event));
}
|
/**
 * Returns the unbiased (n-1 denominator) sample variance of the array.
 *
 * @param array the data; must contain at least two elements
 * @return the sample variance
 * @throws IllegalArgumentException if fewer than two elements are given
 */
public static double var(int[] array) {
    if (array.length < 2) {
        throw new IllegalArgumentException("Array length is less than 2.");
    }
    double sum = 0.0;
    double sumsq = 0.0;
    for (int xi : array) {
        sum += xi;
        // Square in double arithmetic: the original int product xi * xi
        // silently overflows for |xi| > 46340.
        sumsq += (double) xi * xi;
    }
    int n = array.length - 1;
    // Algebraically equal to (sumsq - sum^2 / N) / (N - 1).
    return sumsq / n - (sum / array.length) * (sum / n);
}
|
@Test
public void testVar_doubleArr() {
    System.out.println("var");
    // Sample variance of 1..9 is exactly 7.5.
    double[] samples = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    assertEquals(7.5, MathEx.var(samples), 1E-6);
}
|
/**
 * Adapts a legacy {@link Enumeration} to a sequential, ordered {@link Stream}.
 */
public static <T> Stream<T> stream(Enumeration<T> e) {
    // Bridge the enumeration through an Iterator view, then wrap it in an
    // unsized, ordered spliterator; the stream is sequential.
    Iterator<T> iterator = new Iterator<T>() {
        @Override
        public boolean hasNext() {
            return e.hasMoreElements();
        }

        @Override
        public T next() {
            return e.nextElement();
        }
    };
    Spliterator<T> spliterator =
        Spliterators.spliteratorUnknownSize(iterator, Spliterator.ORDERED);
    return StreamSupport.stream(spliterator, false);
}
|
@Test
public void test_stream_from_empty_enumeration() {
    // Streaming an empty enumeration must yield no elements.
    Enumeration<Integer> empty = Collections.enumeration(Collections.emptyList());
    List<Integer> collected = EnumerationUtil.stream(empty).collect(Collectors.toList());
    assertThat(collected).isEmpty();
}
|
/**
 * Picks a record reader per file by sniffing the first three bytes: files
 * starting with the "SEQ" magic get the SequenceFile reader, everything else
 * (including files shorter than three bytes) gets the text reader.
 *
 * Fix: in the original, the EOF catch block assigned a text reader that the
 * subsequent header check (on the still-zeroed header) unconditionally
 * overwrote with a second text reader, leaking the first.
 */
public RecordReader getRecordReader(InputSplit split, JobConf job,
    Reporter reporter) throws IOException {
    FileSplit fileSplit = (FileSplit) split;
    FileSystem fs = FileSystem.get(fileSplit.getPath().toUri(), job);
    byte[] header = new byte[3];
    boolean headerRead = false;
    FSDataInputStream is = fs.open(fileSplit.getPath());
    try {
        is.readFully(header);
        headerRead = true;
    } catch (EOFException eof) {
        // File shorter than 3 bytes: cannot be a SequenceFile, treat as text.
    } finally {
        is.close();
    }
    if (headerRead && header[0] == 'S' && header[1] == 'E' && header[2] == 'Q') {
        return seqFileInputFormat.getRecordReader(split, job, reporter);
    }
    return textInputFormat.getRecordReader(split, job, reporter);
}
|
@SuppressWarnings( { "unchecked", "deprecation" })
@Test
public void testFormat() throws IOException {
    // End-to-end check that AutoInputFormat picks, per file, between the text
    // and SequenceFile readers when both kinds live in the same input directory.
    JobConf job = new JobConf(conf);
    FileSystem fs = FileSystem.getLocal(conf);
    Path dir = new Path(System.getProperty("test.build.data", ".") + "/mapred");
    Path txtFile = new Path(dir, "auto.txt");
    Path seqFile = new Path(dir, "auto.seq");
    // Start from a clean directory so stale files cannot leak into the splits.
    fs.delete(dir, true);
    FileInputFormat.setInputPaths(job, dir);
    // Text file: one multiple of 10 per line.
    Writer txtWriter = new OutputStreamWriter(fs.create(txtFile));
    try {
        for (int i = 0; i < LINES_COUNT; i++) {
            txtWriter.write("" + (10 * i));
            txtWriter.write("\n");
        }
    } finally {
        txtWriter.close();
    }
    // Sequence file: keys are multiples of 11, values multiples of 12.
    SequenceFile.Writer seqWriter = SequenceFile.createWriter(fs, conf,
        seqFile, IntWritable.class, LongWritable.class);
    try {
        for (int i = 0; i < RECORDS_COUNT; i++) {
            IntWritable key = new IntWritable(11 * i);
            LongWritable value = new LongWritable(12 * i);
            seqWriter.append(key, value);
        }
    } finally {
        seqWriter.close();
    }
    AutoInputFormat format = new AutoInputFormat();
    InputSplit[] splits = format.getSplits(job, SPLITS_COUNT);
    for (InputSplit split : splits) {
        RecordReader reader = format.getRecordReader(split, job, Reporter.NULL);
        Object key = reader.createKey();
        Object value = reader.createValue();
        try {
            while (reader.next(key, value)) {
                // A LongWritable key means the text reader was chosen (byte
                // offset key, Text line value); otherwise it must be the
                // sequence-file reader with IntWritable/LongWritable pairs.
                if (key instanceof LongWritable) {
                    assertEquals("Wrong value class.", Text.class, value.getClass());
                    assertTrue("Invalid value", Integer.parseInt(((Text) value)
                        .toString()) % 10 == 0);
                } else {
                    assertEquals("Wrong key class.", IntWritable.class, key.getClass());
                    assertEquals("Wrong value class.", LongWritable.class, value
                        .getClass());
                    assertTrue("Invalid key.", ((IntWritable) key).get() % 11 == 0);
                    assertTrue("Invalid value.", ((LongWritable) value).get() % 12 == 0);
                }
            }
        } finally {
            reader.close();
        }
    }
}
|
/**
 * Executes a DDL command by delegating to the five-argument overload with the
 * trailing boolean set to {@code false} — NOTE(review): the flag's meaning is
 * defined by that overload; confirm against its declaration.
 *
 * @param sql the original SQL text of the command
 * @param ddlCommand the parsed DDL command to execute
 * @param withQuery whether the command is accompanied by a query
 * @param withQuerySources the source names referenced by the accompanying query
 * @return the result of executing the DDL command
 */
public DdlCommandResult execute(
    final String sql,
    final DdlCommand ddlCommand,
    final boolean withQuery,
    final Set<SourceName> withQuerySources
) {
    return execute(sql, ddlCommand, withQuery, withQuerySources, false);
}
|
@Test
public void shouldDropExistingType() {
    // Given: a registered custom type.
    metaStore.registerType("type", SqlTypes.STRING);

    // When: the DROP TYPE command is executed.
    final DdlCommandResult result =
        cmdExec.execute(SQL_TEXT, dropType, false, NO_QUERY_SOURCES);

    // Then: the type is gone and the command reports success with its message.
    assertThat(metaStore.resolveType("type").isPresent(), is(false));
    assertThat("Expected successful execution", result.isSuccess());
    assertThat(result.getMessage(), is("Dropped type 'type'"));
}
|
/**
 * Assembles the final {@link Image}: base-image layers and history first, then
 * base-image configuration, then the application layers and the container
 * configuration overrides.
 */
@Override
public Image call() throws LayerPropertyNotFoundException {
    try (ProgressEventDispatcher ignored =
            progressEventDispatcherFactory.create("building image format", 1);
        TimerEventDispatcher ignored2 =
            new TimerEventDispatcher(buildContext.getEventHandlers(), DESCRIPTION)) {
        // Constructs the image.
        Image.Builder imageBuilder = Image.builder(buildContext.getTargetFormat());
        // Base image layers
        baseImageLayers.forEach(imageBuilder::addLayer);
        // Passthrough config and count non-empty history entries
        // NOTE(review): the counter increments when an entry has NO
        // corresponding layer, despite the name — confirm intended semantics.
        int nonEmptyLayerCount = 0;
        for (HistoryEntry historyObject : baseImage.getHistory()) {
            imageBuilder.addHistory(historyObject);
            if (!historyObject.hasCorrespondingLayer()) {
                nonEmptyLayerCount++;
            }
        }
        // Copy the base image's configuration over as defaults.
        imageBuilder
            .setArchitecture(baseImage.getArchitecture())
            .setOs(baseImage.getOs())
            .addEnvironment(baseImage.getEnvironment())
            .addLabels(baseImage.getLabels())
            .setHealthCheck(baseImage.getHealthCheck())
            .addExposedPorts(baseImage.getExposedPorts())
            .addVolumes(baseImage.getVolumes())
            .setUser(baseImage.getUser())
            .setWorkingDirectory(baseImage.getWorkingDirectory());
        // NOTE(review): containerConfiguration is dereferenced unconditionally
        // below — confirm it can never be null at this point.
        ContainerConfiguration containerConfiguration = buildContext.getContainerConfiguration();
        // Add history elements for non-empty layers that don't have one yet
        Instant layerCreationTime = containerConfiguration.getCreationTime();
        for (int count = 0; count < baseImageLayers.size() - nonEmptyLayerCount; count++) {
            imageBuilder.addHistory(
                HistoryEntry.builder()
                    .setCreationTimestamp(layerCreationTime)
                    .setComment("auto-generated by Jib")
                    .build());
        }
        // Add built layers/configuration
        for (PreparedLayer applicationLayer : applicationLayers) {
            imageBuilder
                .addLayer(applicationLayer)
                .addHistory(
                    HistoryEntry.builder()
                        .setCreationTimestamp(layerCreationTime)
                        .setAuthor("Jib")
                        .setCreatedBy(buildContext.getToolName() + ":" + buildContext.getToolVersion())
                        .setComment(applicationLayer.getName())
                        .build());
        }
        // Container configuration values are applied last so they override the
        // base image's defaults set above.
        imageBuilder
            .addEnvironment(containerConfiguration.getEnvironmentMap())
            .setCreated(containerConfiguration.getCreationTime())
            .setEntrypoint(computeEntrypoint(baseImage, containerConfiguration))
            .setProgramArguments(computeProgramArguments(baseImage, containerConfiguration))
            .addExposedPorts(containerConfiguration.getExposedPorts())
            .addVolumes(containerConfiguration.getVolumes())
            .addLabels(containerConfiguration.getLabels());
        if (containerConfiguration.getUser() != null) {
            imageBuilder.setUser(containerConfiguration.getUser());
        }
        if (containerConfiguration.getWorkingDirectory() != null) {
            imageBuilder.setWorkingDirectory(containerConfiguration.getWorkingDirectory().toString());
        }
        // Gets the container configuration content descriptor.
        return imageBuilder.build();
    }
}
|
@Test
public void test_basicCase() {
    // Build the image from the mocked context and base-image fixtures.
    BuildImageStep step =
        new BuildImageStep(
            mockBuildContext,
            mockProgressEventDispatcherFactory,
            baseImage,
            baseImageLayers,
            applicationLayers);
    Image image = step.call();
    // The base image's user and first layer digest must carry over unchanged.
    Assert.assertEquals("root", image.getUser());
    Assert.assertEquals(
        testDescriptorDigest, image.getLayers().get(0).getBlobDescriptor().getDigest());
}
|
/**
 * Maps each key to its partition and, per partition, collects up to
 * {@code limitHostPerPartition} hosts taken in ring order starting at
 * {@code hash}. Partitions that could not supply enough hosts are reported in
 * the returned mapper.
 *
 * @throws IllegalArgumentException if {@code limitHostPerPartition <= 0}
 * @throws ServiceUnavailableException if no load-balancer strategy exists for the service
 */
@Override
public <K> HostToKeyMapper<K> getPartitionInformation(URI serviceUri, Collection<K> keys,
    int limitHostPerPartition,
    int hash)
    throws ServiceUnavailableException
{
    if (limitHostPerPartition <= 0)
    {
        throw new IllegalArgumentException("limitHostPartition cannot be 0 or less");
    }
    // Resolve service -> cluster -> URI properties (subscribing as needed).
    ServiceProperties service = listenToServiceAndCluster(serviceUri);
    String serviceName = service.getServiceName();
    String clusterName = service.getClusterName();
    ClusterProperties cluster = getClusterProperties(serviceName, clusterName);
    LoadBalancerStateItem<UriProperties> uriItem = getUriItem(serviceName, clusterName, cluster);
    UriProperties uris = uriItem.getProperty();
    List<LoadBalancerState.SchemeStrategyPair> orderedStrategies =
        _state.getStrategiesForService(serviceName, service.getPrioritizedSchemes());
    // partitionId -> number of hosts still missing to reach the requested limit.
    Map<Integer, Integer> partitionWithoutEnoughHost = new HashMap<>();
    if (! orderedStrategies.isEmpty())
    {
        // get the partitionId -> keys mapping
        final PartitionAccessor accessor = getPartitionAccessor(serviceName, clusterName);
        int maxPartitionId = accessor.getMaxPartitionId();
        List<K> unmappedKeys = new ArrayList<>();
        Map<Integer, Set<K>> partitionSet = getPartitionSet(keys, accessor, unmappedKeys);
        // get the partitionId -> host URIs list
        Map<Integer, KeysAndHosts<K>> partitionDataMap = new HashMap<>();
        for (Integer partitionId : partitionSet.keySet())
        {
            // Try strategies in priority order; stop at the first that yields hosts.
            for (LoadBalancerState.SchemeStrategyPair pair : orderedStrategies)
            {
                TrackerClientSubsetItem subsetItem = getPotentialClients(serviceName, service, cluster, uris,
                    pair.getScheme(), partitionId, uriItem.getVersion());
                Map<URI, TrackerClient> trackerClients = subsetItem.getWeightedSubset();
                int size = Math.min(trackerClients.size(), limitHostPerPartition);
                List<URI> rankedUri = new ArrayList<>(size);
                Ring<URI> ring = pair.getStrategy().getRing(uriItem.getVersion(), partitionId, trackerClients,
                    subsetItem.shouldForceUpdate());
                // Walk the ring from the hash position, keeping distinct hosts only.
                Iterator<URI> iterator = ring.getIterator(hash);
                while (iterator.hasNext() && rankedUri.size() < size)
                {
                    URI uri = iterator.next();
                    if (!rankedUri.contains(uri))
                    {
                        rankedUri.add(uri);
                    }
                }
                if (rankedUri.size() < limitHostPerPartition)
                {
                    partitionWithoutEnoughHost.put(partitionId, limitHostPerPartition - rankedUri.size());
                }
                KeysAndHosts<K> keysAndHosts = new KeysAndHosts<>(partitionSet.get(partitionId), rankedUri);
                partitionDataMap.put(partitionId, keysAndHosts);
                if (!rankedUri.isEmpty())
                {
                    // don't go to the next strategy if there are already hosts in the current one
                    break;
                }
            }
        }
        return new HostToKeyMapper<>(unmappedKeys, partitionDataMap, limitHostPerPartition, maxPartitionId + 1, partitionWithoutEnoughHost);
    }
    else
    {
        throw new ServiceUnavailableException(serviceName, "PEGA_1009. Unable to find a load balancer strategy" +
            "Server Schemes: [" + String.join(", ", service.getPrioritizedSchemes()) + ']');
    }
}
|
@Test
public void testGetAllPartitionMultipleHostsOrdering()
    throws Exception
{
    // Scenario: three servers; partitions 1 and 2 are served by all of them,
    // partition 3 only by server1, and partition 0 by nobody. Verifies host
    // ordering, per-partition limits, and the "not enough hosts" bookkeeping.
    String serviceName = "articles";
    String clusterName = "cluster";
    String path = "path";
    String strategyName = "degrader";
    //setup partition
    Map<URI,Map<Integer, PartitionData>> partitionDescriptions = new HashMap<>();
    final URI server1 = new URI("http://foo1.com");
    Map<Integer, PartitionData> server1Data = new HashMap<>();
    server1Data.put(1, new PartitionData(1.0));
    server1Data.put(2, new PartitionData(1.0));
    server1Data.put(3, new PartitionData(1.0));
    partitionDescriptions.put(server1, server1Data);
    final URI server2 = new URI("http://foo2.com");
    Map<Integer, PartitionData> server2Data = new HashMap<>();
    server2Data.put(1, new PartitionData(1.0));
    server2Data.put(2, new PartitionData(1.0));
    //server2Data.put(3, new PartitionData(1.0));
    partitionDescriptions.put(server2, server2Data);
    final URI server3 = new URI("http://foo3.com");
    Map<Integer, PartitionData> server3Data = new HashMap<>();
    server3Data.put(1, new PartitionData(1.0));
    server3Data.put(2, new PartitionData(1.0));
    //server3Data.put(3, new PartitionData(1.0));
    partitionDescriptions.put(server3, server3Data);
    //setup strategy which involves tweaking the hash ring to get partitionId -> URI host
    List<LoadBalancerState.SchemeStrategyPair> orderedStrategies = new ArrayList<>();
    LoadBalancerStrategy strategy = new TestLoadBalancerStrategy(partitionDescriptions);
    orderedStrategies.add(new LoadBalancerState.SchemeStrategyPair(PropertyKeys.HTTP_SCHEME, strategy));
    //setup the partition accessor which is used to get partitionId -> keys
    PartitionAccessor accessor = new TestPartitionAccessor();
    URI serviceURI = new URI("d2://" + serviceName);
    SimpleLoadBalancer balancer = new SimpleLoadBalancer(new PartitionedLoadBalancerTestState(
        clusterName, serviceName, path, strategyName, partitionDescriptions, orderedStrategies,
        accessor
    ), _d2Executor);
    // Ask for up to 3 hosts per partition, walking the ring from position 123.
    HostToKeyMapper<URI> result = balancer.getPartitionInformation(serviceURI, null, 3, 123);
    Assert.assertEquals(result.getPartitionInfoMap().size(), 4);
    Assert.assertEquals(4, result.getPartitionCount());
    // partition 0 should be empty
    Assert.assertTrue(result.getPartitionInfoMap().get(0).getHosts().isEmpty());
    // partition 1 should have server1, server2 and server3.
    List<URI> ordering1 = result.getPartitionInfoMap().get(1).getHosts();
    List<URI> allServers = new ArrayList<>();
    allServers.add(server1);
    allServers.add(server2);
    allServers.add(server3);
    assertEquals(ordering1.size(), 3);
    Assert.assertTrue(ordering1.containsAll(allServers));
    // partition 2 should be the same as partition 1
    List<URI> ordering2 = result.getPartitionInfoMap().get(2).getHosts();
    Assert.assertEquals(ordering1, ordering2);
    // partition 3 should only contain server1
    List<URI> ordering3 = result.getPartitionInfoMap().get(3).getHosts();
    Assert.assertEquals(ordering3.get(0), server1);
    // partition 0 and partition 3 should not have enough hosts: lacking 3 and 2 respectively.
    Assert.assertTrue(result.getPartitionsWithoutEnoughHosts().containsKey(3));
    Assert.assertTrue(result.getPartitionsWithoutEnoughHosts().containsKey(0));
    Assert.assertEquals((int)result.getPartitionsWithoutEnoughHosts().get(3), 2);
    Assert.assertEquals((int)result.getPartitionsWithoutEnoughHosts().get(0), 3);
}
|
@Override
public OAuth2CodeDO consumeAuthorizationCode(String code) {
    // Unknown codes are rejected outright.
    OAuth2CodeDO authCode = oauth2CodeMapper.selectByCode(code);
    if (authCode == null) {
        throw exception(OAUTH2_CODE_NOT_EXISTS);
    }
    // Expired codes are likewise unusable.
    if (DateUtils.isExpired(authCode.getExpiresTime())) {
        throw exception(OAUTH2_CODE_EXPIRE);
    }
    // An authorization code is single-use: delete it as part of consumption.
    oauth2CodeMapper.deleteById(authCode.getId());
    return authCode;
}
|
@Test
public void testConsumeAuthorizationCode_expired() {
    // Arrange: persist a code that expired a day ago.
    String code = "test_code";
    OAuth2CodeDO codeDO = randomPojo(OAuth2CodeDO.class).setCode(code)
        .setExpiresTime(LocalDateTime.now().minusDays(1));
    oauth2CodeMapper.insert(codeDO);
    // Act & assert: consuming the expired code must raise OAUTH2_CODE_EXPIRE.
    assertServiceException(() -> oauth2CodeService.consumeAuthorizationCode(code),
        OAUTH2_CODE_EXPIRE);
}
|
/**
 * Creates a handler addressed by member UUID. The partition id is fixed to
 * -1 — presumably marking the task as member-owned rather than
 * partition-owned; TODO confirm against ScheduledTaskHandlerImpl.
 */
public static ScheduledTaskHandler of(UUID uuid, String schedulerName, String taskName) {
    return new ScheduledTaskHandlerImpl(uuid, -1, schedulerName, taskName);
}
|
@Test
public void of_equalityDifferentTypes() {
    // A member-addressed URN and a partition-addressed URN must never compare equal.
    String memberUrn = "urn:hzScheduledTaskHandler:39ffc539-a356-444c-bec7-6f644462c208 -1 Scheduler Task";
    String partitionUrn = "urn:hzScheduledTaskHandler:- 2 Scheduler Task";
    assertNotEquals(ScheduledTaskHandler.of(memberUrn), ScheduledTaskHandler.of(partitionUrn));
}
|
/**
 * Populates {@code env} from a delimited "k=v,k=v" property value plus any
 * per-variable sub-properties ({@code propName.VAR=value}); sub-properties
 * win on conflict. Variable expansion happens once, at the end.
 */
public static void setEnvFromInputProperty(Map<String, String> env,
    String propName, String defaultPropValue, Configuration conf,
    String classPathSeparator) {
    String envString = conf.get(propName, defaultPropValue);
    // Parse k=v pairs into a scratch map without expanding env-var references
    // yet; expansion happens exactly once, below.
    Map<String, String> merged = new HashMap<>();
    Apps.setEnvFromInputStringNoExpand(merged, envString, classPathSeparator);
    // Per-variable sub-properties (e.g. map.reduce.env.ENV_VAR_NAME=value)
    // override entries parsed from the delimited string.
    merged.putAll(conf.getPropsWithPrefix(propName + "."));
    // Expand and publish into the target environment.
    setEnvFromInputStringMap(env, merged, classPathSeparator);
}
|
/**
 * Looking up a nonexistent property with a null default must leave the
 * environment untouched, even when other env properties are populated.
 * Fix: removed the dead local {@code defaultPropValue}, which was computed
 * but never used.
 */
@Test
void testSetEnvFromInputPropertyNull() {
    Configuration conf = new Configuration(false);
    Map<String, String> env = new HashMap<>();
    String propName = "mapreduce.map.env";
    String defaultPropName = "mapreduce.child.env";
    // Setup environment input properties
    conf.set(propName, "env1=env1_val,env2=env2_val,env3=env3_val");
    conf.set(propName + ".env4", "env4_val");
    conf.set(propName + ".env2", "new_env2_val");
    // Setup some default values - we shouldn't see these values
    conf.set(defaultPropName, "env1=def1_val,env2=def2_val,env3=def3_val");
    // These should never be referenced.
    conf.set(defaultPropName + ".env4", "def4_val");
    conf.set(defaultPropName + ".env2", "new_def2_val");
    // Try with null inputs: a bogus property and a null default yield nothing.
    Apps.setEnvFromInputProperty(env, "bogus1", null, conf, File.pathSeparator);
    assertTrue(env.isEmpty());
}
|
/**
 * Tells whether {@code type} is treated as a bean, by wrapping it in a
 * {@code TypeRef} and delegating to the {@code isBean(TypeRef)} overload.
 */
public static boolean isBean(Type type) {
    return isBean(TypeRef.of(type));
}
|
@Test
public void isBeanTest() {
    // A plain data class counts as a bean; java.lang.Object itself does not.
    Assert.assertTrue(TypeUtils.isBean(BeanA.class));
    Assert.assertFalse(TypeUtils.isBean(Object.class));
}
|
@Override
@SneakyThrows
public String createFile(String name, String path, byte[] content) {
    // Derive the MIME type from the content; default the storage path when absent.
    String type = FileTypeUtils.getMineType(content, name);
    if (StrUtil.isEmpty(path)) {
        path = FileUtils.generatePath(content, name);
    }
    // Fall back to the path when no display name was supplied.
    if (StrUtil.isEmpty(name)) {
        name = path;
    }
    // Upload through the master file client.
    FileClient client = fileConfigService.getMasterFileClient();
    Assert.notNull(client, "客户端(master) 不能为空");
    String url = client.upload(content, path, type);
    // Record the upload metadata in the database.
    FileDO fileDO = new FileDO();
    fileDO.setConfigId(client.getId());
    fileDO.setName(name);
    fileDO.setPath(path);
    fileDO.setUrl(url);
    fileDO.setType(type);
    fileDO.setSize(content.length);
    fileMapper.insert(fileDO);
    return url;
}
|
@Test
public void testCreateFile_success() throws Exception {
    // Arrange: a random path and real JPEG bytes.
    String path = randomString();
    byte[] content = ResourceUtil.readBytes("file/erweima.jpg");
    // Mock the master file client: upload returns a random URL.
    FileClient client = mock(FileClient.class);
    when(fileConfigService.getMasterFileClient()).thenReturn(client);
    String url = randomString();
    when(client.upload(same(content), same(path), eq("image/jpeg"))).thenReturn(url);
    when(client.getId()).thenReturn(10L);
    String name = "单测文件名";
    // Act.
    String result = fileService.createFile(name, path, content);
    // Assert: the URL is returned and a DB row persists the upload metadata.
    assertEquals(result, url);
    FileDO file = fileMapper.selectOne(FileDO::getPath, path);
    assertEquals(10L, file.getConfigId());
    assertEquals(path, file.getPath());
    assertEquals(url, file.getUrl());
    assertEquals("image/jpeg", file.getType());
    assertEquals(content.length, file.getSize());
}
|
/** Returns the function's registered name (the {@code FUNCTION_NAME} constant). */
@Override
public String getName() {
    return FUNCTION_NAME;
}
|
/**
 * round_decimal over a null literal must produce all-null results.
 * Fix: the original wrapped the expression in
 * {@code String.format("round_decimal(null)", INT_SV_COLUMN)} — the format
 * string has no placeholder, so the argument was dead; use the literal.
 */
@Test
public void testRoundDecimalNullLiteral() {
    ExpressionContext expression = RequestContextUtils.getExpression("round_decimal(null)");
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertTrue(transformFunction instanceof RoundDecimalTransformFunction);
    Assert.assertEquals(transformFunction.getName(), TransformFunctionType.ROUND_DECIMAL.getName());
    // Every row is expected to be null.
    double[] expectedValues = new double[NUM_ROWS];
    RoaringBitmap roaringBitmap = new RoaringBitmap();
    roaringBitmap.add(0L, NUM_ROWS);
    testTransformFunctionWithNull(transformFunction, expectedValues, roaringBitmap);
}
|
/**
 * Asks the package-material plugin to validate the given repository
 * configuration and converts the plugin's response into a
 * {@code ValidationResult}. Request and response message formats depend on
 * the resolved extension version.
 */
public ValidationResult isRepositoryConfigurationValid(String pluginId, final RepositoryConfiguration repositoryConfiguration) {
    return pluginRequestHelper.submitRequest(pluginId, REQUEST_VALIDATE_REPOSITORY_CONFIGURATION, new DefaultPluginInteractionCallback<>() {
        @Override
        public String requestBody(String resolvedExtensionVersion) {
            // Serialize the configuration in the version-appropriate format.
            return messageConverter(resolvedExtensionVersion).requestMessageForIsRepositoryConfigurationValid(repositoryConfiguration);
        }
        @Override
        public ValidationResult onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) {
            return messageConverter(resolvedExtensionVersion).responseMessageForIsRepositoryConfigurationValid(responseBody);
        }
    });
}
|
@Test
public void shouldTalkToPluginToCheckIfRepositoryConfigurationIsValid() throws Exception {
    // Stub the plugin to return two validation errors, then verify both the
    // request sent to the plugin and the parsed ValidationResult.
    String expectedRequestBody = "{\"repository-configuration\":{\"key-one\":{\"value\":\"value-one\"},\"key-two\":{\"value\":\"value-two\"}}}";
    String expectedResponseBody = "[{\"key\":\"key-one\",\"message\":\"incorrect value\"},{\"message\":\"general error\"}]";
    when(pluginManager.isPluginOfType(PACKAGE_MATERIAL_EXTENSION, PLUGIN_ID)).thenReturn(true);
    when(pluginManager.submitTo(eq(PLUGIN_ID), eq(PACKAGE_MATERIAL_EXTENSION), requestArgumentCaptor.capture())).thenReturn(DefaultGoPluginApiResponse.success(expectedResponseBody));
    ValidationResult validationResult = extension.isRepositoryConfigurationValid(PLUGIN_ID, repositoryConfiguration);
    // The captured request must match the expected extension/version/body.
    assertRequest(requestArgumentCaptor.getValue(), PACKAGE_MATERIAL_EXTENSION, "1.0", PackageRepositoryExtension.REQUEST_VALIDATE_REPOSITORY_CONFIGURATION, expectedRequestBody);
    // Errors without a key are surfaced with an empty key.
    assertValidationError(validationResult.getErrors().get(0), "key-one", "incorrect value");
    assertValidationError(validationResult.getErrors().get(1), "", "general error");
}
|
/** Returns the subpartition id this payload was created with. */
public int getSubpartitionId() {
    return subpartitionId;
}
|
@Test
void testGetSubpartitionId() {
    // A buffer payload must report the subpartition it was created for.
    int subpartition = 1;
    Buffer buffer = BufferBuilderTestUtils.buildSomeBuffer(0);
    NettyPayload payload = NettyPayload.newBuffer(buffer, 0, subpartition);
    assertThat(payload.getSubpartitionId()).isEqualTo(subpartition);
}
|
@Override
public void persist(final String key, final String value) {
    try {
        // Existing keys are updated in place.
        if (isExisted(key)) {
            update(key, value);
            return;
        }
        // tempPrefix accumulates the path built so far; parent tracks the node
        // the next segment should hang under (the root separator initially).
        String tempPrefix = "";
        String parent = SEPARATOR;
        // Split the key into its non-empty path segments.
        String[] paths = Arrays.stream(key.split(SEPARATOR)).filter(each -> !Strings.isNullOrEmpty(each)).toArray(String[]::new);
        // Create key level directory recursively.
        for (int i = 0; i < paths.length - 1; i++) {
            String tempKey = tempPrefix + SEPARATOR + paths[i];
            // Insert a placeholder node for each missing ancestor.
            if (!isExisted(tempKey)) {
                insert(tempKey, "", parent);
            }
            tempPrefix = tempKey;
            parent = tempKey;
        }
        // Finally insert the leaf node under the deepest ancestor.
        insert(key, value, parent);
    } catch (final SQLException ex) {
        // Best-effort persistence: failures are logged, not rethrown.
        log.error("Persist {} data to key: {} failed", getType(), key, ex);
    }
}
|
@Test
void assertPersistFailureDuringUpdate() throws SQLException {
    // The key already exists, so persist() takes the update path...
    when(mockJdbcConnection.prepareStatement(repositorySQL.getSelectByKeySQL())).thenReturn(mockPreparedStatement);
    when(mockPreparedStatement.executeQuery()).thenReturn(mockResultSet);
    when(mockResultSet.next()).thenReturn(true);
    when(mockJdbcConnection.prepareStatement(repositorySQL.getUpdateSQL())).thenReturn(mockPreparedStatement);
    repository.persist("key", "value");
    // ...and the insert statement must never be executed.
    verify(mockPreparedStatementForPersist, times(0)).executeUpdate();
}
|
/**
 * Extracts the port from a web-interface URL (the text after the last colon).
 * Returns -1 for a null URL, a URL without a colon, or a non-numeric port.
 */
public static Integer parseRestBindPortFromWebInterfaceUrl(String webInterfaceUrl) {
    if (webInterfaceUrl == null) {
        return -1;
    }
    final int portSeparator = webInterfaceUrl.lastIndexOf(':');
    if (portSeparator < 0) {
        return -1;
    }
    try {
        return Integer.parseInt(webInterfaceUrl.substring(portSeparator + 1));
    } catch (NumberFormatException e) {
        // Port segment is not a number (e.g. trailing slashes).
        return -1;
    }
}
|
@Test
void testParseRestBindPortFromWebInterfaceUrlWithInvalidSchema() {
    // Trailing slashes make the port segment non-numeric, so parsing yields -1.
    Integer port = ResourceManagerUtils.parseRestBindPortFromWebInterfaceUrl("localhost:8080//");
    assertThat(port).isEqualTo(-1);
}
|
/**
 * Checks whether {@code maskBit} is a valid IPv4 mask bit, i.e. whether a
 * mask is registered for it in {@code MaskBit}.
 */
public static boolean isMaskBitValid(int maskBit) {
    return MaskBit.get(maskBit) != null;
}
|
@Test
public void isMaskBitInvalidTest() {
    // 33 lies beyond the valid IPv4 mask-bit range, so validation must fail.
    assertFalse(Ipv4Util.isMaskBitValid(33));
}
|
/**
 * Validates the given connector configuration, delegating to the
 * three-argument overload with the trailing boolean set to {@code true} —
 * NOTE(review): the flag's meaning is defined by that overload; confirm
 * against its declaration.
 */
@Override
public void validateConnectorConfig(Map<String, String> connectorProps, Callback<ConfigInfos> callback) {
    validateConnectorConfig(connectorProps, callback, true);
}
|
@Test
public void testConfigValidationMissingName() {
    // Validating a config that only names the connector class must surface
    // errors for the missing "name" and the connector's "required" field.
    final Class<? extends Connector> connectorClass = SampleSourceConnector.class;
    AbstractHerder herder = createConfigValidationHerder(connectorClass, noneConnectorClientConfigOverridePolicy);
    Map<String, String> config = Collections.singletonMap(ConnectorConfig.CONNECTOR_CLASS_CONFIG, connectorClass.getName());
    ConfigInfos result = herder.validateConnectorConfig(config, s -> null, false);
    // We expect there to be errors due to the missing name and .... Note that these assertions depend heavily on
    // the config fields for SourceConnectorConfig, but we expect these to change rarely.
    assertEquals(connectorClass.getName(), result.name());
    assertEquals(Arrays.asList(ConnectorConfig.COMMON_GROUP, ConnectorConfig.TRANSFORMS_GROUP,
        ConnectorConfig.PREDICATES_GROUP, ConnectorConfig.ERROR_GROUP,
        SourceConnectorConfig.TOPIC_CREATION_GROUP, SourceConnectorConfig.EXACTLY_ONCE_SUPPORT_GROUP,
        SourceConnectorConfig.OFFSETS_TOPIC_GROUP), result.groups());
    assertEquals(2, result.errorCount());
    // Index the validated fields by config key name for easy lookup.
    Map<String, ConfigInfo> infos = result.values().stream()
        .collect(Collectors.toMap(info -> info.configKey().name(), Function.identity()));
    // Base connector config has 15 fields, connector's configs add 7
    assertEquals(22, infos.size());
    // Missing name should generate an error
    assertEquals(ConnectorConfig.NAME_CONFIG,
        infos.get(ConnectorConfig.NAME_CONFIG).configValue().name());
    assertEquals(1, infos.get(ConnectorConfig.NAME_CONFIG).configValue().errors().size());
    // "required" config from connector should generate an error
    assertEquals("required", infos.get("required").configValue().name());
    assertEquals(1, infos.get("required").configValue().errors().size());
    verifyValidationIsolation();
}
|
@Override
public String execute(SampleResult previousResult, Sampler currentSampler) throws InvalidVariableException {
    String input = values[0].execute();
    // Optional second argument selects the case mode; absent or blank falls
    // back to UPPER.
    String mode = values.length > 1 ? values[1].execute() : null;
    if (StringUtils.isEmpty(mode)) {
        mode = ChangeCaseMode.UPPER.getName(); // default
    }
    String result = changeCase(input, mode);
    // Optional third argument names a variable to store the result in.
    addVariableValue(result, values, 2);
    return result;
}
|
@Test
public void testEmptyMode() throws Exception {
    // An empty mode argument must fall back to UPPER case.
    assertEquals("AB-CD EF", execute("ab-CD eF", ""));
}
|
/** Appends the {@code INFINITY_ENCODED} marker bytes to the encoding. */
public void writeInfinity() {
    writeTrailingBytes(INFINITY_ENCODED);
}
|
@Test
public void testWriteInfinity() {
    OrderedCode orderedCode = new OrderedCode();
    // Reading infinity from an empty code must fail.
    try {
        orderedCode.readInfinity();
        fail("Expected IllegalArgumentException.");
    } catch (IllegalArgumentException e) {
        // expected
    }
    // After writing, exactly one infinity marker can be read back.
    orderedCode.writeInfinity();
    assertTrue(orderedCode.readInfinity());
    // A second read must fail again: the marker was consumed.
    try {
        orderedCode.readInfinity();
        fail("Expected IllegalArgumentException.");
    } catch (IllegalArgumentException e) {
        // expected
    }
}
|
/**
 * Registers this broker with the load manager: builds the local broker data,
 * acquires the broker's znode lock under the load-balance root, seeds the
 * time-average data, and pushes a first full update.
 */
@Override
public void start() throws PulsarServerException {
    try {
        // At this point, the ports will be updated with the real port number that the server was assigned
        Map<String, String> protocolData = pulsar.getProtocolDataToAdvertise();
        // lastData mirrors the most recently published snapshot; localData is the live view.
        lastData = new LocalBrokerData(pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(),
            pulsar.getBrokerServiceUrl(), pulsar.getBrokerServiceUrlTls(), pulsar.getAdvertisedListeners());
        lastData.setProtocols(protocolData);
        // configure broker-topic mode
        lastData.setPersistentTopicsEnabled(pulsar.getConfiguration().isEnablePersistentTopics());
        lastData.setNonPersistentTopicsEnabled(pulsar.getConfiguration().isEnableNonPersistentTopics());
        localData = new LocalBrokerData(pulsar.getWebServiceAddress(), pulsar.getWebServiceAddressTls(),
            pulsar.getBrokerServiceUrl(), pulsar.getBrokerServiceUrlTls(), pulsar.getAdvertisedListeners());
        localData.setProtocols(protocolData);
        localData.setBrokerVersionString(pulsar.getBrokerVersion());
        // configure broker-topic mode
        localData.setPersistentTopicsEnabled(pulsar.getConfiguration().isEnablePersistentTopics());
        localData.setNonPersistentTopicsEnabled(pulsar.getConfiguration().isEnableNonPersistentTopics());
        localData.setLoadManagerClassName(conf.getLoadManagerClassName());
        String brokerId = pulsar.getBrokerId();
        brokerZnodePath = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + brokerId;
        updateLocalBrokerData();
        // Claim the broker znode; fails if another session already owns it —
        // NOTE(review): presumably an ephemeral/session-bound lock, confirm
        // acquireLock semantics.
        brokerDataLock = brokersData.acquireLock(brokerZnodePath, localData).join();
        // Seed the time-average broker data before the first update cycle.
        pulsarResources.getLoadBalanceResources()
            .getBrokerTimeAverageDataResources()
            .updateTimeAverageBrokerData(brokerId, new TimeAverageBrokerData())
            .join();
        updateAll();
    } catch (Exception e) {
        log.error("Unable to acquire lock for broker: [{}]", brokerZnodePath, e);
        throw new PulsarServerException(e);
    }
}
|
@Test
public void testOwnBrokerZnodeByMultipleBroker() throws Exception {
    // A broker must fail to start when its load-balance znode is already
    // owned by a different, still-live metadata-store session.
    ServiceConfiguration config = new ServiceConfiguration();
    config.setLoadManagerClassName(ModularLoadManagerImpl.class.getName());
    config.setClusterName("use");
    config.setWebServicePort(Optional.of(PortManager.nextLockedFreePort()));
    config.setMetadataStoreUrl("zk:127.0.0.1:" + bkEnsemble.getZookeeperPort());
    config.setBrokerShutdownTimeoutMs(0L);
    config.setLoadBalancerOverrideBrokerNicSpeedGbps(Optional.of(1.0d));
    config.setBrokerServicePort(Optional.of(0));
    PulsarService pulsar = new PulsarService(config);
    // create znode using different zk-session
    final String brokerZnode = LoadManager.LOADBALANCE_BROKERS_ROOT + "/" + pulsar.getAdvertisedAddress() + ":"
        + config.getWebServicePort().get();
    pulsar1.getLocalMetadataStore()
        .put(brokerZnode, new byte[0], Optional.empty(), EnumSet.of(CreateOption.Ephemeral)).join();
    try {
        pulsar.start();
        fail("should have failed");
    } catch (PulsarServerException e) {
        //Ok.
    }
    pulsar.close();
}
|
/**
 * Stores {@code state} in the state storage and writes the resulting handle
 * to {@code pathInZooKeeper} in a single transaction, locking the node.
 * Fails with {@code AlreadyExistException} if a live node already occupies
 * the path; a node marked for deletion is removed first and then replaced.
 *
 * @return the handle under which the state was stored
 * @throws PossibleInconsistentStateException if the write outcome cannot be determined
 */
@Override
public RetrievableStateHandle<T> addAndLock(String pathInZooKeeper, T state)
    throws PossibleInconsistentStateException, Exception {
    checkNotNull(pathInZooKeeper, "Path in ZooKeeper");
    checkNotNull(state, "State");
    final String path = normalizePath(pathInZooKeeper);
    final Optional<Stat> maybeStat = getStat(path);
    if (maybeStat.isPresent()) {
        if (isNotMarkedForDeletion(maybeStat.get())) {
            throw new AlreadyExistException(
                String.format("ZooKeeper node %s already exists.", path));
        }
        // A node marked for deletion blocks the path; clean it up before reuse.
        Preconditions.checkState(
            releaseAndTryRemove(path),
            "The state is marked for deletion and, therefore, should be deletable.");
    }
    final RetrievableStateHandle<T> storeHandle = storage.store(state);
    final byte[] serializedStoreHandle = serializeOrDiscard(storeHandle);
    try {
        writeStoreHandleTransactionally(path, serializedStoreHandle);
        return storeHandle;
    } catch (KeeperException.NodeExistsException e) {
        // Transactions are not idempotent in the curator version we're currently using, so it
        // is actually possible that we've re-tried a transaction that has already succeeded.
        // We've ensured that the node hasn't been present prior executing the transaction, so
        // we can assume that this is a result of the retry mechanism.
        return storeHandle;
    } catch (Exception e) {
        if (indicatesPossiblyInconsistentState(e)) {
            throw new PossibleInconsistentStateException(e);
        }
        // In case of any other failure, discard the state and rethrow the exception.
        storeHandle.discardState();
        throw e;
    }
}
|
@Test
void testAddAndLock() throws Exception {
    // addAndLock must create a persistent state node plus a locks subfolder
    // containing one ephemeral lock, and store retrievable handle data.
    final TestingLongStateHandleHelper longStateStorage = new TestingLongStateHandleHelper();
    ZooKeeperStateHandleStore<TestingLongStateHandleHelper.LongStateHandle> store =
        new ZooKeeperStateHandleStore<>(getZooKeeperClient(), longStateStorage);
    // Config
    final String pathInZooKeeper = "/testAdd";
    final long state = 1239712317L;
    // Test
    store.addAndLock(pathInZooKeeper, new TestingLongStateHandleHelper.LongStateHandle(state));
    // Verify
    // State handle created
    assertThat(store.getAllAndLock()).hasSize(1);
    assertThat(store.getAndLock(pathInZooKeeper).retrieveState().getValue()).isEqualTo(state);
    // Path created and is persistent
    Stat stat = getZooKeeperClient().checkExists().forPath(pathInZooKeeper);
    assertThat(stat).isNotNull();
    assertThat(stat.getEphemeralOwner()).isZero();
    List<String> children = getZooKeeperClient().getChildren().forPath(pathInZooKeeper);
    // There should be one child which is the locks subfolder
    final String locksSubfolderChild = Iterables.getOnlyElement(children);
    stat =
        getZooKeeperClient()
            .checkExists()
            .forPath(generateZookeeperPath(pathInZooKeeper, locksSubfolderChild));
    assertThat(stat).isNotNull();
    assertThat(stat.getEphemeralOwner())
        .as("The lock subfolder shouldn't be ephemeral")
        .isZero();
    List<String> lockChildren =
        getZooKeeperClient()
            .getChildren()
            .forPath(generateZookeeperPath(pathInZooKeeper, locksSubfolderChild));
    // Only one lock is expected
    final String lockChild = Iterables.getOnlyElement(lockChildren);
    stat =
        getZooKeeperClient()
            .checkExists()
            .forPath(
                generateZookeeperPath(
                    pathInZooKeeper, locksSubfolderChild, lockChild));
    assertThat(stat.getEphemeralOwner()).as("The lock node should be ephemeral").isNotZero();
    // Data is equal: deserialize the stored handle and compare the state value.
    @SuppressWarnings("unchecked")
    final long actual =
        ((RetrievableStateHandle<TestingLongStateHandleHelper.LongStateHandle>)
            InstantiationUtil.deserializeObject(
                getZooKeeperClient().getData().forPath(pathInZooKeeper),
                ClassLoader.getSystemClassLoader()))
            .retrieveState()
            .getValue();
    assertThat(actual).isEqualTo(state);
}
|
/**
 * Checks that the given personal access token can read from the Bitbucket
 * server by listing repositories; parsing the payload as a RepositoryList
 * also validates the response shape. Failures propagate from {@code doGet}.
 */
public void validateReadPermission(String serverUrl, String personalAccessToken) {
    final HttpUrl repositoriesUrl = buildUrl(serverUrl, "/rest/api/1.0/repos");
    doGet(personalAccessToken, repositoriesUrl, body -> buildGson().fromJson(body, RepositoryList.class));
}
|
/**
 * A 500 response with a non-JSON body must surface as an
 * IllegalArgumentException with a generic message, while the status code and
 * raw body are written to the log for diagnosis.
 */
@Test
public void fail_validate_url_on_non_json_result_log_correctly_the_response() {
    server.enqueue(new MockResponse()
        .setHeader("Content-Type", "application/json;charset=UTF-8")
        .setResponseCode(500)
        .setBody("not json"));
    String serverUrl = server.url("/").toString();
    assertThatThrownBy(() -> underTest.validateReadPermission(serverUrl, "token"))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Unable to contact Bitbucket server");
    // The detailed cause (status + body) must only appear in the logs.
    assertThat(String.join(", ", logTester.logs())).contains("Unable to contact Bitbucket server: 500 not json");
}
|
/**
 * Validates a caller-supplied flag, raising an exception that names the
 * offending argument when the flag is false.
 *
 * @param isValid result of the caller's validity check
 * @param argName argument name interpolated into the message "'&lt;name&gt;' is invalid."
 * @throws IllegalArgumentException if {@code isValid} is false
 */
public static void checkValid(boolean isValid, String argName) {
    checkArgument(isValid, "'%s' is invalid.", argName);
}
|
/**
 * {@code checkValid} must be a no-op for a true flag and throw
 * IllegalArgumentException naming the argument for a false flag.
 */
@Test
public void testCheckValid() throws Exception {
    // Should not throw.
    Validate.checkValid(true, "arg");
    // Verify it throws, and that the message contains the argument name.
    ExceptionAsserts.assertThrows(
        IllegalArgumentException.class,
        "'arg' is invalid",
        () -> Validate.checkValid(false, "arg"));
}
|
/**
 * Parses the entire character sequence as a B3 single-format header by
 * delegating to the ranged overload over {@code [0, b3.length())}.
 * May return null (per the overload's contract — verify against the
 * three-argument variant).
 */
@Nullable
public static TraceContextOrSamplingFlags parseB3SingleFormat(CharSequence b3) {
    final int end = b3.length();
    return parseB3SingleFormat(b3, 0, end);
}
|
// A 128-bit trace id plus span id, with no sampling field: all three id fields
// must round-trip into the parsed context and nothing else may be set.
@Test void parseB3SingleFormat_spanIdsNotYetSampled128() {
    assertThat(parseB3SingleFormat(traceIdHigh + traceId + "-" + spanId).context())
        .isEqualToComparingFieldByField(TraceContext.newBuilder()
            .traceIdHigh(Long.parseUnsignedLong(traceIdHigh, 16))
            .traceId(Long.parseUnsignedLong(traceId, 16))
            .spanId(Long.parseUnsignedLong(spanId, 16)).build()
        );
}
|
/**
 * Merges per-sub-cluster resource profile responses into a single response
 * whose resource is the sum of all non-null sub-cluster resources.
 *
 * @param responses responses gathered from the sub-clusters; entries may be
 *                  null or carry a null resource, and such entries are skipped
 * @return a fresh response holding the accumulated resource
 */
public static GetResourceProfileResponse mergeClusterResourceProfileResponse(
    Collection<GetResourceProfileResponse> responses) {
    GetResourceProfileResponse mergedResponse =
        Records.newRecord(GetResourceProfileResponse.class);
    Resource total = Resource.newInstance(0, 0);
    for (GetResourceProfileResponse response : responses) {
        // Skip missing responses and responses that carry no resource.
        if (response == null || response.getResource() == null) {
            continue;
        }
        total = Resources.add(total, response.getResource());
    }
    mergedResponse.setResource(total);
    return mergedResponse;
}
|
/**
 * Merging two populated responses, one empty response, and a null response
 * must yield the sum of the populated resources only.
 */
@Test
public void testMergeResourceProfile() {
    // normal response1
    Resource resource1 = Resource.newInstance(1024, 1);
    GetResourceProfileResponse response1 =
        Records.newRecord(GetResourceProfileResponse.class);
    response1.setResource(resource1);
    // normal response2
    Resource resource2 = Resource.newInstance(2048, 2);
    GetResourceProfileResponse response2 =
        Records.newRecord(GetResourceProfileResponse.class);
    response2.setResource(resource2);
    // empty response (no resource set) — must be ignored by the merge
    GetResourceProfileResponse response3 =
        Records.newRecord(GetResourceProfileResponse.class);
    // null response — must also be ignored
    GetResourceProfileResponse response4 = null;
    List<GetResourceProfileResponse> responses = new ArrayList<>();
    responses.add(response1);
    responses.add(response2);
    responses.add(response3);
    responses.add(response4);
    GetResourceProfileResponse response =
        RouterYarnClientUtils.mergeClusterResourceProfileResponse(responses);
    Resource resource = response.getResource();
    // 1 + 2 vcores and 1024 + 2048 MB from the two populated responses.
    Assert.assertEquals(3, resource.getVirtualCores());
    Assert.assertEquals(3072, resource.getMemorySize());
}
|
/**
 * Returns the main (non-additional/non-side) inputs of an applied transform:
 * every input whose tag does not appear in the transform's additional-inputs
 * map.
 *
 * @param application the applied transform to inspect
 * @return the main inputs, possibly empty only when the transform has no inputs at all
 * @throws IllegalArgumentException if the transform has inputs but none of them is a main input
 */
public static Collection<PValue> nonAdditionalInputs(AppliedPTransform<?, ?, ?> application) {
    ImmutableList.Builder<PValue> mainInputs = ImmutableList.builder();
    PTransform<?, ?> transform = application.getTransform();
    for (Map.Entry<TupleTag<?>, PCollection<?>> input : application.getInputs().entrySet()) {
        if (!transform.getAdditionalInputs().containsKey(input.getKey())) {
            mainInputs.add(input.getValue());
        }
    }
    // Build the immutable list once; the original built it twice (for the
    // emptiness check and the return), copying the contents both times.
    ImmutableList<PValue> builtMainInputs = mainInputs.build();
    checkArgument(
        !builtMainInputs.isEmpty() || application.getInputs().isEmpty(),
        "Expected at least one main input if any inputs exist");
    return builtMainInputs;
}
|
/**
 * When a transform declares no additional inputs, every input it receives
 * must be reported as a main input.
 */
@Test
public void nonAdditionalInputsWithMultipleNonAdditionalInputsSucceeds() {
    Map<TupleTag<?>, PCollection<?>> allInputs = new HashMap<>();
    PCollection<Integer> mainInts = pipeline.apply("MainInput", Create.of(12, 3));
    allInputs.put(new TupleTag<Integer>() {}, mainInts);
    PCollection<Void> voids = pipeline.apply("VoidInput", Create.empty(VoidCoder.of()));
    allInputs.put(new TupleTag<Void>() {}, voids);
    AppliedPTransform<PInput, POutput, TestTransform> transform =
        AppliedPTransform.of(
            "additional-free",
            allInputs,
            Collections.emptyMap(),
            new TestTransform(),
            ResourceHints.create(),
            pipeline);
    // Both inputs are non-additional, so both must come back.
    assertThat(
        TransformInputs.nonAdditionalInputs(transform),
        Matchers.containsInAnyOrder(voids, mainInts));
}
|
/**
 * Removes the configured service base path from the front of the request path
 * when stripping is enabled, ensuring the result still starts with "/".
 * Paths that do not start with the base path are returned unchanged.
 *
 * @param requestPath incoming request path
 * @param config container configuration holding the strip flag and base path
 * @return the request path with the base path prefix removed, or the original path
 */
protected String stripBasePath(String requestPath, ContainerConfig config) {
    if (!config.isStripBasePath()) {
        return requestPath;
    }
    String basePath = config.getServiceBasePath();
    if (requestPath.startsWith(basePath)) {
        // Use substring instead of replaceFirst: the base path is plain text,
        // and replaceFirst would interpret any regex metacharacters in it
        // (e.g. ".", "+") and could mangle the path. startsWith already
        // guarantees the prefix, so a plain cut is both safe and faster.
        String newRequestPath = requestPath.substring(basePath.length());
        if (!newRequestPath.startsWith("/")) {
            newRequestPath = "/" + newRequestPath;
        }
        return newRequestPath;
    }
    return requestPath;
}
|
/**
 * Stripping must remove only the leading occurrence of the base path: a
 * doubled base path keeps its second occurrence, a base path embedded after
 * another segment is untouched, and a path missing the leading "/" is not
 * considered a prefix match at all.
 */
@Test
void requestReader_doubleBasePath() {
    ContainerConfig config = ContainerConfig.defaultConfig();
    config.setStripBasePath(true);
    config.setServiceBasePath(BASE_PATH_MAPPING);
    String finalPath = requestReader.stripBasePath("/" + BASE_PATH_MAPPING + "/" + BASE_PATH_MAPPING, config);
    assertNotNull(finalPath);
    assertEquals("/" + BASE_PATH_MAPPING, finalPath);
    // Base path appearing after another segment must not be stripped.
    finalPath = requestReader.stripBasePath("/custom/" + BASE_PATH_MAPPING, config);
    assertNotNull(finalPath);
    assertEquals("/custom/" + BASE_PATH_MAPPING, finalPath);
    finalPath = requestReader.stripBasePath(BASE_PATH_MAPPING, config);
    assertNotNull(finalPath);
    // the request path does not start with a "/", the comparison in the method should fail
    // and nothing should get replaced
    assertEquals(BASE_PATH_MAPPING, finalPath);
}
|
/**
 * Component deactivation: unregisters configuration properties and the
 * provider, detaches the mastership and device listeners, shuts down the
 * alarm polling executor, and clears the provider service reference.
 * Order matters: listeners are removed before the executor stops so no new
 * work is scheduled during teardown.
 */
@Deactivate
public void deactivate() {
    cfgService.unregisterProperties(getClass(), false);
    providerRegistry.unregister(this);
    mastershipService.removeListener(mastershipListener);
    deviceService.removeListener(deviceListener);
    alarmsExecutor.shutdown();
    providerService = null;
    log.info("Stopped");
}
|
/**
 * After deactivation: both listeners removed, provider unregistered, the
 * alarms executor shut down, and the provider service cleared.
 */
@Test
public void deactivate() throws Exception {
    provider.deactivate();
    assertEquals("Device listener should be removed", 0, deviceListeners.size());
    assertEquals("Mastership listener should be removed", 0, mastershipListeners.size());
    assertFalse("Provider should not be registered", providerRegistry.getProviders().contains(provider.id()));
    assertTrue(provider.alarmsExecutor.isShutdown());
    assertNull(provider.providerService);
}
|
/**
 * Converts a camelCase / PascalCase identifier to snake_case, e.g.
 * "TeacherStatics" -> "teacher_statics". Each upper-case character is
 * replaced by an underscore followed by its lower-case form (so consecutive
 * capitals each get their own underscore).
 *
 * @param camelCase the identifier to convert; null or empty is returned as-is
 * @return the snake_case form of the input
 */
public static String toSnakeCase(String camelCase) {
    if (camelCase == null || camelCase.isEmpty()) {
        return camelCase;
    }
    // Presize for the worst case where every character gains an underscore.
    StringBuilder result = new StringBuilder(camelCase.length() * 2);
    // Lower-case the first character. Character.toLowerCase avoids the
    // locale-sensitive behavior of String.toLowerCase (e.g. Turkish dotless i)
    // that substring(0, 1).toLowerCase() was subject to.
    result.append(Character.toLowerCase(camelCase.charAt(0)));
    for (int i = 1; i < camelCase.length(); i++) {
        char c = camelCase.charAt(i);
        if (Character.isUpperCase(c)) {
            // Upper-case character: insert an underscore and lower-case it.
            result.append('_').append(Character.toLowerCase(c));
        } else {
            // Otherwise, copy the character through unchanged.
            result.append(c);
        }
    }
    return result.toString();
}
|
/**
 * PascalCase input must convert to lower snake_case.
 */
@Test
void toSnakeCase() {
    final String converted = StringUtils.toSnakeCase("TeacherStatics");
    assertEquals("teacher_statics", converted);
}
|
/**
 * Validates a list of "Res=Perm" resource declarations, e.g. "topicA=SUB".
 * A null or empty list is accepted. Each entry must split into exactly two
 * parts, and the permission part must either be the literal DENY constant or
 * parse to a recognized (non-DENY) permission.
 *
 * @param resources resource declarations to validate; may be null/empty
 * @throws AclException if an entry is malformed or its permission is unknown
 */
public static void checkResourcePerms(List<String> resources) {
    // Nothing to validate when no resources were supplied.
    if (resources == null || resources.isEmpty()) {
        return;
    }
    for (String resource : resources) {
        String[] items = StringUtils.split(resource, "=");
        // Each entry must be exactly "Res=Perm".
        if (items.length != 2) {
            throw new AclException(String.format("Parse Resource format error for %s.\n" +
                "The expected resource format is 'Res=Perm'. For example: topicA=SUB", resource));
        }
        // DENY is accepted literally; anything else must parse to a non-DENY
        // permission (the parser returns DENY for unrecognized input).
        String permission = items[1].trim();
        if (!AclConstants.DENY.equals(permission) && Permission.DENY == Permission.parsePermFromString(permission)) {
            throw new AclException(String.format("Parse resource permission error for %s.\n" +
                "The expected permissions are 'SUB' or 'PUB' or 'SUB|PUB' or 'PUB|SUB'.", resource));
        }
    }
}
|
// "topicA=" has no permission token after the separator, so the entry cannot
// split into the two required parts and must be rejected with AclException.
@Test(expected = AclException.class)
public void checkResourcePermsExceptionTest2() {
    Permission.checkResourcePerms(Arrays.asList("topicA="));
}
|
/**
 * Writes a float into the byte array at the given offset in little-endian
 * order by reinterpreting its IEEE-754 bit pattern as an int and reusing the
 * little-endian int writer.
 *
 * @param f     value to encode
 * @param bytes destination buffer
 * @param off   write offset into {@code bytes}
 * @return the destination buffer (same behavior as {@code intToBytesLE})
 */
public static byte[] floatToBytesLE(float f, byte[] bytes, int off) {
    final int bits = Float.floatToIntBits(f);
    return intToBytesLE(bits, bytes, off);
}
|
// (float) Math.PI encoded little-endian must match the expected byte fixture.
@Test
public void testFloatToBytesLE() {
    assertArrayEquals(FLOAT_PI_LE ,
        ByteUtils.floatToBytesLE((float) Math.PI, new byte[4], 0));
}
|
/**
 * Folds host info received from a distributor into the stats aggregator, but
 * only when the reported cluster state version matches the current one.
 * Non-distributor nodes and reports without a version are ignored.
 */
public void handleUpdatedHostInfo(NodeInfo node, HostInfo hostInfo) {
    // Only distributors report the stats aggregated here.
    if (!node.isDistributor()) {
        return;
    }
    // TODO: Consider logging a warning in the future (>5.36).
    // For now, a missing cluster state version probably means the content
    // node has not been updated yet.
    Integer reportedVersion = hostInfo.getClusterStateVersionOrNull();
    if (reportedVersion == null) {
        return;
    }
    final int hostVersion = reportedVersion;
    final int currentStateVersion = clusterState.getVersion();
    if (hostVersion != currentStateVersion) {
        // The distributor may be old (null), or the distributor may not have updated
        // to the latest state version just yet. We log here with fine, because it may
        // also be a symptom of something wrong.
        log.log(Level.FINE, () -> "Current state version is " + currentStateVersion +
                ", while host info received from distributor " + node.getNodeIndex() +
                " is " + hostVersion);
        return;
    }
    statsAggregator.updateForDistributor(node.getNodeIndex(),
            StorageNodeStatsBridge.generate(hostInfo.getDistributor()));
}
|
/**
 * A distributor reporting a stale cluster state version (22 vs current 101)
 * must not contribute to the stats aggregator.
 */
@Test
void testStateVersionMismatch() {
    when(nodeInfo.isDistributor()).thenReturn(true);
    when(clusterState.getVersion()).thenReturn(101);
    clusterStateView.handleUpdatedHostInfo(nodeInfo, createHostInfo("22"));
    verify(statsAggregator, never()).updateForDistributor(anyInt(), any());
}
|
/**
 * Returns a transform that, for each {@code KV<K, TimestampedValue<V>>},
 * produces {@code KV<K, V>} — per the accompanying tests, the element's own
 * timestamp becomes the one carried in the value.
 *
 * @deprecated retained for backward compatibility; confirm the preferred
 *     replacement in the enclosing class's documentation before new use.
 */
@Deprecated
public static <K, V>
    PTransform<PCollection<? extends KV<K, TimestampedValue<V>>>, PCollection<KV<K, V>>>
    extractFromValues() {
    return new ExtractTimestampsFromValues<>();
}
|
/**
 * Elements whose wrapped value timestamps are earlier than their element
 * timestamps must end up re-timestamped to the value's timestamp, with the
 * values unwrapped.
 */
@Test
@Category(ValidatesRunner.class)
public void extractFromValuesWhenValueTimestampedLaterSucceeds() {
    // Element timestamps (100..103) deliberately differ from the wrapped
    // value timestamps (0..3).
    PCollection<KV<String, TimestampedValue<Integer>>> preified =
        pipeline.apply(
            Create.timestamped(
                TimestampedValue.of(
                    KV.of("foo", TimestampedValue.of(0, new Instant(0))), new Instant(100)),
                TimestampedValue.of(
                    KV.of("foo", TimestampedValue.of(1, new Instant(1))), new Instant(101L)),
                TimestampedValue.of(
                    KV.of("bar", TimestampedValue.of(2, new Instant(2))), new Instant(102L)),
                TimestampedValue.of(
                    KV.of("baz", TimestampedValue.of(3, new Instant(3))), new Instant(103L))));
    PCollection<KV<String, Integer>> timestamped =
        preified.apply(ReifyTimestamps.extractFromValues());
    PAssert.that(timestamped)
        .containsInAnyOrder(KV.of("foo", 0), KV.of("foo", 1), KV.of("bar", 2), KV.of("baz", 3));
    // The test data encodes each element's expected timestamp as its value,
    // so the element timestamp must equal the value after reification.
    timestamped.apply(
        "AssertElementTimestamps",
        ParDo.of(
            new DoFn<KV<String, Integer>, Void>() {
                @ProcessElement
                public void verifyTimestampsEqualValue(ProcessContext context) {
                    assertThat(
                        new Instant(context.element().getValue().longValue()),
                        equalTo(context.timestamp()));
                }
            }));
    pipeline.run();
}
|
/**
 * Membership test over the union of the per-index result sets: the entry is
 * contained if any single index's result set contains it.
 */
@Override
public boolean contains(Object o) {
    return indexedResults.stream().anyMatch(resultSet -> resultSet.contains(o));
}
|
/**
 * Every entry from either backing set must be contained in the OR result set,
 * and an entry from neither set must not be.
 */
@Test
public void contains() {
    int size = 100000;
    Set<QueryableEntry> entries1 = generateEntries(size);
    Set<QueryableEntry> entries2 = generateEntries(size);
    List<Set<QueryableEntry>> indexedResults = new ArrayList<>();
    indexedResults.add(entries1);
    indexedResults.add(entries2);
    OrResultSet resultSet = new OrResultSet(indexedResults);
    // The union of both inputs is the expected membership set.
    Set<QueryableEntry> combinedEntries = new HashSet<>(entries1);
    combinedEntries.addAll(entries2);
    for (QueryableEntry entry : combinedEntries) {
        assertContains(resultSet, entry);
    }
    assertNotContains(resultSet, new DummyEntry());
}
|
/**
 * Sends this request synchronously through the underlying web3j service and
 * deserializes the reply into this request's response type.
 *
 * @return the deserialized response
 * @throws IOException if the transport fails
 */
public T send() throws IOException {
    return web3jService.send(this, responseType);
}
|
/**
 * eth_getProof must serialize to a JSON-RPC request with the address, the
 * storage-key array, and the block parameter in that order.
 */
@Test
public void testEthGetProof() throws Exception {
    web3j.ethGetProof(
            "0x7F0d15C7FAae65896648C8273B6d7E43f58Fa842",
            Arrays.asList(
                "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
            "latest")
        .send();
    verifyResult(
        "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getProof\","
            + "\"params\":[\"0x7F0d15C7FAae65896648C8273B6d7E43f58Fa842\","
            + "[\"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\"],"
            + "\"latest\"],"
            + "\"id\":0}");
}
|
/**
 * Materializes this scan into a Spark {@link Batch} over the planned task
 * groups. This scan's hashCode() is forwarded to the batch — presumably for
 * batch identity/equality; confirm against SparkBatch.
 */
@Override
public Batch toBatch() {
    return new SparkBatch(
        sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode());
}
|
/**
 * Pushing an hours(ts) >= predicate down to a table partitioned by hours(ts)
 * must prune input partitions, and negating the predicate must select the
 * complementary partitions (8 vs 2 of the 10 total).
 */
@TestTemplate
public void testPartitionedHours() throws Exception {
    createPartitionedTable(spark, tableName, "hours(ts)");
    SparkScanBuilder builder = scanBuilder();
    HoursFunction.TimestampToHoursFunction function = new HoursFunction.TimestampToHoursFunction();
    UserDefinedScalarFunc udf = toUDF(function, expressions(fieldRef("ts")));
    Predicate predicate =
        new Predicate(
            ">=",
            expressions(
                udf, intLit(timestampStrToHourOrdinal("2017-11-22T06:02:09.243857+00:00"))));
    pushFilters(builder, predicate);
    Batch scan = builder.build().toBatch();
    assertThat(scan.planInputPartitions().length).isEqualTo(8);
    // NOT GTEQ — the complement must cover the remaining partitions.
    builder = scanBuilder();
    predicate = new Not(predicate);
    pushFilters(builder, predicate);
    scan = builder.build().toBatch();
    assertThat(scan.planInputPartitions().length).isEqualTo(2);
}
|
/**
 * Starts with the default mode. Delegates to {@code start(false)}; see the
 * boolean overload for the flag's semantics.
 */
public void start() {
    start(false);
}
|
// Manual/interactive check (disabled): tails the last 2 lines of a local file
// to the console. Depends on a file on the developer's machine, so it cannot
// run in CI.
@Test
@Disabled
public void tailWithLinesTest() {
    Tailer tailer = new Tailer(FileUtil.file("f:/test/test.log"), Tailer.CONSOLE_HANDLER, 2);
    tailer.start();
}
|
/**
 * Creates a retry context for a single invocation. Each call returns a new,
 * independent context instance.
 */
@Override
public <T extends Recorder> RetryContext<T> context() {
    return new RetryContextImpl<>();
}
|
/**
 * Exercises the retry context lifecycle against a mocked recorder: onBefore
 * records the request, a retryable IO error is recorded, a non-retryable
 * error surfaces as RetryException, onResult records timing and reports no
 * retry needed, and onComplete closes out the request.
 */
@Test
public void contextTest() throws Exception {
    final Retry retry = Retry.create(build("context", false));
    final RetryContext<Recorder> context = retry.context();
    final Recorder recorder = Mockito.mock(Recorder.class);
    context.onBefore(recorder);
    Mockito.verify(recorder, Mockito.times(1)).beforeRequest();
    long consumeTimeMs = 100L;
    // A connect error is a retryable failure and must be recorded as such.
    final IOException ioError = new ConnectException("io error");
    context.onError(recorder, ioError, consumeTimeMs);
    Mockito.verify(recorder, Mockito.times(1)).errorRequest(ioError, consumeTimeMs);
    // A non-retryable exception type must be rethrown wrapped as RetryException.
    boolean isRight = false;
    try {
        context.onError(recorder, new IllegalArgumentException("error"), consumeTimeMs);
    } catch (RetryException ex) {
        isRight = true;
    }
    Assert.assertTrue(isRight);
    // A successful result: no further retry, but timing must be recorded.
    final boolean result = context.onResult(recorder, new Object(), consumeTimeMs);
    Assert.assertFalse(result);
    Mockito.verify(recorder, Mockito.times(1)).afterRequest(consumeTimeMs);
    context.onComplete(recorder);
    Mockito.verify(recorder, Mockito.times(1)).completeRequest();
}
|
/**
 * Finds the first bookmark in this collection whose UUID equals the given
 * one, or null when there is no match.
 *
 * @param uuid UUID to search for
 * @return the matching host, or null
 */
public Host lookup(final String uuid) {
    for (Host candidate : this) {
        if (candidate.getUuid().equals(uuid)) {
            return candidate;
        }
    }
    return null;
}
|
/**
 * Lookup by UUID (and the compare-predicate find) must miss on an empty
 * collection and hit once the bookmark has been added.
 */
@Test
public void testLookup() {
    final AbstractHostCollection c = new AbstractHostCollection() {
    };
    final Host bookmark = new Host(new TestProtocol());
    // Empty collection: neither predicate-find nor UUID lookup matches.
    assertFalse(c.find(new AbstractHostCollection.HostComparePredicate(bookmark)).isPresent());
    assertNull(c.lookup(bookmark.getUuid()));
    c.add(bookmark);
    assertTrue(c.find(new AbstractHostCollection.HostComparePredicate(bookmark)).isPresent());
    assertNotNull(c.lookup(bookmark.getUuid()));
}
|
/**
 * Maximum number of connections this strategy permits (the configured
 * {@code maxConnections} value).
 */
@Override
public int permitMaximum() {
    return maxConnections;
}
|
/**
 * Configuring maxConnections must surface through permitMaximum() while the
 * other strategy settings keep their defaults.
 */
@Test
void permitMaximum() {
    builder.maxConnections(2);
    Http2AllocationStrategy strategy = builder.build();
    assertThat(strategy.maxConcurrentStreams()).isEqualTo(DEFAULT_MAX_CONCURRENT_STREAMS);
    assertThat(strategy.permitMaximum()).isEqualTo(2);
    assertThat(strategy.permitMinimum()).isEqualTo(DEFAULT_MIN_CONNECTIONS);
}
|
/**
 * Creates (or updates, per the service's create-or-update semantics) a
 * selector from the validated request body.
 *
 * @param selectorDTO validated selector payload
 * @return success result carrying the CREATE_SUCCESS message and the selector id
 */
@PostMapping("")
public ShenyuAdminResult createSelector(@Valid @RequestBody final SelectorDTO selectorDTO) {
    selectorService.createOrUpdate(selectorDTO);
    return ShenyuAdminResult.success(ShenyuResultMessage.CREATE_SUCCESS, selectorDTO.getId());
}
|
/**
 * POSTing a valid selector must return 200 with the CREATE_SUCCESS message.
 * The mapper beans are stubbed through the SpringBeanUtils holder so the
 * DTO's @Valid existence checks pass.
 */
@Test
public void createSelector() throws Exception {
    SelectorDTO selectorDTO = SelectorDTO.builder()
        .id("123")
        .name("test123")
        .continued(true)
        .type(1)
        .loged(true)
        .enabled(true)
        .matchRestful(false)
        .pluginId("2")
        .sort(1)
        .namespaceId(SYS_DEFAULT_NAMESPACE_ID)
        .build();
    // Validation of the DTO resolves mappers via the static bean holder, so
    // each existence check must be stubbed to succeed.
    SpringBeanUtils.getInstance().setApplicationContext(mock(ConfigurableApplicationContext.class));
    when(SpringBeanUtils.getInstance().getBean(SelectorMapper.class)).thenReturn(selectorMapper);
    when(selectorMapper.existed(selectorDTO.getId())).thenReturn(true);
    when(SpringBeanUtils.getInstance().getBean(PluginMapper.class)).thenReturn(pluginMapper);
    when(pluginMapper.existed(selectorDTO.getPluginId())).thenReturn(true);
    when(SpringBeanUtils.getInstance().getBean(NamespaceMapper.class)).thenReturn(namespaceMapper);
    when(namespaceMapper.existed(SYS_DEFAULT_NAMESPACE_ID)).thenReturn(true);
    given(this.selectorService.createOrUpdate(selectorDTO)).willReturn(1);
    this.mockMvc.perform(MockMvcRequestBuilders.post("/selector")
            .contentType(MediaType.APPLICATION_JSON)
            .content(GsonUtils.getInstance().toJson(selectorDTO)))
        .andExpect(status().isOk())
        .andExpect(jsonPath("$.message", is(ShenyuResultMessage.CREATE_SUCCESS)))
        .andReturn();
}
|
/**
 * Removes the broker's heartbeat state from the map and, if it was actually
 * tracked, detaches it from the ordering structure as well.
 *
 * @param brokerId id of the broker to forget; unknown ids are a no-op
 */
void remove(int brokerId) {
    BrokerHeartbeatState removed = brokers.remove(brokerId);
    if (removed == null) {
        return;
    }
    untrack(removed);
}
|
/**
 * The heartbeat list must iterate brokers ordered by lastContactNs
 * (oldest-contact first), expose the oldest via first(), and preserve that
 * order after a removal.
 */
@Test
public void testBrokerHeartbeatStateList() {
    BrokerHeartbeatStateList list = new BrokerHeartbeatStateList();
    // Empty list: no first element, empty iterator.
    assertNull(list.first());
    BrokerHeartbeatStateIterator iterator = list.iterator();
    assertFalse(iterator.hasNext());
    // Insert out of order; lastContactNs determines the expected ordering.
    BrokerHeartbeatState broker0 = new BrokerHeartbeatState(0);
    broker0.lastContactNs = 200;
    BrokerHeartbeatState broker1 = new BrokerHeartbeatState(1);
    broker1.lastContactNs = 100;
    BrokerHeartbeatState broker2 = new BrokerHeartbeatState(2);
    broker2.lastContactNs = 50;
    BrokerHeartbeatState broker3 = new BrokerHeartbeatState(3);
    broker3.lastContactNs = 150;
    list.add(broker0);
    list.add(broker1);
    list.add(broker2);
    list.add(broker3);
    assertEquals(broker2, list.first());
    iterator = list.iterator();
    assertEquals(broker2, iterator.next());
    assertEquals(broker1, iterator.next());
    assertEquals(broker3, iterator.next());
    assertEquals(broker0, iterator.next());
    assertFalse(iterator.hasNext());
    // Removing an interior element must keep the remaining order intact.
    list.remove(broker1);
    iterator = list.iterator();
    assertEquals(broker2, iterator.next());
    assertEquals(broker3, iterator.next());
    assertEquals(broker0, iterator.next());
    assertFalse(iterator.hasNext());
}
|
/**
 * Asynchronously pops messages from the given broker. On success the raw
 * response is decoded into a PopResult and handed to the callback; decoding
 * failures and transport failures are routed to the callback's onException.
 *
 * @param brokerName    broker name used when decoding the pop response
 * @param addr          broker address to send the request to
 * @param requestHeader pop request parameters (topic, group, etc.)
 * @param timeoutMillis async invoke timeout
 * @param popCallback   receiver for the decoded result or the failure
 * @throws RemotingException    if the async invoke cannot be issued
 * @throws InterruptedException if interrupted while issuing the request
 */
public void popMessageAsync(
    final String brokerName, final String addr, final PopMessageRequestHeader requestHeader,
    final long timeoutMillis, final PopCallback popCallback
) throws RemotingException, InterruptedException {
    final RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.POP_MESSAGE, requestHeader);
    this.remotingClient.invokeAsync(addr, request, timeoutMillis, new InvokeCallback() {
        @Override
        public void operationComplete(ResponseFuture responseFuture) {
            // Intentionally empty: success/failure are handled by the
            // dedicated callbacks below.
        }
        @Override
        public void operationSucceed(RemotingCommand response) {
            try {
                PopResult popResult = MQClientAPIImpl.this.processPopResponse(brokerName, response, requestHeader.getTopic(), requestHeader);
                popCallback.onSuccess(popResult);
            } catch (Exception e) {
                // Decoding problems surface through the same error path as
                // transport failures.
                popCallback.onException(e);
            }
        }
        @Override
        public void operationFail(Throwable throwable) {
            popCallback.onException(throwable);
        }
    });
}
|
/**
 * A successful pop response (header fields, offset info, and one encoded
 * message body) must decode into a FOUND PopResult whose header-derived
 * fields and message list match what the stubbed broker returned.
 */
@Test
public void testPopMessageAsync_Success() throws Exception {
    final long popTime = System.currentTimeMillis();
    final int invisibleTime = 10 * 1000;
    // Stub the remoting layer: fabricate a full SUCCESS pop response and feed
    // it straight into the caller's InvokeCallback.
    doAnswer((Answer<Void>) mock -> {
        InvokeCallback callback = mock.getArgument(3);
        RemotingCommand request = mock.getArgument(1);
        ResponseFuture responseFuture = new ResponseFuture(null, request.getOpaque(), 3 * 1000, null, null);
        RemotingCommand response = RemotingCommand.createResponseCommand(PopMessageResponseHeader.class);
        response.setCode(ResponseCode.SUCCESS);
        response.setOpaque(request.getOpaque());
        PopMessageResponseHeader responseHeader = (PopMessageResponseHeader) response.readCustomHeader();
        responseHeader.setInvisibleTime(invisibleTime);
        responseHeader.setPopTime(popTime);
        responseHeader.setReviveQid(0);
        responseHeader.setRestNum(1);
        StringBuilder startOffsetInfo = new StringBuilder(64);
        ExtraInfoUtil.buildStartOffsetInfo(startOffsetInfo, topic, 0, 0L);
        responseHeader.setStartOffsetInfo(startOffsetInfo.toString());
        StringBuilder msgOffsetInfo = new StringBuilder(64);
        ExtraInfoUtil.buildMsgOffsetInfo(msgOffsetInfo, topic, 0, Collections.singletonList(0L));
        responseHeader.setMsgOffsetInfo(msgOffsetInfo.toString());
        response.setRemark("FOUND");
        response.makeCustomHeaderToNet();
        // One fully-populated message is encoded into the response body.
        MessageExt message = new MessageExt();
        message.setQueueId(0);
        message.setFlag(12);
        message.setQueueOffset(0L);
        message.setCommitLogOffset(100L);
        message.setSysFlag(0);
        message.setBornTimestamp(System.currentTimeMillis());
        message.setBornHost(new InetSocketAddress("127.0.0.1", 10));
        message.setStoreTimestamp(System.currentTimeMillis());
        message.setStoreHost(new InetSocketAddress("127.0.0.1", 11));
        message.setBody("body".getBytes());
        message.setTopic(topic);
        message.putUserProperty("key", "value");
        response.setBody(MessageDecoder.encode(message, false));
        responseFuture.setResponseCommand(response);
        callback.operationSucceed(responseFuture.getResponseCommand());
        return null;
    }).when(remotingClient).invokeAsync(anyString(), any(RemotingCommand.class), anyLong(), any(InvokeCallback.class));
    // The callback runs asynchronously, so gate the test on a latch.
    final CountDownLatch done = new CountDownLatch(1);
    mqClientAPI.popMessageAsync(brokerName, brokerAddr, new PopMessageRequestHeader(), 10 * 1000, new PopCallback() {
        @Override
        public void onSuccess(PopResult popResult) {
            assertThat(popResult.getPopStatus()).isEqualTo(PopStatus.FOUND);
            assertThat(popResult.getRestNum()).isEqualTo(1);
            assertThat(popResult.getInvisibleTime()).isEqualTo(invisibleTime);
            assertThat(popResult.getPopTime()).isEqualTo(popTime);
            assertThat(popResult.getMsgFoundList()).size().isEqualTo(1);
            done.countDown();
        }
        @Override
        public void onException(Throwable e) {
            Assertions.fail("want no exception but got one", e);
            done.countDown();
        }
    });
    done.await();
}
|
/**
 * Parses {@code svn info --xml} output and maps the query URL to the
 * repository UUID found in each {@code <entry>/<repository>/<uuid>} element.
 * Note: every entry is keyed by the same query URL, so with multiple entries
 * the last UUID wins; svn info for a single URL normally yields one entry.
 *
 * @param output   raw XML from {@code svn info --xml}
 * @param queryURL the URL that was queried (becomes the map key)
 * @param builder  XML parser to use
 * @return a map from {@code queryURL} to the repository UUID
 * @throws RuntimeException wrapping any parse failure, with the URL for context
 */
public HashMap<String, String> parseInfoToGetUUID(String output, String queryURL, SAXBuilder builder) {
    HashMap<String, String> uidToUrlMap = new HashMap<>();
    try {
        Document document = builder.build(new StringReader(output));
        Element root = document.getRootElement();
        List<Element> entries = root.getChildren("entry");
        for (Element entry : entries) {
            uidToUrlMap.put(queryURL, entry.getChild("repository").getChild("uuid").getValue());
        }
    } catch (Exception e) {
        // Preserve the cause but add context about which URL's output failed
        // to parse — the bare RuntimeException(e) gave no hint.
        throw new RuntimeException("Failed to parse svn info output for " + queryURL, e);
    }
    return uidToUrlMap;
}
|
/**
 * A realistic `svn info --xml` payload must parse into a single-entry map
 * from the queried URL to the repository UUID.
 */
@Test
public void shouldParseSvnInfoOutputToConstructUrlToRemoteUUIDMapping() {
    final SvnLogXmlParser svnLogXmlParser = new SvnLogXmlParser();
    final String svnInfoOutput = """
            <?xml version="1.0"?>
            <info>
            <entry
               kind="dir"
               path="trunk"
               revision="3432">
            <url>http://gears.googlecode.com/svn/trunk</url>
            <repository>
            <root>http://gears.googlecode.com/svn</root>
            <uuid>fe895e04-df30-0410-9975-d76d301b4276</uuid>
            </repository>
            <commit
               revision="3430">
            <author>gears.daemon</author>
            <date>2010-10-06T02:00:50.517477Z</date>
            </commit>
            </entry>
            </info>""";
    final HashMap<String, String> map = svnLogXmlParser.parseInfoToGetUUID(svnInfoOutput, "http://gears.googlecode.com/svn/trunk", new SAXBuilder());
    assertThat(map.size()).isEqualTo(1);
    assertThat(map.get("http://gears.googlecode.com/svn/trunk")).isEqualTo("fe895e04-df30-0410-9975-d76d301b4276");
}
|
/**
 * Shortens an over-long environment name to the configured limit while
 * keeping it unique: the name is truncated and an MD5 digest of the full
 * name is appended, so long names differing only in their tails stay
 * distinguishable. Blank or within-limit names pass through untouched.
 *
 * @param envName environment name, possibly blank or null
 * @return the original name, or its truncated-plus-digest form
 */
public static String simplyEnvNameIfOverLimit(String envName) {
    if (StringUtils.isBlank(envName) || envName.length() <= MAX_ENV_NAME_LENGTH) {
        return envName;
    }
    return envName.substring(0, MAX_ENV_NAME_LENGTH) + MD5Utils.md5Hex(envName, "UTF-8");
}
|
/**
 * A name longer than the limit must come back truncated to the limit with
 * the MD5 of the full original name appended.
 */
@Test
void testSimplyEnvNameIfOverLimit() {
    // Build a name well past the 50-character limit.
    StringBuilder envNameOverLimitBuilder = new StringBuilder("test");
    for (int i = 0; i < 50; i++) {
        envNameOverLimitBuilder.append(i);
    }
    String envName = envNameOverLimitBuilder.toString();
    String actual = ParamUtil.simplyEnvNameIfOverLimit(envName);
    String expect = envName.substring(0, 50) + MD5Utils.md5Hex(envName, "UTF-8");
    assertEquals(expect, actual);
}
|
/**
 * Unregisters the given KRaft node IDs through a Kafka Admin client connected
 * to the cluster's internal replication listener. The Admin client is closed
 * once all per-node unregistrations complete (success or failure). Failure to
 * create the client is logged and returned as a failed future.
 *
 * @param reconciliation      reconciliation context (names/namespace, logging)
 * @param vertx               Vert.x instance used by the per-node calls
 * @param adminClientProvider factory for the Admin client
 * @param pemTrustSet         TLS trust set for the connection
 * @param pemAuthIdentity     TLS client identity for the connection
 * @param nodeIdsToUnregister node IDs to unregister
 * @return a future completing when every node has been unregistered
 */
public static Future<Void> unregisterNodes(
    Reconciliation reconciliation,
    Vertx vertx,
    AdminClientProvider adminClientProvider,
    PemTrustSet pemTrustSet,
    PemAuthIdentity pemAuthIdentity,
    List<Integer> nodeIdsToUnregister
) {
    try {
        // Connect via the internal bootstrap service on the replication port.
        String bootstrapHostname = KafkaResources.bootstrapServiceName(reconciliation.name()) + "." + reconciliation.namespace() + ".svc:" + KafkaCluster.REPLICATION_PORT;
        Admin adminClient = adminClientProvider.createAdminClient(bootstrapHostname, pemTrustSet, pemAuthIdentity);
        List<Future<Void>> futures = new ArrayList<>();
        for (Integer nodeId : nodeIdsToUnregister) {
            futures.add(unregisterNode(reconciliation, vertx, adminClient, nodeId));
        }
        return Future.all(futures)
            // eventually(): close the client whether the calls succeeded or not.
            .eventually(() -> {
                adminClient.close();
                return Future.succeededFuture();
            })
            .map((Void) null);
    } catch (KafkaException e) {
        LOGGER.warnCr(reconciliation, "Failed to unregister nodes", e);
        return Future.failedFuture(e);
    }
}
|
/**
 * Unregistering two node IDs must issue exactly one unregisterBroker call per
 * ID against the Admin client and complete successfully.
 */
@Test
void testUnregistration(VertxTestContext context) {
    Admin mockAdmin = ResourceUtils.adminClient();
    UnregisterBrokerResult ubr = mock(UnregisterBrokerResult.class);
    when(ubr.all()).thenReturn(KafkaFuture.completedFuture(null));
    // Capture the node IDs passed to unregisterBroker for verification.
    ArgumentCaptor<Integer> unregisteredNodeIdCaptor = ArgumentCaptor.forClass(Integer.class);
    when(mockAdmin.unregisterBroker(unregisteredNodeIdCaptor.capture())).thenReturn(ubr);
    AdminClientProvider mockProvider = ResourceUtils.adminClientProvider(mockAdmin);
    Checkpoint async = context.checkpoint();
    KafkaNodeUnregistration.unregisterNodes(Reconciliation.DUMMY_RECONCILIATION, vertx, mockProvider, null, null, List.of(1874, 1919))
        .onComplete(context.succeeding(v -> context.verify(() -> {
            assertThat(unregisteredNodeIdCaptor.getAllValues().size(), is(2));
            assertThat(unregisteredNodeIdCaptor.getAllValues(), hasItems(1874, 1919));
            async.flag();
        })));
}
|
/**
 * Removes an index for the column. Text and vector indexes live in their own
 * files and are deleted immediately; all other index types are only marked
 * for removal here and physically cleaned up when this directory is closed.
 *
 * @param columnName column whose index is removed
 * @param indexType  type of index to remove
 */
@Override
public void removeIndex(String columnName, IndexType<?, ?, ?> indexType) {
    // Text index is kept in its own files, thus can be removed directly.
    if (indexType == StandardIndexes.text()) {
        TextIndexUtils.cleanupTextIndex(_segmentDirectory, columnName);
        return;
    }
    // Vector index is likewise stored separately and removable in place.
    if (indexType == StandardIndexes.vector()) {
        VectorIndexUtils.cleanupVectorIndex(_segmentDirectory, columnName);
        return;
    }
    // Only remember to cleanup indices upon close(), if any existing
    // index gets marked for removal.
    if (_columnEntries.remove(new IndexKey(columnName, indexType)) != null) {
        _shouldCleanupRemovedIndices = true;
    }
}
|
/**
 * After creating a dictionary buffer and removing the index, the directory
 * must report the index as absent.
 */
@Test
public void testRemoveIndex()
    throws IOException, ConfigurationException {
    try (SingleFileIndexDirectory sfd = new SingleFileIndexDirectory(TEMP_DIR, _segmentMetadata, ReadMode.mmap)) {
        sfd.newBuffer("col1", StandardIndexes.dictionary(), 1024);
        sfd.removeIndex("col1", StandardIndexes.dictionary());
        assertFalse(sfd.hasIndexFor("col1", StandardIndexes.dictionary()));
    }
}
|
/**
 * Creates an empty array backed by a buffer of the given initial capacity.
 *
 * @param capacity initial backing-array size; a negative value fails with
 *                 NegativeArraySizeException from the array allocation
 */
public IntArray(int capacity) {
    elementData = new int[capacity];
}
|
/**
 * Filling the array to its capacity must keep the backing buffer, and adding
 * one more element must trigger a reallocation (new backing array instance).
 * NOTE(review): assertEquals on two int[] compares references under JUnit —
 * this only passes with a framework doing deep array comparison (e.g.
 * TestNG); verify which framework is in use.
 */
@Test
public void testIntArray() {
    IntArray array = new IntArray(10);
    for (int i = 0; i < 10; i++) {
        array.add(i * 2);
    }
    assertEquals(array.elementData, IntStream.range(0, 10).map(i -> i * 2).toArray());
    // The 11th add must grow the buffer, replacing the backing array.
    int[] elementData = array.elementData;
    array.add(1);
    assertNotSame(elementData, array.elementData);
}
|
/**
 * Lists services in the default group. Delegates to the group-aware overload
 * with {@code Constants.DEFAULT_GROUP}.
 *
 * @param pageNo   1-based page number
 * @param pageSize page size
 * @return the requested page of service names
 * @throws NacosException if the underlying query fails
 */
@Override
public ListView<String> getServicesOfServer(int pageNo, int pageSize) throws NacosException {
    return getServicesOfServer(pageNo, pageSize, Constants.DEFAULT_GROUP);
}
|
/**
 * The selector-taking overload must forward the paging arguments, the default
 * group, and the selector to the proxy unchanged.
 */
@Test
void testGetServicesOfServer3() throws NacosException {
    //given
    int pageNo = 1;
    int pageSize = 10;
    AbstractSelector selector = new AbstractSelector("aaa") {
        @Override
        public String getType() {
            return super.getType();
        }
    };
    //when
    client.getServicesOfServer(pageNo, pageSize, selector);
    //then
    verify(proxy, times(1)).getServiceList(pageNo, pageSize, Constants.DEFAULT_GROUP, selector);
}
|
/**
 * Returns the platform line separator.
 *
 * @return the system line separator (e.g. "\n" on Unix, "\r\n" on Windows)
 */
public static String getLineSeparator() {
    // System.lineSeparator() returns the cached separator directly instead of
    // performing a system-property lookup on every call (and avoids any
    // SecurityManager property-access check).
    return System.lineSeparator();
}
|
/**
 * The returned line separator must be a non-empty string.
 */
@Test
void testGetLineSeparator() {
    final String separator = DiskCache.getLineSeparator();
    assertTrue(!separator.isEmpty());
}
|
/**
 * Returns the smallest non-null element of the array according to natural
 * ordering, or null when the array is null or contains no non-null elements.
 *
 * @param input array of values from which to find the minimum; may be null
 * @return the minimum element, or null
 */
@Udf
public <T extends Comparable<? super T>> T arrayMin(@UdfParameter(
    description = "Array of values from which to find the minimum") final List<T> input) {
    if (input == null) {
        return null;
    }
    // Track the smallest non-null element seen so far; null entries are skipped.
    T smallest = null;
    for (T element : input) {
        if (element == null) {
            continue;
        }
        if (smallest == null || element.compareTo(smallest) < 0) {
            smallest = element;
        }
    }
    return smallest;
}
|
/**
 * The minimum of a BigDecimal array must be found by value, including a
 * negative candidate.
 */
@Test
public void shouldFindDecimalMin() {
    final List<BigDecimal> input =
        Arrays.asList(BigDecimal.valueOf(1.2), BigDecimal.valueOf(1.3), BigDecimal.valueOf(-1.2));
    assertThat(udf.arrayMin(input), is(BigDecimal.valueOf(-1.2)));
}
|
/**
 * Compares two BigDecimals by numeric value, ignoring scale — e.g. 332.2
 * (scale 1) equals 332.20 (scale 2).
 *
 * @param one     first value, must not be null
 * @param another second value, must not be null
 * @return true when the two values are numerically equal
 */
public static boolean isBigDecimalEquals(final BigDecimal one, final BigDecimal another) {
    // BigDecimal.compareTo already treats values that differ only in scale as
    // equal, so the previous setScale(..., RoundingMode.UNNECESSARY) alignment
    // dance was redundant — this single comparison is exactly equivalent.
    return 0 == one.compareTo(another);
}
|
/**
 * 332.2 (scale 1) and 332.20 (scale 2) are numerically equal and must compare
 * as equal despite the differing scales.
 */
@Test
void assertIsBigDecimalEquals() {
    BigDecimal one = BigDecimal.valueOf(3322L, 1);
    BigDecimal another = BigDecimal.valueOf(33220L, 2);
    assertTrue(DataConsistencyCheckUtils.isBigDecimalEquals(one, another));
}
|
/**
 * Returns the group names known for the cluster. The per-cluster node map is
 * created lazily, so this never returns null — an unknown cluster yields an
 * empty (live) key set.
 *
 * @param clusterName cluster to look up
 * @return the group names registered under the cluster
 */
public Set<String> groups(String clusterName) {
    return clusterNodes
            .computeIfAbsent(clusterName, clusterKey -> new ConcurrentHashMap<>())
            .keySet();
}
|
/**
 * Querying groups for a cluster that was never registered must not throw
 * (the per-cluster map is created lazily).
 */
@Test
public void testGroups() {
    Assertions.assertDoesNotThrow(() -> metadata.groups("cluster"));
}
|
/**
 * Converts a duration in milliseconds into a human-readable label result.
 * The duration is converted once to seconds and each coarser unit is derived
 * from the previous one; {@code getMessage} then picks the appropriate unit.
 *
 * @param durationInMillis duration to label, in milliseconds
 * @return the label result produced by {@code getMessage}
 */
public static Result label(long durationInMillis) {
    double seconds = durationInMillis / 1000.0;
    double minutes = seconds / 60;
    double hours = minutes / 60;
    double days = hours / 24;
    double years = days / 365;
    return getMessage(seconds, minutes, hours, days, years);
}
|
/**
 * A duration of 3 hours must resolve to the "duration.hours" key with the
 * hour count as the value.
 */
@Test
public void age_in_hours() {
    long hours = 3;
    DurationLabel.Result result = DurationLabel.label(now() - ago(hours * HOUR));
    assertThat(result.key()).isEqualTo("duration.hours");
    assertThat(result.value()).isEqualTo(hours);
}
|
/**
 * Sends an SMS via the Aliyun Dysmsapi SendSms action and maps the JSON
 * response onto the generic send-result DTO. The configured signature must be
 * present; template params are serialized to JSON and the send-log id is
 * passed through as OutId for correlation.
 *
 * @param sendLogId      local send-log id, echoed back by Aliyun as OutId
 * @param mobile         recipient phone number
 * @param apiTemplateId  Aliyun template code
 * @param templateParams template placeholder values
 * @return the mapped send result (success flag, serial no, request id, code, message)
 * @throws Throwable if the HTTP request or signing fails
 */
@Override
public SmsSendRespDTO sendSms(Long sendLogId, String mobile, String apiTemplateId,
                              List<KeyValue<String, Object>> templateParams) throws Throwable {
    Assert.notBlank(properties.getSignature(), "短信签名不能为空");
    // 1. Execute the request
    // Reference: https://api.aliyun.com/document/Dysmsapi/2017-05-25/SendSms
    TreeMap<String, Object> queryParam = new TreeMap<>();
    queryParam.put("PhoneNumbers", mobile);
    queryParam.put("SignName", properties.getSignature());
    queryParam.put("TemplateCode", apiTemplateId);
    queryParam.put("TemplateParam", JsonUtils.toJsonString(MapUtils.convertMap(templateParams)));
    queryParam.put("OutId", sendLogId);
    JSONObject response = request("SendSms", queryParam);
    // 2. Parse the response
    return new SmsSendRespDTO()
        .setSuccess(Objects.equals(response.getStr("Code"), RESPONSE_CODE_SUCCESS))
        .setSerialNo(response.getStr("BizId"))
        .setApiRequestId(response.getStr("RequestId"))
        .setApiCode(response.getStr("Code"))
        .setApiMsg(response.getStr("Message"));
}
|
/**
 * An Aliyun error response (illegal mobile number) must map to a failed
 * result carrying the request id, error code, and error message, with no
 * serial number.
 * NOTE(review): method name has a typo ("tesSendSms"); left unchanged since
 * renaming test methods can affect reports/filters — fix separately.
 */
@Test
public void tesSendSms_fail() throws Throwable {
    try (MockedStatic<HttpUtils> httpUtilsMockedStatic = mockStatic(HttpUtils.class)) {
        // Prepare arguments
        Long sendLogId = randomLongId();
        String mobile = randomString();
        String apiTemplateId = randomString();
        List<KeyValue<String, Object>> templateParams = Lists.newArrayList(
            new KeyValue<>("code", 1234), new KeyValue<>("op", "login"));
        // Mock the HTTP call to return Aliyun's error payload
        httpUtilsMockedStatic.when(() -> HttpUtils.post(anyString(), anyMap(), anyString()))
            .thenReturn("{\"Message\":\"手机号码格式错误\",\"RequestId\":\"B7700B8E-227E-5886-9564-26036172F01F\",\"Code\":\"isv.MOBILE_NUMBER_ILLEGAL\"}");
        // Invoke
        SmsSendRespDTO result = smsClient.sendSms(sendLogId, mobile, apiTemplateId, templateParams);
        // Assert
        assertFalse(result.getSuccess());
        assertEquals("B7700B8E-227E-5886-9564-26036172F01F", result.getApiRequestId());
        assertEquals("isv.MOBILE_NUMBER_ILLEGAL", result.getApiCode());
        assertEquals("手机号码格式错误", result.getApiMsg());
        assertNull(result.getSerialNo());
    }
}
|
/**
 * Reads the per-key release-history retention-size overrides from the
 * {@code apollo.release-history.retention.size.override} JSON map property.
 * Entries whose limit is null or less than 1 are dropped because they cannot
 * represent a valid retention size.
 *
 * @return map of override key to retention size; empty when unset or empty JSON
 */
public Map<String, Integer> releaseHistoryRetentionSizeOverride() {
    String overrideString = getValue("apollo.release-history.retention.size.override");
    Map<String, Integer> releaseHistoryRetentionSizeOverride = Maps.newHashMap();
    if (!Strings.isNullOrEmpty(overrideString)) {
        releaseHistoryRetentionSizeOverride =
            GSON.fromJson(overrideString, releaseHistoryRetentionSizeOverrideTypeReference);
    }
    return releaseHistoryRetentionSizeOverride.entrySet()
        .stream()
        // Guard against JSON null values: unboxing a null Integer in the
        // `>= 1` comparison would throw a NullPointerException.
        .filter(entry -> entry.getValue() != null && entry.getValue() >= 1)
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
|
@Test
public void testReleaseHistoryRetentionSizeOverride() {
    // A single valid override is parsed and returned as-is.
    String overrideJson = "{'a+b+c+b':10}";
    when(environment.getProperty("apollo.release-history.retention.size.override")).thenReturn(overrideJson);
    assertEquals(10, (int) bizConfig.releaseHistoryRetentionSizeOverride().get("a+b+c+b"));
    // Entries below 1 are filtered out while valid ones are kept.
    overrideJson = "{'a+b+c+b':0,'a+b+d+b':2}";
    when(environment.getProperty("apollo.release-history.retention.size.override")).thenReturn(overrideJson);
    Map<String, Integer> overrides = bizConfig.releaseHistoryRetentionSizeOverride();
    assertEquals(1, overrides.size());
    assertEquals(2, (int) overrides.get("a+b+d+b"));
    // An empty JSON object yields an empty result.
    when(environment.getProperty("apollo.release-history.retention.size.override")).thenReturn("{}");
    assertEquals(0, bizConfig.releaseHistoryRetentionSizeOverride().size());
}
|
/**
 * Sets the number of asynchronous backups for this queue after validating
 * the value against the current synchronous backup count.
 *
 * @param asyncBackupCount number of asynchronous backups
 * @return this config instance, for call chaining
 */
public QueueConfig setAsyncBackupCount(int asyncBackupCount) {
    int validatedCount = checkAsyncBackupCount(backupCount, asyncBackupCount);
    this.asyncBackupCount = validatedCount;
    return this;
}
|
// A negative async backup count must be rejected by validation.
@Test(expected = IllegalArgumentException.class)
public void setAsyncBackupCount_whenItsNegative() {
    queueConfig.setAsyncBackupCount(-1);
}
|
/**
 * Sends a request over gRPC and wraps the resulting future in a
 * {@link RequestFuture}. Both blocking accessors translate server-side
 * {@code ErrorResponse} payloads into a {@link NacosException}.
 *
 * @param request the request to send
 * @return a future handle over the asynchronous gRPC call
 * @throws NacosException if the request cannot be converted/sent
 */
@Override
public RequestFuture requestFuture(Request request) throws NacosException {
    Payload grpcRequest = GrpcUtils.convert(request);
    final ListenableFuture<Payload> requestFuture = grpcFutureServiceStub.request(grpcRequest);
    return new RequestFuture() {
        @Override
        public boolean isDone() {
            return requestFuture.isDone();
        }

        @Override
        public Response get() throws Exception {
            return toResponse(requestFuture.get());
        }

        @Override
        public Response get(long timeout) throws Exception {
            return toResponse(requestFuture.get(timeout, TimeUnit.MILLISECONDS));
        }

        /**
         * Parses a gRPC payload into a Response, translating error responses
         * into NacosException. Shared by both get() overloads to avoid the
         * previously duplicated parsing logic.
         */
        private Response toResponse(Payload grpcResponse) throws NacosException {
            Response response = (Response) GrpcUtils.parse(grpcResponse);
            if (response instanceof ErrorResponse) {
                throw new NacosException(response.getErrorCode(), response.getMessage());
            }
            return response;
        }
    };
}
|
@Test
void testRequestFutureFailure() throws Exception {
    // Stub the transport to return an error payload.
    when(future.get()).thenReturn(errorResponsePayload);
    RequestFuture requestFuture = connection.requestFuture(new HealthCheckRequest());
    assertTrue(requestFuture.isDone());
    // Only get() is expected to throw; keeping the assertThrows scope narrow
    // ensures setup failures are not mistaken for the expected error.
    assertThrows(NacosException.class, requestFuture::get);
}
|
/**
 * Parses a NetFlow v5 packet (header plus fixed-size records) from the given
 * buffer without consuming it (only slices are taken).
 *
 * @param bb buffer positioned at the start of the packet
 * @return the decoded packet with its header and records
 * @throws CorruptFlowPacketException if the record count is non-positive or
 *         the buffer holds fewer bytes than the header announces
 */
public static NetFlowV5Packet parsePacket(ByteBuf bb) {
    final int available = bb.readableBytes();
    final NetFlowV5Header header = parseHeader(bb.slice(bb.readerIndex(), HEADER_LENGTH));
    final int expectedLength = HEADER_LENGTH + header.count() * RECORD_LENGTH;
    if (header.count() <= 0 || available < expectedLength) {
        throw new CorruptFlowPacketException("Insufficient data (expected: " + expectedLength + " bytes, actual: " + available + " bytes)");
    }
    final ImmutableList.Builder<NetFlowV5Record> records = ImmutableList.builder();
    int offset = HEADER_LENGTH;
    while (offset < expectedLength) {
        records.add(parseRecord(bb.slice(bb.readerIndex() + offset, RECORD_LENGTH)));
        offset += RECORD_LENGTH;
    }
    return NetFlowV5Packet.create(header, records.build(), offset);
}
|
// Parses a captured NetFlow v5 datagram and verifies the decoded header and
// the first record field-by-field against the known content of the fixture.
@Test
public void testParse2() throws IOException {
    final byte[] b = Resources.toByteArray(Resources.getResource("netflow-data/netflow-v5-2.dat"));
    NetFlowV5Packet packet = NetFlowV5Parser.parsePacket(Unpooled.wrappedBuffer(b));
    assertNotNull(packet);
    // Header fields.
    NetFlowV5Header h = packet.header();
    assertEquals(5, h.version());
    assertEquals(30, h.count());
    assertEquals(234994, h.sysUptime());
    assertEquals(1369017138, h.unixSecs());
    assertEquals(805, h.unixNsecs());
    assertEquals(30, packet.records().size());
    // First record of the packet.
    final NetFlowV5Record r = packet.records().get(0);
    assertEquals(InetAddresses.forString("192.168.124.20"), r.dstAddr());
    assertEquals(6, r.protocol());
    assertEquals(0, r.srcAs());
    assertEquals(InetAddresses.forString("14.63.211.15"), r.srcAddr());
    assertEquals(202992L, r.last());
    assertEquals(47994, r.dstPort());
    assertEquals(317221L, r.octetCount());
    assertEquals(80, r.srcPort());
    assertEquals(0, r.srcMask());
    assertEquals(0, r.tos());
    assertEquals(0, r.inputIface());
    assertEquals(InetAddresses.forString("0.0.0.0"), r.nextHop());
    assertEquals(27, r.tcpFlags());
    assertEquals(0, r.dstAs());
    assertEquals(0, r.outputIface());
    assertEquals(202473L, r.first());
    assertEquals(0, r.dstMask());
    assertEquals(110L, r.packetCount());
}
|
/**
 * Checks whether the structure recorded for {@code tableName} already covers
 * every field of the given mappings.
 *
 * @param tableName table/index name to check
 * @param mappings candidate mappings; a null or property-less mapping is
 *                 trivially contained
 * @return true when all mapped fields are already known for the table
 */
public boolean containsMapping(String tableName, Mappings mappings) {
    if (mappings == null || CollectionUtils.isEmpty(mappings.getProperties())) {
        return true;
    }
    if (!mappingStructures.containsKey(tableName)) {
        return false;
    }
    return mappingStructures.get(tableName).containsAllFields(new Fields(mappings));
}
|
@Test
public void containsMapping() {
    IndexStructures structures = new IndexStructures();
    // Register a structure with fields a, c and f.
    HashMap<String, Object> registered = new HashMap<>();
    registered.put("a", "b");
    registered.put("c", "d");
    registered.put("f", "g");
    structures.putStructure("test", Mappings.builder()
                                           .type(ElasticSearchClient.TYPE)
                                           .properties(registered)
                                           .source(new Mappings.Source())
                                           .build(), new HashMap<>());
    // A subset of the registered fields is contained.
    HashMap<String, Object> subset = new HashMap<>();
    subset.put("a", "b");
    subset.put("c", "d");
    Assertions.assertTrue(structures.containsMapping(
        "test",
        Mappings.builder()
                .type(ElasticSearchClient.TYPE)
                .properties(subset)
                .source(new Mappings.Source())
                .build()
    ));
    // A mapping with an unknown field ("q") is not contained.
    HashMap<String, Object> withUnknownField = new HashMap<>();
    withUnknownField.put("a", "b");
    withUnknownField.put("q", "d");
    Assertions.assertFalse(structures.containsMapping(
        "test",
        Mappings.builder()
                .type(ElasticSearchClient.TYPE)
                .properties(withUnknownField)
                .build()
    ));
}
|
/**
 * Rebuilds this collection from UI-submitted attribute maps. Built-in
 * (test/build) artifact rows become {@code TestArtifactConfig} /
 * {@code BuildArtifactConfig}; all other rows become pluggable artifact
 * configs with their submitted configuration attached.
 *
 * @param attributes a {@code List<Map>} of row attributes, or null to clear
 */
@Override
public void setConfigAttributes(Object attributes) {
    clear();
    if (attributes == null) {
        return;
    }
    List<Map> attrList = (List<Map>) attributes;
    for (Map attrMap : attrList) {
        String type = (String) attrMap.get("artifactTypeValue");
        if (TestArtifactConfig.TEST_PLAN_DISPLAY_NAME.equals(type) || BuildArtifactConfig.ARTIFACT_PLAN_DISPLAY_NAME.equals(type)) {
            String source = (String) attrMap.get(BuiltinArtifactConfig.SRC);
            String destination = (String) attrMap.get(BuiltinArtifactConfig.DEST);
            // Ignore completely blank rows submitted by the UI.
            if (source.trim().isEmpty() && destination.trim().isEmpty()) {
                continue;
            }
            if (TestArtifactConfig.TEST_PLAN_DISPLAY_NAME.equals(type)) {
                this.add(new TestArtifactConfig(source, destination));
            } else {
                this.add(new BuildArtifactConfig(source, destination));
            }
        } else {
            String artifactId = (String) attrMap.get(PluggableArtifactConfig.ID);
            String storeId = (String) attrMap.get(PluggableArtifactConfig.STORE_ID);
            String pluginId = (String) attrMap.get("pluginId");
            Map<String, Object> userSpecifiedConfiguration = (Map<String, Object>) attrMap.get("configuration");
            PluggableArtifactConfig pluggableArtifactConfig = new PluggableArtifactConfig(artifactId, storeId);
            this.add(pluggableArtifactConfig);
            if (userSpecifiedConfiguration == null) {
                // BUG FIX: this was `return`, which silently aborted
                // processing of all remaining attribute maps; only this
                // entry should be skipped.
                continue;
            }
            if (StringUtils.isBlank(pluginId)) {
                // Plugin unknown: trust the submitted metadata, honoring the
                // per-key "isSecure" flag for encrypted vs plain values.
                Configuration configuration = pluggableArtifactConfig.getConfiguration();
                for (String key : userSpecifiedConfiguration.keySet()) {
                    Map<String, String> configurationMetadata = (Map<String, String>) userSpecifiedConfiguration.get(key);
                    if (configurationMetadata != null) {
                        boolean isSecure = Boolean.parseBoolean(configurationMetadata.get("isSecure"));
                        if (configuration.getProperty(key) == null) {
                            configuration.addNewConfiguration(key, isSecure);
                        }
                        if (isSecure) {
                            configuration.getProperty(key).setEncryptedValue(new EncryptedConfigurationValue(configurationMetadata.get("value")));
                        } else {
                            configuration.getProperty(key).setConfigurationValue(new ConfigurationValue(configurationMetadata.get("value")));
                        }
                    }
                }
            } else {
                // Plugin known: store raw key/value pairs as plain-text configuration.
                for (Map.Entry<String, Object> configuration : userSpecifiedConfiguration.entrySet()) {
                    pluggableArtifactConfig.getConfiguration().addNewConfigurationWithValue(configuration.getKey(), String.valueOf(configuration.getValue()), false);
                }
            }
        }
    }
}
|
// Verifies that when pluginId is blank the submitted configuration metadata is
// stored as-is: entries flagged "isSecure" become encrypted values, others
// plain configuration values.
@Test
public void setConfigAttributes_shouldSetConfigurationAsIsIfPluginIdIsBlank() throws CryptoException {
    Map<Object, Object> imageMap = new HashMap<>();
    imageMap.put("value", new GoCipher().encrypt("some-encrypted-value"));
    imageMap.put("isSecure", "true");
    Map<Object, Object> tagMap = new HashMap<>();
    tagMap.put("value", "18.6.0");
    tagMap.put("isSecure", "false");
    Map<Object, Object> configurationMap1 = new HashMap<>();
    configurationMap1.put("Image", imageMap);
    configurationMap1.put("Tag", tagMap);
    HashMap<String, Object> artifactPlan1 = new HashMap<>();
    artifactPlan1.put("artifactTypeValue", "Pluggable Artifact");
    artifactPlan1.put("id", "artifactId");
    artifactPlan1.put("storeId", "storeId");
    // Blank pluginId selects the "store configuration as-is" branch.
    artifactPlan1.put("pluginId", "");
    artifactPlan1.put("configuration", configurationMap1);
    List<Map> artifactPlansList = new ArrayList<>();
    artifactPlansList.add(artifactPlan1);
    ArtifactTypeConfigs artifactTypeConfigs = new ArtifactTypeConfigs();
    artifactTypeConfigs.setConfigAttributes(artifactPlansList);
    assertThat(artifactTypeConfigs.size(), is(1));
    PluggableArtifactConfig artifactConfig = (PluggableArtifactConfig) artifactTypeConfigs.get(0);
    assertThat(artifactConfig.getArtifactType(), is(ArtifactType.external));
    assertThat(artifactConfig.getId(), is("artifactId"));
    assertThat(artifactConfig.getStoreId(), is("storeId"));
    // The secure value decrypts back to the original plain text.
    assertThat(artifactConfig.getConfiguration().getProperty("Image").getValue(), is("some-encrypted-value"));
    assertThat(artifactConfig.getConfiguration().getProperty("Tag").getValue(), is("18.6.0"));
}
|
/**
 * Validates a state condition, failing fast when it does not hold.
 *
 * @param isValid result of the state check; {@code true} means acceptable
 * @param message detail message for the exception when the check fails
 * @throws IllegalStateException if {@code isValid} is {@code false}
 */
public static void checkState(boolean isValid, String message) throws IllegalStateException {
    if (isValid) {
        return;
    }
    throw new IllegalStateException(message);
}
|
@Test
public void testCheckStateWithThreeArguments() {
    // Valid state: must not throw.
    try {
        Preconditions.checkState(true, "Test message %s %s %s", 12, null, "column");
    } catch (IllegalStateException unexpected) {
        Assert.fail("Should not throw exception when isValid is true");
    }
    // Invalid state: must throw with a fully formatted message.
    try {
        Preconditions.checkState(false, "Test message %s %s %s", 12, null, "column");
        Assert.fail("Should throw exception when isValid is false");
    } catch (IllegalStateException expected) {
        Assert.assertEquals("Should format message", "Test message 12 null column", expected.getMessage());
    }
}
|
@Override
public boolean equals(Object obj) {
    // Identity shortcut.
    if (obj == this) {
        return true;
    }
    // Equality is based solely on the NSH service index value.
    if (!(obj instanceof NiciraSetNshSi)) {
        return false;
    }
    NiciraSetNshSi other = (NiciraSetNshSi) obj;
    return Objects.equals(nshSi, other.nshSi);
}
|
// Verifies the equals/hashCode contract: nshSi1 and sameAsNshSi1 form one
// equality group while nshSi2 forms another.
@Test
public void testEquals() {
    new EqualsTester().addEqualityGroup(nshSi1, sameAsNshSi1).addEqualityGroup(nshSi2).testEquals();
}
|
/**
 * Deletes the tenant with the given id.
 *
 * @param id id of the tenant to delete
 */
@Override
public void deleteTenant(Long id) {
    // Validate that the tenant exists (throws otherwise).
    validateUpdateTenant(id);
    // Delete the record.
    tenantMapper.deleteById(id);
}
|
@Test
public void testDeleteTenant_notExists() {
    // Prepare an id with no matching tenant record.
    Long id = randomLongId();
    // Invoke and assert the TENANT_NOT_EXISTS service exception is raised.
    assertServiceException(() -> tenantService.deleteTenant(id), TENANT_NOT_EXISTS);
}
|
/**
 * Consumes an SMS verification code: validates it, then marks it as used.
 *
 * @param reqDTO mobile, code, scene and the IP consuming the code
 */
@Override
public void useSmsCode(SmsCodeUseReqDTO reqDTO) {
    // Validate the code is still usable for this mobile/scene.
    SmsCodeDO lastSmsCode = validateSmsCode0(reqDTO.getMobile(), reqDTO.getCode(), reqDTO.getScene());
    // Mark it as used, recording when and from which IP.
    SmsCodeDO update = SmsCodeDO.builder()
            .id(lastSmsCode.getId())
            .used(true)
            .usedTime(LocalDateTime.now())
            .usedIp(reqDTO.getUsedIp())
            .build();
    smsCodeMapper.updateById(update);
}
|
@Test
public void testUseSmsCode_success() {
    // Prepare a request for a known mobile and a random scene.
    SmsCodeUseReqDTO reqDTO = randomPojo(SmsCodeUseReqDTO.class, o -> {
        o.setMobile("15601691300");
        o.setScene(randomEle(SmsSceneEnum.values()).getScene());
    });
    // Seed an unused code matching the request.
    SqlConstants.init(DbType.MYSQL);
    smsCodeMapper.insert(randomPojo(SmsCodeDO.class, o -> {
        o.setMobile(reqDTO.getMobile()).setScene(reqDTO.getScene())
                .setCode(reqDTO.getCode()).setUsed(false);
    }));
    // Invoke.
    smsCodeService.useSmsCode(reqDTO);
    // The stored code must now be flagged as used with time and IP recorded.
    SmsCodeDO stored = smsCodeMapper.selectOne(null);
    assertTrue(stored.getUsed());
    assertNotNull(stored.getUsedTime());
    assertEquals(reqDTO.getUsedIp(), stored.getUsedIp());
}
|
/**
 * Blocks until every pending dispatch future completes, then clears the
 * pending list. Completion failures are logged at WARNING and otherwise
 * swallowed.
 */
public void awaitSynchronous() {
    List<CompletableFuture<Void>> inFlight = pending.get();
    if (inFlight.isEmpty()) {
        return;
    }
    try {
        CompletableFuture<Void> all = CompletableFuture.allOf(inFlight.toArray(CompletableFuture[]::new));
        all.join();
    } catch (CompletionException e) {
        logger.log(Level.WARNING, "", e);
    } finally {
        inFlight.clear();
    }
}
|
@Test
public void awaitSynchronous_failure() {
    var dispatcher = new EventDispatcher<Integer, Integer>(Runnable::run);
    // An already-failed future must be drained without propagating the error.
    var failed = CompletableFuture.<Void>failedFuture(new RuntimeException());
    dispatcher.pending.get().add(failed);
    dispatcher.awaitSynchronous();
    assertThat(dispatcher.pending.get()).isEmpty();
}
|
/**
 * Handles a successful ConsumerGroupHeartbeat response: validates that it
 * carries no error, updates the local member id/epoch, and applies any
 * assignment included in the response. Responses are ignored while the member
 * is leaving, has just completed leaving, or is otherwise not in the group.
 *
 * @param response heartbeat response; must have errorCode == NONE
 * @throws IllegalArgumentException if the response actually carries an error
 */
@Override
public void onHeartbeatSuccess(ConsumerGroupHeartbeatResponseData response) {
    if (response.errorCode() != Errors.NONE.code()) {
        String errorMessage = String.format(
            "Unexpected error in Heartbeat response. Expected no error, but received: %s",
            Errors.forCode(response.errorCode())
        );
        throw new IllegalArgumentException(errorMessage);
    }
    MemberState state = state();
    // A member that is already leaving must not react to late heartbeats.
    if (state == MemberState.LEAVING) {
        log.debug("Ignoring heartbeat response received from broker. Member {} with epoch {} is " +
            "already leaving the group.", memberId, memberEpoch);
        return;
    }
    // This may be the ack for the "leave group" heartbeat; complete the leave.
    if (state == MemberState.UNSUBSCRIBED && maybeCompleteLeaveInProgress()) {
        log.debug("Member {} with epoch {} received a successful response to the heartbeat " +
            "to leave the group and completed the leave operation. ", memberId, memberEpoch);
        return;
    }
    if (isNotInGroup()) {
        log.debug("Ignoring heartbeat response received from broker. Member {} is in {} state" +
            " so it's not a member of the group. ", memberId, state);
        return;
    }
    // Update the group member id label in the client telemetry reporter if the member id has
    // changed. Initially the member id is empty, and it is updated when the member joins the
    // group. This is done here to avoid updating the label on every heartbeat response. Also
    // check if the member id is null, as the schema defines it as nullable.
    if (response.memberId() != null && !response.memberId().equals(memberId)) {
        clientTelemetryReporter.ifPresent(reporter -> reporter.updateMetricsLabels(
            Collections.singletonMap(ClientTelemetryProvider.GROUP_MEMBER_ID, response.memberId())));
    }
    this.memberId = response.memberId();
    updateMemberEpoch(response.memberEpoch());
    ConsumerGroupHeartbeatResponseData.Assignment assignment = response.assignment();
    if (assignment != null) {
        if (!state.canHandleNewAssignment()) {
            // New assignment received but member is in a state where it cannot take new
            // assignments (ex. preparing to leave the group)
            log.debug("Ignoring new assignment {} received from server because member is in {} state.",
                assignment, state);
            return;
        }
        // Re-shape the assignment into topicId -> sorted partition set before applying.
        Map<Uuid, SortedSet<Integer>> newAssignment = new HashMap<>();
        assignment.topicPartitions().forEach(topicPartition ->
            newAssignment.put(topicPartition.topicId(), new TreeSet<>(topicPartition.partitions())));
        processAssignmentReceived(newAssignment);
    }
}
|
@Test
public void testListenersGetNotifiedOfMemberEpochUpdatesOnlyIfItChanges() {
    ConsumerMembershipManager manager = createMembershipManagerJoiningGroup();
    MemberStateListener listener = mock(MemberStateListener.class);
    manager.registerStateListener(listener);
    final int epoch = 5;
    // First successful heartbeat: the epoch changes, so the listener fires once.
    manager.onHeartbeatSuccess(new ConsumerGroupHeartbeatResponseData()
            .setErrorCode(Errors.NONE.code())
            .setMemberId(MEMBER_ID)
            .setMemberEpoch(epoch));
    verify(listener).onMemberEpochUpdated(Optional.of(epoch), Optional.of(MEMBER_ID));
    clearInvocations(listener);
    // Identical heartbeat: epoch unchanged, so the listener must stay silent.
    manager.onHeartbeatSuccess(new ConsumerGroupHeartbeatResponseData()
            .setErrorCode(Errors.NONE.code())
            .setMemberId(MEMBER_ID)
            .setMemberEpoch(epoch));
    verify(listener, never()).onMemberEpochUpdated(any(), any());
}
|
/**
 * Coordinates execution of a pull query: locates the partitions that serve
 * the requested keys, fails fast when any partition has no selectable host,
 * then runs the routing rounds asynchronously on the coordinator executor.
 *
 * @param serviceContext service context for the query
 * @param pullPhysicalPlan physical plan providing keys, materialization and plan type
 * @param statement the configured query statement
 * @param routingOptions options applied when filtering candidate hosts
 * @param pullQueryQueue sink the query rows are written into
 * @param shouldCancelRequests signal used to cancel outstanding requests
 * @return future completed when routing finishes, or completed exceptionally on failure
 * @throws MaterializationException if any partition has no selectable host
 */
public CompletableFuture<Void> handlePullQuery(
    final ServiceContext serviceContext,
    final PullPhysicalPlan pullPhysicalPlan,
    final ConfiguredStatement<Query> statement,
    final RoutingOptions routingOptions,
    final PullQueryWriteStream pullQueryQueue,
    final CompletableFuture<Void> shouldCancelRequests
) {
    final List<KsqlPartitionLocation> allLocations = pullPhysicalPlan.getMaterialization().locator()
        .locate(
            pullPhysicalPlan.getKeys(),
            routingOptions,
            routingFilterFactory,
            pullPhysicalPlan.getPlanType() == PullPhysicalPlanType.RANGE_SCAN
        );
    // Partitions where no host passed the routing filters can never be served.
    final Map<Integer, List<Host>> emptyPartitions = allLocations.stream()
        .filter(loc -> loc.getNodes().stream().noneMatch(node -> node.getHost().isSelected()))
        .collect(Collectors.toMap(
            KsqlPartitionLocation::getPartition,
            loc -> loc.getNodes().stream().map(KsqlNode::getHost).collect(Collectors.toList())));
    if (!emptyPartitions.isEmpty()) {
        final MaterializationException materializationException = new MaterializationException(
            "Unable to execute pull query. "
                + emptyPartitions.entrySet()
                .stream()
                .map(kv -> String.format(
                    "Partition %s failed to find valid host. Hosts scanned: %s",
                    kv.getKey(), kv.getValue()))
                .collect(Collectors.joining(", ", "[", "]")));
        LOG.debug(materializationException.getMessage());
        throw materializationException;
    }
    // at this point we should filter out the hosts that we should not route to
    final List<KsqlPartitionLocation> locations = allLocations
        .stream()
        .map(KsqlPartitionLocation::removeFilteredHosts)
        .collect(Collectors.toList());
    // Execute asynchronously; the future reflects success or the first failure.
    final CompletableFuture<Void> completableFuture = new CompletableFuture<>();
    coordinatorExecutorService.submit(() -> {
        try {
            executeRounds(serviceContext, pullPhysicalPlan, statement, routingOptions,
                locations, pullQueryQueue, shouldCancelRequests);
            completableFuture.complete(null);
        } catch (Throwable t) {
            completableFuture.completeExceptionally(t);
        }
    });
    return completableFuture;
}
|
@Test
public void shouldCallRouteQuery_couldNotFindHost() {
    // Given: partition 1 has no candidate hosts at all.
    location1 = new PartitionLocation(Optional.empty(), 1, ImmutableList.of());
    locate(location1, location2, location3, location4);
    // When:
    final Exception exception = assertThrows(
        MaterializationException.class,
        () -> haRouting.handlePullQuery(serviceContext, pullPhysicalPlan, statement, routingOptions,
                                        pullQueryQueue, disconnect)
    );
    // Then: the error names the partition that failed host resolution.
    assertThat(exception.getMessage(),
               containsString("Unable to execute pull query. " +
                   "[Partition 1 failed to find valid host. Hosts scanned: []]"));
}
|
/**
 * Reads the named field as an int, widening BYTE/CHAR/SHORT values.
 * An unknown field reads as 0; any other field type is an incompatible
 * class change.
 *
 * @param fieldName name of the field to read
 * @return the field value widened to int, or 0 when the field is absent
 * @throws IOException if the underlying read fails
 */
@Override
public int readInt(@Nonnull String fieldName) throws IOException {
    FieldDefinition definition = cd.getField(fieldName);
    if (definition == null) {
        return 0;
    }
    switch (definition.getType()) {
        case BYTE:
            return super.readByte(fieldName);
        case CHAR:
            return super.readChar(fieldName);
        case SHORT:
            return super.readShort(fieldName);
        case INT:
            return super.readInt(fieldName);
        default:
            throw createIncompatibleClassChangeError(definition, INT);
    }
}
|
@Test
public void testReadInt() throws Exception {
    // BYTE/CHAR/SHORT fields are widened to int on read.
    assertEquals(1, reader.readInt("byte"));
    assertEquals(2, reader.readInt("char"));
    assertEquals(3, reader.readInt("short"));
    assertEquals(4, reader.readInt("int"));
    // An unknown field reads as the default int value.
    assertEquals(0, reader.readInt("NO SUCH FIELD"));
}
|
/**
 * Turns a registry response into a blob descriptor using its Content-Length.
 *
 * @param response the registry response to inspect
 * @return a descriptor pairing the content length with the expected digest
 * @throws RegistryErrorException if the response carried no Content-Length header
 */
@Override
public Optional<BlobDescriptor> handleResponse(Response response) throws RegistryErrorException {
    long contentLength = response.getContentLength();
    if (contentLength >= 0) {
        return Optional.of(new BlobDescriptor(contentLength, blobDigest));
    }
    // A negative length means the header was absent from the response.
    throw new RegistryErrorExceptionBuilder(getActionDescription())
        .addReason("Did not receive Content-Length header")
        .build();
}
|
@Test
public void testHandleResponse() throws RegistryErrorException {
    // A zero Content-Length is valid and must be propagated into the descriptor.
    Mockito.when(mockResponse.getContentLength()).thenReturn(0L);
    BlobDescriptor result = testBlobChecker.handleResponse(mockResponse).get();
    Assert.assertEquals(new BlobDescriptor(0, fakeDigest), result);
}
|
/**
 * Copies the requested file attributes from a captured source file status
 * onto the target path. BLOCKSIZE and CHECKSUMTYPE are always stripped from
 * the request; the target FileStatus is only fetched when at least one
 * attribute remains to preserve.
 *
 * @param targetFS target file system
 * @param path path on the target file system to modify
 * @param srcFileStatus attributes captured from the source file
 * @param attributes which attributes to preserve (note: this set is mutated —
 *        BLOCKSIZE and CHECKSUMTYPE are removed)
 * @param preserveRawXattrs whether raw-namespace xattrs are copied even when
 *        XATTR preservation was not requested
 * @throws IOException on any file-system failure
 */
public static void preserve(FileSystem targetFS, Path path,
                            CopyListingFileStatus srcFileStatus,
                            EnumSet<FileAttribute> attributes,
                            boolean preserveRawXattrs) throws IOException {
    // strip out those attributes we don't need any more
    attributes.remove(FileAttribute.BLOCKSIZE);
    attributes.remove(FileAttribute.CHECKSUMTYPE);
    // If not preserving anything from FileStatus, don't bother fetching it.
    FileStatus targetFileStatus = attributes.isEmpty() ? null :
        targetFS.getFileStatus(path);
    String group = targetFileStatus == null ? null :
        targetFileStatus.getGroup();
    String user = targetFileStatus == null ? null :
        targetFileStatus.getOwner();
    boolean chown = false;
    if (attributes.contains(FileAttribute.ACL)) {
        List<AclEntry> srcAcl = srcFileStatus.getAclEntries();
        List<AclEntry> targetAcl = getAcl(targetFS, targetFileStatus);
        // Only rewrite the ACL when it actually differs.
        if (!srcAcl.equals(targetAcl)) {
            targetFS.removeAcl(path);
            targetFS.setAcl(path, srcAcl);
        }
        // setAcl doesn't preserve sticky bit, so also call setPermission if needed.
        if (srcFileStatus.getPermission().getStickyBit() !=
            targetFileStatus.getPermission().getStickyBit()) {
            targetFS.setPermission(path, srcFileStatus.getPermission());
        }
    } else if (attributes.contains(FileAttribute.PERMISSION) &&
        !srcFileStatus.getPermission().equals(targetFileStatus.getPermission())) {
        targetFS.setPermission(path, srcFileStatus.getPermission());
    }
    final boolean preserveXAttrs = attributes.contains(FileAttribute.XATTR);
    if (preserveXAttrs || preserveRawXattrs) {
        final String rawNS =
            StringUtils.toLowerCase(XAttr.NameSpace.RAW.name());
        Map<String, byte[]> srcXAttrs = srcFileStatus.getXAttrs();
        Map<String, byte[]> targetXAttrs = getXAttrs(targetFS, path);
        if (srcXAttrs != null && !srcXAttrs.equals(targetXAttrs)) {
            for (Entry<String, byte[]> entry : srcXAttrs.entrySet()) {
                String xattrName = entry.getKey();
                // raw.* xattrs are copied whenever preserveRawXattrs is set,
                // regardless of whether XATTR preservation was requested.
                if (xattrName.startsWith(rawNS) || preserveXAttrs) {
                    targetFS.setXAttr(path, xattrName, entry.getValue());
                }
            }
        }
    }
    // The replication factor can only be preserved for replicated files.
    // It is ignored when either the source or target file are erasure coded.
    if (attributes.contains(FileAttribute.REPLICATION) &&
        !targetFileStatus.isDirectory() &&
        !targetFileStatus.isErasureCoded() &&
        !srcFileStatus.isErasureCoded() &&
        srcFileStatus.getReplication() != targetFileStatus.getReplication()) {
        targetFS.setReplication(path, srcFileStatus.getReplication());
    }
    if (attributes.contains(FileAttribute.GROUP) &&
        !group.equals(srcFileStatus.getGroup())) {
        group = srcFileStatus.getGroup();
        chown = true;
    }
    if (attributes.contains(FileAttribute.USER) &&
        !user.equals(srcFileStatus.getOwner())) {
        user = srcFileStatus.getOwner();
        chown = true;
    }
    // Owner and group are applied in one setOwner call to avoid two RPCs.
    if (chown) {
        targetFS.setOwner(path, user, group);
    }
    if (attributes.contains(FileAttribute.TIMES)) {
        targetFS.setTimes(path,
            srcFileStatus.getModificationTime(),
            srcFileStatus.getAccessTime());
    }
}
|
// When only GROUP preservation is requested, the group must be copied from the
// source while permission, owner, times and replication stay untouched.
@Test
public void testPreserveGroupOnFile() throws IOException {
    FileSystem fs = FileSystem.get(config);
    EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.GROUP);
    Path dst = new Path("/tmp/dest2");
    Path src = new Path("/tmp/src2");
    createFile(fs, src);
    createFile(fs, dst);
    // Give source and destination deliberately different attributes so any
    // unintended preservation would show up in the assertions below.
    fs.setPermission(src, fullPerm);
    fs.setOwner(src, "somebody", "somebody-group");
    fs.setTimes(src, 0, 0);
    fs.setReplication(src, (short) 1);
    fs.setPermission(dst, noPerm);
    fs.setOwner(dst, "nobody", "nobody-group");
    fs.setTimes(dst, 100, 100);
    fs.setReplication(dst, (short) 2);
    CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
    DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
    CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));
    // FileStatus.equals only compares path field, must explicitly compare all fields
    Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
    Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
    Assert.assertTrue(srcStatus.getGroup().equals(dstStatus.getGroup()));
    Assert.assertFalse(srcStatus.getAccessTime() == dstStatus.getAccessTime());
    Assert.assertFalse(srcStatus.getModificationTime() == dstStatus.getModificationTime());
    Assert.assertFalse(srcStatus.getReplication() == dstStatus.getReplication());
}
|
/**
 * Converts a XuguDB column type definition into a SeaTunnel physical
 * {@link Column}, mapping each Xugu data type onto the corresponding
 * SeaTunnel type and carrying over length/precision/scale where applicable.
 *
 * @param typeDefine the source column definition (name, type, length, etc.)
 * @return the converted SeaTunnel column
 * @throws RuntimeException (via CommonError) when the Xugu data type is unsupported
 */
@Override
public Column convert(BasicTypeDefine typeDefine) {
    PhysicalColumn.PhysicalColumnBuilder builder =
            PhysicalColumn.builder()
                    .name(typeDefine.getName())
                    .sourceType(typeDefine.getColumnType())
                    .nullable(typeDefine.isNullable())
                    .defaultValue(typeDefine.getDefaultValue())
                    .comment(typeDefine.getComment());
    String xuguDataType = typeDefine.getDataType().toUpperCase();
    switch (xuguDataType) {
        case XUGU_BOOLEAN:
        case XUGU_BOOL:
            builder.dataType(BasicType.BOOLEAN_TYPE);
            break;
        case XUGU_TINYINT:
            builder.dataType(BasicType.BYTE_TYPE);
            break;
        case XUGU_SMALLINT:
            builder.dataType(BasicType.SHORT_TYPE);
            break;
        case XUGU_INT:
        case XUGU_INTEGER:
            builder.dataType(BasicType.INT_TYPE);
            break;
        case XUGU_BIGINT:
            builder.dataType(BasicType.LONG_TYPE);
            break;
        case XUGU_FLOAT:
            builder.dataType(BasicType.FLOAT_TYPE);
            break;
        case XUGU_DOUBLE:
            builder.dataType(BasicType.DOUBLE_TYPE);
            break;
        case XUGU_NUMBER:
        case XUGU_DECIMAL:
        case XUGU_NUMERIC:
            // Fall back to the default precision/scale when none is declared.
            DecimalType decimalType;
            if (typeDefine.getPrecision() != null && typeDefine.getPrecision() > 0) {
                decimalType =
                        new DecimalType(
                                typeDefine.getPrecision().intValue(), typeDefine.getScale());
            } else {
                decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE);
            }
            builder.dataType(decimalType);
            builder.columnLength(Long.valueOf(decimalType.getPrecision()));
            builder.scale(decimalType.getScale());
            break;
        case XUGU_CHAR:
        case XUGU_NCHAR:
            // Missing length defaults to a single character (in 4-byte units).
            builder.dataType(BasicType.STRING_TYPE);
            if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
                builder.columnLength(TypeDefineUtils.charTo4ByteLength(1L));
            } else {
                builder.columnLength(typeDefine.getLength());
            }
            break;
        case XUGU_VARCHAR:
        case XUGU_VARCHAR2:
            // Missing length defaults to the maximum varchar length.
            builder.dataType(BasicType.STRING_TYPE);
            if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
                builder.columnLength(TypeDefineUtils.charTo4ByteLength(MAX_VARCHAR_LENGTH));
            } else {
                builder.columnLength(typeDefine.getLength());
            }
            break;
        case XUGU_CLOB:
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(BYTES_2GB - 1);
            break;
        case XUGU_JSON:
        case XUGU_GUID:
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case XUGU_BINARY:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(MAX_BINARY_LENGTH);
            break;
        case XUGU_BLOB:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(BYTES_2GB - 1);
            break;
        case XUGU_DATE:
            builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
            break;
        case XUGU_TIME:
        case XUGU_TIME_WITH_TIME_ZONE:
            builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
            break;
        case XUGU_DATETIME:
        case XUGU_DATETIME_WITH_TIME_ZONE:
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            break;
        case XUGU_TIMESTAMP:
        case XUGU_TIMESTAMP_WITH_TIME_ZONE:
            // Timestamps keep their declared fractional-second scale, or the default.
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            if (typeDefine.getScale() == null) {
                builder.scale(TIMESTAMP_DEFAULT_SCALE);
            } else {
                builder.scale(typeDefine.getScale());
            }
            break;
        default:
            throw CommonError.convertToSeaTunnelTypeError(
                    DatabaseIdentifier.XUGU, xuguDataType, typeDefine.getName());
    }
    return builder.build();
}
|
@Test
public void testConvertVarchar() {
    // varchar without an explicit length falls back to the maximum length.
    BasicTypeDefine<Object> unsized =
            BasicTypeDefine.builder()
                    .name("test")
                    .columnType("varchar")
                    .dataType("varchar")
                    .build();
    Column unsizedColumn = XuguTypeConverter.INSTANCE.convert(unsized);
    Assertions.assertEquals(unsized.getName(), unsizedColumn.getName());
    Assertions.assertEquals(BasicType.STRING_TYPE, unsizedColumn.getDataType());
    Assertions.assertEquals(240000, unsizedColumn.getColumnLength());
    Assertions.assertEquals(unsized.getColumnType(), unsizedColumn.getSourceType());
    // varchar(10) keeps its declared length.
    BasicTypeDefine<Object> sized =
            BasicTypeDefine.builder()
                    .name("test")
                    .columnType("varchar(10)")
                    .dataType("varchar")
                    .length(10L)
                    .build();
    Column sizedColumn = XuguTypeConverter.INSTANCE.convert(sized);
    Assertions.assertEquals(sized.getName(), sizedColumn.getName());
    Assertions.assertEquals(BasicType.STRING_TYPE, sizedColumn.getDataType());
    Assertions.assertEquals(10, sizedColumn.getColumnLength());
    Assertions.assertEquals(sized.getColumnType(), sizedColumn.getSourceType());
    // varchar2(20) behaves the same as varchar with a declared length.
    BasicTypeDefine<Object> varchar2 =
            BasicTypeDefine.builder()
                    .name("test")
                    .columnType("varchar2(20)")
                    .dataType("varchar2")
                    .length(20L)
                    .build();
    Column varchar2Column = XuguTypeConverter.INSTANCE.convert(varchar2);
    Assertions.assertEquals(varchar2.getName(), varchar2Column.getName());
    Assertions.assertEquals(BasicType.STRING_TYPE, varchar2Column.getDataType());
    Assertions.assertEquals(20, varchar2Column.getColumnLength());
    Assertions.assertEquals(varchar2.getColumnType(), varchar2Column.getSourceType());
}
|
/**
 * Sums the numbers in the given list.
 * Per DMN spec Table 75 an empty list yields null; a null list or any
 * non-numeric / non-convertible element yields an error result.
 *
 * @param list the values to sum
 * @return the sum as BigDecimal, null for an empty list, or an error result
 */
public FEELFnResult<BigDecimal> invoke(@ParameterName("list") List list) {
    if (list == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "the list cannot be null"));
    }
    if (list.isEmpty()) {
        return FEELFnResult.ofResult(null); // DMN spec, Table 75: ...or null if list is empty
    }
    BigDecimal total = BigDecimal.ZERO;
    for (Object element : list) {
        if (element instanceof BigDecimal) {
            total = total.add((BigDecimal) element);
            continue;
        }
        if (!(element instanceof Number)) {
            return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "an element in the list is not a number"));
        }
        BigDecimal converted = NumberEvalHelper.getBigDecimalOrNull(element);
        if (converted == null) {
            return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "an element in the list is not suitable for the sum"));
        }
        total = total.add(converted);
    }
    return FEELFnResult.ofResult(total);
}
|
// A list containing a non-numeric element ("test") must produce an
// InvalidParametersEvent error rather than a sum.
@Test
void invokeArrayParamContainsUnsupportedType() {
    FunctionTestUtil.assertResultError(sumFunction.invoke(new Object[]{10, "test", 2}),
                                       InvalidParametersEvent.class);
}
|
/**
 * Obtains a {@link Metric} with the given name and the {@code COUNT} unit,
 * delegating to {@code MetricsImpl}.
 *
 * @param name the metric name
 * @return the metric handle
 */
public static Metric metric(String name) {
    return MetricsImpl.metric(name, Unit.COUNT);
}
|
@Test
public void metricInFusedStages() {
    final int itemCount = 100_000;
    Integer[] items = new Integer[itemCount];
    Arrays.setAll(items, i -> i);
    // Two user metrics per stage; "inBoth" is incremented in filter AND map.
    pipeline.readFrom(TestSources.items(items))
            .filter(item -> {
                Metrics.metric("onlyInFilter").increment();
                Metrics.metric("inBoth").increment();
                return true;
            })
            .map(item -> {
                Metrics.metric("onlyInMap").increment();
                Metrics.metric("inBoth").increment();
                return item;
            })
            .writeTo(Sinks.noop());
    Job job = runPipeline(pipeline.toDag());
    // Stage-local metrics count every item once; the shared one counts twice.
    JobMetricsChecker checker = new JobMetricsChecker(job);
    checker.assertSummedMetricValue("onlyInFilter", itemCount);
    checker.assertSummedMetricValue("onlyInMap", itemCount);
    checker.assertSummedMetricValue("inBoth", 2 * itemCount);
}
|
private RemotingCommand getTopicStatsInfo(ChannelHandlerContext ctx,
RemotingCommand request) throws RemotingCommandException {
final RemotingCommand response = RemotingCommand.createResponseCommand(null);
final GetTopicStatsInfoRequestHeader requestHeader =
(GetTopicStatsInfoRequestHeader) request.decodeCommandCustomHeader(GetTopicStatsInfoRequestHeader.class);
final String topic = requestHeader.getTopic();
TopicConfig topicConfig = this.brokerController.getTopicConfigManager().selectTopicConfig(topic);
if (null == topicConfig) {
response.setCode(ResponseCode.TOPIC_NOT_EXIST);
response.setRemark("topic[" + topic + "] not exist");
return response;
}
TopicStatsTable topicStatsTable = new TopicStatsTable();
int maxQueueNums = Math.max(topicConfig.getWriteQueueNums(), topicConfig.getReadQueueNums());
for (int i = 0; i < maxQueueNums; i++) {
MessageQueue mq = new MessageQueue();
mq.setTopic(topic);
mq.setBrokerName(this.brokerController.getBrokerConfig().getBrokerName());
mq.setQueueId(i);
TopicOffset topicOffset = new TopicOffset();
long min = this.brokerController.getMessageStore().getMinOffsetInQueue(topic, i);
if (min < 0) {
min = 0;
}
long max = this.brokerController.getMessageStore().getMaxOffsetInQueue(topic, i);
if (max < 0) {
max = 0;
}
long timestamp = 0;
if (max > 0) {
timestamp = this.brokerController.getMessageStore().getMessageStoreTimeStamp(topic, i, max - 1);
}
topicOffset.setMinOffset(min);
topicOffset.setMaxOffset(max);
topicOffset.setLastUpdateTimestamp(timestamp);
topicStatsTable.getOffsetTable().put(mq, topicOffset);
}
byte[] body = topicStatsTable.encode();
response.setBody(body);
response.setCode(ResponseCode.SUCCESS);
response.setRemark(null);
return response;
}
|
@Test
public void testGetTopicStatsInfo() throws RemotingCommandException {
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_TOPIC_STATS_INFO, null);
request.addExtField("topic", "topicTest");
RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.TOPIC_NOT_EXIST);
topicConfigManager = mock(TopicConfigManager.class);
when(brokerController.getTopicConfigManager()).thenReturn(topicConfigManager);
TopicConfig topicConfig = new TopicConfig();
topicConfig.setTopicName("topicTest");
when(topicConfigManager.selectTopicConfig(anyString())).thenReturn(topicConfig);
RemotingCommand responseSuccess = adminBrokerProcessor.processRequest(handlerContext, request);
assertThat(responseSuccess.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.