| focal_method (string, 13-60.9k chars) | test_case (string, 25-109k chars) |
|---|---|
@Override
public void readLine(String line) {
if (line.startsWith("#") || line.isEmpty()) {
return;
}
// In some cases, ARIN may have multiple results with different NetType values. When that happens,
// we want to use the data from the entry with the data closest to the customer actually using the IP.
if (line.startsWith("NetType:")) {
prevNetworkType = currNetworkType;
currNetworkType = NetworkType.getEnum(lineValue(line));
if (null != currNetworkType && currNetworkType.isMoreSpecificThan(prevNetworkType)) {
this.organization = null;
this.countryCode = null;
}
}
if((line.startsWith("Organization:") || line.startsWith("Customer:")) && this.organization == null) {
this.organization = lineValue(line);
}
if(line.startsWith("Country:") && this.countryCode == null) {
this.countryCode = lineValue(line);
}
if(line.startsWith("ResourceLink") && !line.contains("http")) {
this.isRedirect = true;
registryRedirect = findRegistryFromWhoisServer(lineValue(line));
}
}
|
@Test
public void testRunFourMatches() throws Exception {
ARINResponseParser parser = new ARINResponseParser();
for (String line : FOUR_MATCH.split("\n")) {
parser.readLine(line);
}
assertFalse(parser.isRedirect());
assertNull(parser.getRegistryRedirect());
assertEquals("BAZ", parser.getCountryCode());
assertEquals("Reassigned Customer", parser.getOrganization());
}
|
@POST
@Path("/trigger")
@NoAuditEvent("No Audit Event needed") // TODO: do we need audit log here?
@RequiresPermissions(RestPermissions.DATANODE_MIGRATION)
@ApiOperation(value = "trigger migration step")
public Response trigger(@ApiParam(name = "request") @NotNull MigrationStepRequest request) {
final CurrentStateInformation newState = stateMachine.trigger(request.step(), request.args());
Response.ResponseBuilder response = newState.hasErrors() ? Response.serverError() : Response.ok();
return response.entity(newState)
.build();
}
|
@Test
public void requestReturnsSuccessfulResult() {
final MigrationStateResource resource = new MigrationStateResource(createStateMachine(), mockHttpHeaders(), journalConfiguration);
try (Response response = resource.trigger(new MigrationStepRequest(MigrationStep.SELECT_MIGRATION, Map.of()))) {
assertThat(response.getStatus()).isEqualTo(200);
assertThat(response.getEntity())
.isInstanceOf(CurrentStateInformation.class)
.extracting(e -> (CurrentStateInformation) e)
.satisfies(entity -> assertThat(entity.hasErrors()).isFalse());
}
}
|
public String transform() throws ScanException {
StringBuilder stringBuilder = new StringBuilder();
compileNode(node, stringBuilder, new Stack<Node>());
return stringBuilder.toString();
}
|
@Test
public void LOGBACK744_withColon() throws ScanException {
String input = "%d{HH:mm:ss.SSS} host:${host} %logger{36} - %msg%n";
Node node = makeNode(input);
NodeToStringTransformer nodeToStringTransformer = new NodeToStringTransformer(node, propertyContainer0);
System.out.println(nodeToStringTransformer.transform());
Assertions.assertEquals("%d{HH:mm:ss.SSS} host:local %logger{36} - %msg%n", nodeToStringTransformer.transform());
}
|
public void isNotEqualTo(@Nullable Object unexpected) {
standardIsNotEqualTo(unexpected);
}
|
@Test
public void isNotEqualToFailureWithSameObject() {
Object a = OBJECT_1;
Object b = a;
expectFailure.whenTesting().that(a).isNotEqualTo(b);
}
|
public GetLoanResponse createLoan(CreateLoanRequest createLoanRequest) {
String loanId = UUID.randomUUID().toString();
LoanDto loanDto = new LoanDto(
loanId,
createLoanRequest.term(),
createLoanRequest.originatedAmount(),
createLoanRequest.currency(),
createLoanRequest.targetInterestRate(),
createLoanRequest.effectiveInterestRate(),
createLoanRequest.externalReference(),
createLoanRequest.startDate(),
createLoanRequest.endDate(),
LoanStatus.CREATED,
createLoanRequest.timezone(),
createLoanRequest.region(),
createLoanRequest.state()
);
List<Installment> newInstallments = calculator.newInstallments(LoanTransformer.transformForNewInstallments(createLoanRequest));
List<LoanInstallmentDto> loanInstallmentDtos = LoanInstallmentTransformer.transform(newInstallments, loanId);
// TODO(hubert): Add transactions
loanInstallmentDtos.forEach(loanInstallmentDao::insert);
loanDao.insert(loanDto);
loanInstallmentDtos = loanInstallmentDao.findByLoanId(loanId);
LOGGER.info("Creating new Loan: %s\nInstallments: %s".formatted(loanDto, loanInstallmentDtos));
return new GetLoanResponse(
LoanTransformer.transformToLoanInfo(
loanDto,
loanInstallmentDtos
));
}
|
@Test
public void testCreateLoan_InterestBearing() {
int term = 4;
BigDecimal originatedAmount = BigDecimal.valueOf(100.0);
String currency = "USD";
BigDecimal targetInterestRate = BigDecimal.valueOf(0.1);
BigDecimal effectiveInterestRate = BigDecimal.valueOf(0.1);
String externalReference = UUID.randomUUID().toString();
LocalDate startDate = LocalDate.of(2023, 1, 1);
LocalDate endDate = startDate;
String timezone = "America/Los_Angeles";
String region = "USA";
String state = "CA";
CreateLoanRequest createLoanRequest = new CreateLoanRequest(
term,
originatedAmount,
currency,
targetInterestRate,
effectiveInterestRate,
externalReference,
startDate,
endDate,
timezone,
region,
state
);
GetLoanResponse loanResponse = loanResourceManager.createLoan(createLoanRequest);
// Verify that all expected fields are set correctly in the response
assertEquals(loanResponse.loanInfo().term(), term);
assertEquals(loanResponse.loanInfo().targetInterestRate(), targetInterestRate);
assertEquals(loanResponse.loanInfo().effectiveInterestRate(), effectiveInterestRate);
assertEquals(loanResponse.loanInfo().currency(), currency);
assertEquals(loanResponse.loanInfo().externalReference(), externalReference);
assertEquals(loanResponse.loanInfo().startDate(), startDate);
assertEquals(loanResponse.loanInfo().endDate(), endDate);
assertEquals(loanResponse.loanInfo().timezone(), timezone);
assertEquals(loanResponse.loanInfo().region(), region);
assertEquals(loanResponse.loanInfo().state(), state);
// Verify that the installment schedule is correct
List<LoanInstallmentInfo> loanInstallmentInfoList = loanResponse.loanInfo().loanInstallments();
assertEquals(loanInstallmentInfoList.size(), 4);
loanInstallmentInfoList.forEach(loanInstallmentInfo -> {
assertEquals(loanInstallmentInfo.loanId(), loanResponse.loanInfo().loanId());
assertEquals(loanInstallmentInfo.status(), InstallmentStatus.OWED);
});
LoanInstallmentInfo installment1 = loanInstallmentInfoList.get(0);
assertEquals(installment1.principalAmount(), BigDecimal.valueOf(24.67));
assertEquals(installment1.interestAmount(), BigDecimal.valueOf(0.85));
LoanInstallmentInfo installment2 = loanInstallmentInfoList.get(1);
assertEquals(installment2.principalAmount(), BigDecimal.valueOf(24.94));
assertEquals(installment2.interestAmount(), BigDecimal.valueOf(0.58));
LoanInstallmentInfo installment3 = loanInstallmentInfoList.get(2);
assertEquals(installment3.principalAmount(), BigDecimal.valueOf(25.09));
assertEquals(installment3.interestAmount(), BigDecimal.valueOf(0.43));
LoanInstallmentInfo installment4 = loanInstallmentInfoList.get(3);
assertEquals(installment4.principalAmount(), BigDecimal.valueOf(25.3).setScale(2));
assertEquals(installment4.interestAmount(), BigDecimal.valueOf(0.21));
}
|
public static boolean webSocketHostPathMatches(String hostPath, String targetPath) {
boolean exactPathMatch = true;
if (ObjectHelper.isEmpty(hostPath) || ObjectHelper.isEmpty(targetPath)) {
// This scenario should not really be possible as the input args come from the vertx-websocket consumer / producer URI
return false;
}
// Paths ending with '*' are Vert.x wildcard routes so match on the path prefix
if (hostPath.endsWith("*")) {
exactPathMatch = false;
hostPath = hostPath.substring(0, hostPath.lastIndexOf('*'));
}
String normalizedHostPath = HttpUtils.normalizePath(hostPath + "/");
String normalizedTargetPath = HttpUtils.normalizePath(targetPath + "/");
String[] hostPathElements = normalizedHostPath.split("/");
String[] targetPathElements = normalizedTargetPath.split("/");
if (exactPathMatch && hostPathElements.length != targetPathElements.length) {
return false;
}
if (exactPathMatch) {
return normalizedHostPath.equals(normalizedTargetPath);
} else {
return normalizedTargetPath.startsWith(normalizedHostPath);
}
}
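For orientation, here is a minimal self-contained sketch of the same exact-vs-wildcard decision. The normalize step simply collapses duplicate slashes, which is only an assumption about what Camel's HttpUtils.normalizePath does, and the class name is invented for the example:
public class PathMatchExample {
// Stand-in for HttpUtils.normalizePath (assumed behaviour): collapse
// duplicate slashes so "/a//b/" and "/a/b/" compare equal.
static String normalize(String path) {
return path.replaceAll("/+", "/");
}
static boolean matches(String hostPath, String targetPath) {
boolean exact = !hostPath.endsWith("*");
if (!exact) {
// Wildcard route: strip the '*' and match on the path prefix.
hostPath = hostPath.substring(0, hostPath.lastIndexOf('*'));
}
String host = normalize(hostPath + "/");
String target = normalize(targetPath + "/");
return exact ? host.equals(target) : target.startsWith(host);
}
public static void main(String[] args) {
System.out.println(matches("/foo/bar", "/foo/bar")); // true (exact match)
System.out.println(matches("/foo/*", "/foo/bar/baz")); // true (wildcard prefix)
System.out.println(matches("/foo/bar", "/foo/bar/extra")); // false (exact, extra segment)
}
}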
|
@Test
void webSocketHostWildcardPathMatches() {
String hostPath = "/foo/bar/cheese/wine*";
String targetPath = "/foo/bar/cheese/wine/beer/additional/path";
assertTrue(VertxWebsocketHelper.webSocketHostPathMatches(hostPath, targetPath));
}
|
@Override
public HttpHeaders set(HttpHeaders headers) {
if (headers instanceof DefaultHttpHeaders) {
this.headers.set(((DefaultHttpHeaders) headers).headers);
return this;
} else {
return super.set(headers);
}
}
|
@Test
public void setObjectIterable() {
final DefaultHttpHeaders headers = newDefaultDefaultHttpHeaders();
headers.set(HEADER_NAME, HeaderValue.THREE.asList());
assertDefaultValues(headers, HeaderValue.THREE);
}
|
public CuratorFramework getClient() {
return client;
}
|
@Test
void getClient() {
CuratorFramework curatorFramework = client.getClient();
assertNotNull(curatorFramework);
}
|
public static <T extends BoundedWindow> TimestampPrefixingWindowCoder<T> of(
Coder<T> windowCoder) {
return new TimestampPrefixingWindowCoder<>(windowCoder);
}
|
@Test
public void testEncodeAndDecode() throws Exception {
List<IntervalWindow> intervalWindowsToTest =
Lists.newArrayList(
new IntervalWindow(new Instant(0L), new Instant(1L)),
new IntervalWindow(new Instant(100L), new Instant(200L)),
new IntervalWindow(new Instant(0L), BoundedWindow.TIMESTAMP_MAX_VALUE));
TimestampPrefixingWindowCoder<IntervalWindow> coder1 =
TimestampPrefixingWindowCoder.of(IntervalWindow.getCoder());
for (IntervalWindow window : intervalWindowsToTest) {
CoderProperties.coderDecodeEncodeEqual(coder1, window);
}
GlobalWindow globalWindow = GlobalWindow.INSTANCE;
TimestampPrefixingWindowCoder<GlobalWindow> coder2 =
TimestampPrefixingWindowCoder.of(GlobalWindow.Coder.INSTANCE);
CoderProperties.coderDecodeEncodeEqual(coder2, globalWindow);
TimestampPrefixingWindowCoder<CustomWindow> coder3 =
TimestampPrefixingWindowCoder.of(CustomWindowCoder.of(true, true));
for (CustomWindow window : CUSTOM_WINDOW_LIST) {
CoderProperties.coderDecodeEncodeEqual(coder3, window);
}
}
|
@VisibleForTesting
Pair<String, File> encryptSegmentIfNeeded(File tempDecryptedFile, File tempEncryptedFile,
boolean isUploadedSegmentEncrypted, String crypterUsedInUploadedSegment, String crypterClassNameInTableConfig,
String segmentName, String tableNameWithType) {
boolean segmentNeedsEncryption = StringUtils.isNotEmpty(crypterClassNameInTableConfig);
// form the output
File finalSegmentFile =
(isUploadedSegmentEncrypted || segmentNeedsEncryption) ? tempEncryptedFile : tempDecryptedFile;
String crypterClassName = StringUtils.isEmpty(crypterClassNameInTableConfig) ? crypterUsedInUploadedSegment
: crypterClassNameInTableConfig;
ImmutablePair<String, File> out = ImmutablePair.of(crypterClassName, finalSegmentFile);
if (!segmentNeedsEncryption) {
return out;
}
if (isUploadedSegmentEncrypted && !crypterClassNameInTableConfig.equals(crypterUsedInUploadedSegment)) {
throw new ControllerApplicationException(LOGGER, String.format(
"Uploaded segment is encrypted with '%s' while table config requires '%s' as crypter "
+ "(segment name = '%s', table name = '%s').", crypterUsedInUploadedSegment,
crypterClassNameInTableConfig, segmentName, tableNameWithType), Response.Status.INTERNAL_SERVER_ERROR);
}
// encrypt segment
PinotCrypter pinotCrypter = PinotCrypterFactory.create(crypterClassNameInTableConfig);
LOGGER.info("Using crypter class '{}' for encrypting '{}' to '{}' (segment name = '{}', table name = '{}').",
crypterClassNameInTableConfig, tempDecryptedFile, tempEncryptedFile, segmentName, tableNameWithType);
pinotCrypter.encrypt(tempDecryptedFile, tempEncryptedFile);
return out;
}
|
@Test
public void testEncryptSegmentIfNeededUploadedSegmentIsEncrypted() {
// arrange
boolean uploadedSegmentIsEncrypted = true;
String crypterClassNameInTableConfig = "NoOpPinotCrypter";
String crypterClassNameUsedInUploadedSegment = "NoOpPinotCrypter";
// act
Pair<String, File> encryptionInfo = _resource
.encryptSegmentIfNeeded(_decryptedFile, _encryptedFile, uploadedSegmentIsEncrypted,
crypterClassNameUsedInUploadedSegment, crypterClassNameInTableConfig, SEGMENT_NAME, TABLE_NAME);
// assert
assertEquals("NoOpPinotCrypter", encryptionInfo.getLeft());
assertEquals(_encryptedFile, encryptionInfo.getRight());
}
|
public static void init() {
init(null);
}
|
@Test
@Order(1)
public void testInit() throws IOException {
File rootSessionFile = new File(pathname);
if (rootSessionFile.exists()) {
rootSessionFile.delete();
}
SessionHolder.init(SessionMode.FILE);
try {
final File actual = new File(pathname);
Assertions.assertTrue(actual.exists());
Assertions.assertTrue(actual.isFile());
} finally {
SessionHolder.destroy();
}
}
|
public static <T extends Throwable> void checkNotEmpty(final String value, final Supplier<T> exceptionSupplierIfUnexpected) throws T {
if (Strings.isNullOrEmpty(value)) {
throw exceptionSupplierIfUnexpected.get();
}
}
|
@Test
void assertCheckNotEmptyWithStringToThrowsException() {
assertThrows(SQLException.class, () -> ShardingSpherePreconditions.checkNotEmpty((String) null, SQLException::new));
assertThrows(SQLException.class, () -> ShardingSpherePreconditions.checkNotEmpty("", SQLException::new));
}
|
@Override
protected SemanticProperties getSemanticPropertiesForLocalPropertyFiltering() {
// Local properties for MapPartition may not be preserved.
SingleInputSemanticProperties origProps =
((SingleInputOperator<?, ?, ?>) getOperator()).getSemanticProperties();
SingleInputSemanticProperties filteredProps = new SingleInputSemanticProperties();
FieldSet readSet = origProps.getReadFields(0);
if (readSet != null) {
filteredProps.addReadFields(readSet);
}
return filteredProps;
}
|
@Test
public void testGetSemanticProperties() {
SingleInputSemanticProperties origProps = new SingleInputSemanticProperties();
origProps.addForwardedField(0, 1);
origProps.addForwardedField(2, 2);
origProps.addReadFields(new FieldSet(0, 2, 4, 7));
MapPartitionOperatorBase<?, ?, ?> op = mock(MapPartitionOperatorBase.class);
when(op.getSemanticProperties()).thenReturn(origProps);
when(op.getKeyColumns(0)).thenReturn(new int[] {});
MapPartitionNode node = new MapPartitionNode(op);
SemanticProperties filteredProps = node.getSemanticPropertiesForLocalPropertyFiltering();
assertTrue(filteredProps.getForwardingTargetFields(0, 0).size() == 0);
assertTrue(filteredProps.getForwardingTargetFields(0, 2).size() == 0);
assertTrue(filteredProps.getForwardingSourceField(0, 1) < 0);
assertTrue(filteredProps.getForwardingSourceField(0, 2) < 0);
assertTrue(filteredProps.getReadFields(0).size() == 4);
assertTrue(filteredProps.getReadFields(0).contains(0));
assertTrue(filteredProps.getReadFields(0).contains(2));
assertTrue(filteredProps.getReadFields(0).contains(4));
assertTrue(filteredProps.getReadFields(0).contains(7));
}
|
@Override
public double getValue(double quantile) {
if (quantile < 0.0 || quantile > 1.0 || Double.isNaN(quantile)) {
throw new IllegalArgumentException(quantile + " is not in [0..1]");
}
if (values.length == 0) {
return 0.0;
}
final double pos = quantile * (values.length + 1);
final int index = (int) pos;
if (index < 1) {
return values[0];
}
if (index >= values.length) {
return values[values.length - 1];
}
final double lower = values[index - 1];
final double upper = values[index];
return lower + (pos - floor(pos)) * (upper - lower);
}
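The interpolation is easiest to verify with a tiny worked example. A standalone sketch (class and method names invented) that mirrors the arithmetic above:
public class QuantileExample {
// Same interpolation as getValue above: pos = q * (n + 1), then linear
// interpolation between the two neighbouring sorted values.
static double value(double[] values, double quantile) {
final double pos = quantile * (values.length + 1);
final int index = (int) pos;
if (index < 1) {
return values[0];
}
if (index >= values.length) {
return values[values.length - 1];
}
final double lower = values[index - 1];
final double upper = values[index];
return lower + (pos - Math.floor(pos)) * (upper - lower);
}
public static void main(String[] args) {
double[] values = {1, 2, 3};
// pos = 0.5 * 4 = 2.0, index = 2, lower = 2, upper = 3, fraction = 0.0,
// so the median of [1, 2, 3] comes out as 2.0.
System.out.println(value(values, 0.5)); // 2.0
}
}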
|
@Test
public void smallQuantilesAreTheFirstValue() {
assertThat(snapshot.getValue(0.0))
.isEqualTo(1, offset(0.1));
}
|
@Nullable
public UfsStatus addStatus(AlluxioURI path, UfsStatus status) {
if (!path.getName().equals(status.getName())) {
throw new IllegalArgumentException(
String.format("path name %s does not match ufs status name %s",
path.getName(), status.getName()));
}
return addStatusUnchecked(path, status);
}
|
@Test
public void testMismatchingChildComponent() {
UfsStatus s = Mockito.mock(UfsStatus.class);
when(s.getName()).thenReturn("name");
mThrown.expect(IllegalArgumentException.class);
mCache.addStatus(new AlluxioURI("/notName"), s);
}
|
public static Type convertType(TypeInfo typeInfo) {
switch (typeInfo.getOdpsType()) {
case BIGINT:
return Type.BIGINT;
case INT:
return Type.INT;
case SMALLINT:
return Type.SMALLINT;
case TINYINT:
return Type.TINYINT;
case FLOAT:
return Type.FLOAT;
case DECIMAL:
DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) typeInfo;
return ScalarType.createUnifiedDecimalType(decimalTypeInfo.getPrecision(), decimalTypeInfo.getScale());
case DOUBLE:
return Type.DOUBLE;
case CHAR:
CharTypeInfo charTypeInfo = (CharTypeInfo) typeInfo;
return ScalarType.createCharType(charTypeInfo.getLength());
case VARCHAR:
VarcharTypeInfo varcharTypeInfo = (VarcharTypeInfo) typeInfo;
return ScalarType.createVarcharType(varcharTypeInfo.getLength());
case STRING:
case JSON:
return ScalarType.createDefaultCatalogString();
case BINARY:
return Type.VARBINARY;
case BOOLEAN:
return Type.BOOLEAN;
case DATE:
return Type.DATE;
case TIMESTAMP:
case DATETIME:
return Type.DATETIME;
case MAP:
MapTypeInfo mapTypeInfo = (MapTypeInfo) typeInfo;
return new MapType(convertType(mapTypeInfo.getKeyTypeInfo()),
convertType(mapTypeInfo.getValueTypeInfo()));
case ARRAY:
ArrayTypeInfo arrayTypeInfo = (ArrayTypeInfo) typeInfo;
return new ArrayType(convertType(arrayTypeInfo.getElementTypeInfo()));
case STRUCT:
StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
List<Type> fieldTypeList =
structTypeInfo.getFieldTypeInfos().stream().map(EntityConvertUtils::convertType)
.collect(Collectors.toList());
return new StructType(fieldTypeList);
default:
return Type.VARCHAR;
}
}
|
@Test
public void testConvertTypeCaseDate() {
TypeInfo typeInfo = TypeInfoFactory.DATE;
Type result = EntityConvertUtils.convertType(typeInfo);
assertEquals(Type.DATE, result);
}
|
public Result parse(final String string) throws DateNotParsableException {
return this.parse(string, new Date());
}
|
@Test
public void singleDaytestParseAlignToAGivenTime() throws Exception {
final DateTimeFormatter df = DateTimeFormat.forPattern("HH:mm:ss");
for (String[] test : singleDaytestsThatAlignToAGivenTime) {
NaturalDateParser.Result result = naturalDateParser.parse(test[0]);
assertNotNull(result.getFrom());
assertNotNull(result.getTo());
assertThat(df.print(result.getFrom())).as("time part of date should equal " + test[1]).isEqualTo(test[1]);
}
}
|
@GetMapping("/plugin/rule/delete")
public Mono<String> deleteRule(@RequestParam("selectorId") final String selectorId,
@RequestParam("id") final String id,
@RequestParam("pluginName") final String pluginName) {
RuleData ruleData = RuleData.builder().selectorId(selectorId).id(id).pluginName(pluginName).build();
subscriber.unRuleSubscribe(ruleData);
return Mono.just(Constants.SUCCESS);
}
|
@Test
public void testDeleteRule() throws Exception {
final String testSelectorId = "testSaveRuleId";
final String testRuleId = "ruleId";
final String pluginName = "pluginName";
final RuleData ruleData = createRuleData(testSelectorId, testRuleId);
subscriber.onRuleSubscribe(ruleData);
this.mockMvc
.perform(MockMvcRequestBuilders.get("/shenyu/plugin/rule/delete")
.param("selectorId", ruleData.getSelectorId())
.param("id", ruleData.getId())
.param("pluginName", pluginName)
.contentType(MediaType.APPLICATION_JSON))
.andExpect(status().isOk())
.andReturn();
final List<RuleData> selectorId = baseDataCache.obtainRuleData(testSelectorId);
Assertions.assertTrue(selectorId.isEmpty());
}
|
@SuppressWarnings({"SimplifyBooleanReturn"})
public static Map<String, ParamDefinition> cleanupParams(Map<String, ParamDefinition> params) {
if (params == null || params.isEmpty()) {
return params;
}
Map<String, ParamDefinition> mapped =
params.entrySet().stream()
.collect(
MapHelper.toListMap(
Map.Entry::getKey,
p -> {
ParamDefinition param = p.getValue();
if (param.getType() == ParamType.MAP) {
MapParamDefinition mapParamDef = param.asMapParamDef();
if (mapParamDef.getValue() == null
&& (mapParamDef.getInternalMode() == InternalParamMode.OPTIONAL)) {
return mapParamDef;
}
return MapParamDefinition.builder()
.name(mapParamDef.getName())
.value(cleanupParams(mapParamDef.getValue()))
.expression(mapParamDef.getExpression())
.validator(mapParamDef.getValidator())
.tags(mapParamDef.getTags())
.mode(mapParamDef.getMode())
.meta(mapParamDef.getMeta())
.build();
} else {
return param;
}
}));
Map<String, ParamDefinition> filtered =
mapped.entrySet().stream()
.filter(
p -> {
ParamDefinition param = p.getValue();
if (param.getInternalMode() == InternalParamMode.OPTIONAL) {
if (param.getValue() == null && param.getExpression() == null) {
return false;
} else if (param.getType() == ParamType.MAP
&& param.asMapParamDef().getValue() != null
&& param.asMapParamDef().getValue().isEmpty()) {
return false;
} else {
return true;
}
} else {
Checks.checkTrue(
param.getValue() != null || param.getExpression() != null,
String.format(
"[%s] is a required parameter (type=[%s])",
p.getKey(), param.getType()));
return true;
}
})
.collect(MapHelper.toListMap(Map.Entry::getKey, Map.Entry::getValue));
return cleanIntermediateMetadata(filtered);
}
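Stripped of the Maestro types, the shape of the recursion is a plain nested-map cleanup. A simplified, illustrative sketch (the real logic keys off InternalParamMode.OPTIONAL and enforces required parameters, both elided here):
import java.util.LinkedHashMap;
import java.util.Map;
public class CleanupSketch {
// Simplified analogue of cleanupParams: recursively drop entries whose
// value is null or an empty (sub-)map, keeping everything else.
@SuppressWarnings("unchecked")
static Map<String, Object> cleanup(Map<String, Object> params) {
if (params == null || params.isEmpty()) {
return params;
}
Map<String, Object> result = new LinkedHashMap<>();
for (Map.Entry<String, Object> entry : params.entrySet()) {
Object value = entry.getValue();
if (value instanceof Map) {
Map<String, Object> cleaned = cleanup((Map<String, Object>) value);
if (!cleaned.isEmpty()) {
result.put(entry.getKey(), cleaned);
}
} else if (value != null) {
result.put(entry.getKey(), value);
}
}
return result;
}
public static void main(String[] args) {
Map<String, Object> nested = new LinkedHashMap<>();
nested.put("empty", new LinkedHashMap<String, Object>());
nested.put("kept", 42);
System.out.println(cleanup(nested)); // {kept=42}
}
}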
|
@Test
public void testCleanupOptionalEmptyMap() throws JsonProcessingException {
Map<String, ParamDefinition> allParams =
parseParamDefMap("{'map': {'type': 'MAP','value': {}, 'internal_mode': 'OPTIONAL'}}");
Map<String, ParamDefinition> cleanedParams = ParamsMergeHelper.cleanupParams(allParams);
assertEquals(0, cleanedParams.size());
}
|
@Override
public void updateIndices(SegmentDirectory.Writer segmentWriter)
throws Exception {
Map<String, List<Operation>> columnOperationsMap = computeOperations(segmentWriter);
if (columnOperationsMap.isEmpty()) {
return;
}
for (Map.Entry<String, List<Operation>> entry : columnOperationsMap.entrySet()) {
String column = entry.getKey();
List<Operation> operations = entry.getValue();
for (Operation operation : operations) {
switch (operation) {
case DISABLE_FORWARD_INDEX:
// Deletion of the forward index will be handled outside the index handler to ensure that other index
// handlers that need the forward index to construct their own indexes will have it available.
_tmpForwardIndexColumns.add(column);
break;
case ENABLE_FORWARD_INDEX:
ColumnMetadata columnMetadata = createForwardIndexIfNeeded(segmentWriter, column, false);
if (columnMetadata.hasDictionary()) {
if (!segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
throw new IllegalStateException(String.format(
"Dictionary should still exist after rebuilding forward index for dictionary column: %s", column));
}
} else {
if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
throw new IllegalStateException(
String.format("Dictionary should not exist after rebuilding forward index for raw column: %s",
column));
}
}
break;
case DISABLE_DICTIONARY:
Set<String> newForwardIndexDisabledColumns =
FieldIndexConfigsUtil.columnsWithIndexDisabled(_fieldIndexConfigs.keySet(), StandardIndexes.forward(),
_fieldIndexConfigs);
if (newForwardIndexDisabledColumns.contains(column)) {
removeDictionaryFromForwardIndexDisabledColumn(column, segmentWriter);
if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
throw new IllegalStateException(
String.format("Dictionary should not exist after disabling dictionary for column: %s", column));
}
} else {
disableDictionaryAndCreateRawForwardIndex(column, segmentWriter);
}
break;
case ENABLE_DICTIONARY:
createDictBasedForwardIndex(column, segmentWriter);
if (!segmentWriter.hasIndexFor(column, StandardIndexes.forward())) {
throw new IllegalStateException(String.format("Forward index was not created for column: %s", column));
}
break;
case CHANGE_INDEX_COMPRESSION_TYPE:
rewriteForwardIndexForCompressionChange(column, segmentWriter);
break;
default:
throw new IllegalStateException("Unsupported operation for column " + column);
}
}
}
}
|
@Test
public void testEnableForwardIndexInRawModeForMultipleForwardIndexDisabledColumns()
throws Exception {
SegmentMetadataImpl existingSegmentMetadata = new SegmentMetadataImpl(_segmentDirectory);
SegmentDirectory segmentLocalFSDirectory =
new SegmentLocalFSDirectory(_segmentDirectory, existingSegmentMetadata, ReadMode.mmap);
SegmentDirectory.Writer writer = segmentLocalFSDirectory.createWriter();
IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
Random rand = new Random();
String col1 = SV_FORWARD_INDEX_DISABLED_COLUMNS.get(rand.nextInt(SV_FORWARD_INDEX_DISABLED_COLUMNS.size()));
indexLoadingConfig.removeForwardIndexDisabledColumns(col1);
indexLoadingConfig.removeInvertedIndexColumns(col1);
indexLoadingConfig.addNoDictionaryColumns(col1);
String col2 = MV_FORWARD_INDEX_DISABLED_COLUMNS.get(rand.nextInt(MV_FORWARD_INDEX_DISABLED_COLUMNS.size()));
indexLoadingConfig.removeForwardIndexDisabledColumns(col2);
indexLoadingConfig.removeInvertedIndexColumns(col2);
indexLoadingConfig.addNoDictionaryColumns(col2);
ForwardIndexHandler fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
fwdIndexHandler.updateIndices(writer);
fwdIndexHandler.postUpdateIndicesCleanup(writer);
// Tear down before validation. Because columns.psf and index map cleanup happens at segmentDirectory.close()
segmentLocalFSDirectory.close();
// Col1 validation.
ColumnMetadata metadata = existingSegmentMetadata.getColumnMetadataFor(col1);
validateIndexMap(col1, false, false);
validateForwardIndex(col1, CompressionCodec.LZ4, metadata.isSorted());
// In column metadata, nothing should change.
validateMetadataProperties(col1, false, 0, metadata.getCardinality(), metadata.getTotalDocs(),
metadata.getDataType(), metadata.getFieldType(), metadata.isSorted(), metadata.isSingleValue(),
metadata.getMaxNumberOfMultiValues(), metadata.getTotalNumberOfEntries(), metadata.isAutoGenerated(),
metadata.getMinValue(), metadata.getMaxValue(), false);
// Col2 validation.
metadata = existingSegmentMetadata.getColumnMetadataFor(col2);
validateIndexMap(col2, false, false);
validateForwardIndex(col2, CompressionCodec.LZ4, metadata.isSorted());
// In column metadata, nothing should change.
validateMetadataProperties(col2, false, 0, metadata.getCardinality(), metadata.getTotalDocs(),
metadata.getDataType(), metadata.getFieldType(), metadata.isSorted(), metadata.isSingleValue(),
metadata.getMaxNumberOfMultiValues(), metadata.getTotalNumberOfEntries(), metadata.isAutoGenerated(),
metadata.getMinValue(), metadata.getMaxValue(), false);
}
|
public String loginUser(String username, String password) throws RestClientException {
return loginUserWithHttpInfo(username, password).getBody();
}
|
@Test
public void loginUserTest() {
String username = null;
String password = null;
String response = api.loginUser(username, password);
// TODO: test validations
}
|
@Override
public ImportResult importItem(
UUID jobId,
IdempotentImportExecutor idempotentImportExecutor,
TokensAndUrlAuthData authData,
VideosContainerResource data)
throws Exception {
if (data == null) {
// Nothing to do
return ImportResult.OK;
}
AppleMediaInterface mediaInterface = factory
.getOrCreateMediaInterface(jobId, authData, appCredentials, exportingService, monitor);
// Uploads album metadata
final int albumCount =
mediaInterface.importAlbums(
jobId,
idempotentImportExecutor,
data.getAlbums().stream()
.map(MediaAlbum::videoToMediaAlbum)
.collect(Collectors.toList()),
DataVertical.PHOTOS.getDataType());
final Map<String, Long> importPhotosResult =
mediaInterface.importAllMedia(
jobId,
idempotentImportExecutor,
data.getVideos(),
DataVertical.VIDEOS.getDataType());
// generate import result
final ImportResult result = ImportResult.OK;
final Map<String, Integer> counts =
new ImmutableMap.Builder<String, Integer>()
.put(PhotosContainerResource.ALBUMS_COUNT_DATA_NAME, albumCount)
.put(
VideosContainerResource.VIDEOS_COUNT_DATA_NAME,
importPhotosResult.get(ApplePhotosConstants.COUNT_KEY).intValue())
.build();
return result
.copyWithBytes(importPhotosResult.get(ApplePhotosConstants.BYTES_KEY))
.copyWithCounts(counts);
}
|
@Test
public void importVideosWithFailure() throws Exception {
// set up
final int videoCount = ApplePhotosConstants.maxNewMediaRequests + 1;
final List<VideoModel> videos = createTestVideos(videoCount);
final int errorCountGetUploadURL = 10;
final int errorCountUploadContent = 10;
final int errorCountCreateMedia = 10;
final int successCount =
videoCount - errorCountGetUploadURL - errorCountUploadContent - errorCountCreateMedia;
final List<String> dataIds =
videos.stream().map(VideoModel::getDataId).collect(Collectors.toList());
final Map<String, Integer> datatIdToGetUploadURLStatus =
setUpErrors(dataIds, 0, errorCountGetUploadURL);
final Map<String, Integer> datatIdToUploadContentStatus =
setUpErrors(dataIds, errorCountGetUploadURL, errorCountUploadContent);
final Map<String, Integer> datatIdToCreateMediaStatus =
setUpErrors(
dataIds, errorCountGetUploadURL + errorCountUploadContent, errorCountCreateMedia);
setUpGetUploadUrlResponse(datatIdToGetUploadURLStatus);
setUpUploadContentResponse(datatIdToUploadContentStatus);
setUpCreateMediaResponse(datatIdToCreateMediaStatus);
// run test
VideosContainerResource data = new VideosContainerResource(null, videos);
final ImportResult importResult =
appleVideosImporter.importItem(uuid, executor, authData, data);
// verify correct methods were called
verify(mediaInterface, times(2)).getUploadUrl(anyString(), anyString(), anyList());
verify(mediaInterface)
.getUploadUrl(
uuid.toString(),
DataVertical.VIDEOS.getDataType(),
videos.subList(0, ApplePhotosConstants.maxNewMediaRequests).stream()
.map(VideoModel::getDataId)
.collect(Collectors.toList()));
verify(mediaInterface)
.getUploadUrl(
uuid.toString(),
DataVertical.VIDEOS.getDataType(),
videos.subList(ApplePhotosConstants.maxNewMediaRequests, videoCount).stream()
.map(VideoModel::getDataId)
.collect(Collectors.toList()));
verify(mediaInterface, times(2)).uploadContent(anyMap(), anyList());
verify(mediaInterface, times(2)).createMedia(anyString(), anyString(), anyList());
// check the result
assertThat(importResult.getCounts().isPresent()).isTrue();
assertThat(importResult.getCounts().get().get(VIDEOS_COUNT_DATA_NAME) == successCount).isTrue();
assertThat(importResult.getBytes().get() == successCount * VIDEOS_FILE_SIZE).isTrue();
final Map<String, Serializable> expectedKnownValue =
videos.stream()
.filter(
VideoModel ->
datatIdToGetUploadURLStatus.get(VideoModel.getDataId()) == SC_OK)
.filter(
VideoModel ->
datatIdToUploadContentStatus.get(VideoModel.getDataId()) == SC_OK)
.filter(
VideoModel ->
datatIdToCreateMediaStatus.get(VideoModel.getDataId()) == SC_OK)
.collect(
Collectors.toMap(
video -> video.getAlbumId() + "-" + video.getDataId(),
video -> VIDEOS_DATAID_BASE + video.getDataId()));
checkKnownValues(expectedKnownValue);
// check errors
List<ErrorDetail> expectedErrors = new ArrayList<>();
for (int i = 0;
i < errorCountGetUploadURL + errorCountUploadContent + errorCountCreateMedia;
i++) {
final VideoModel video = videos.get(i);
final ErrorDetail.Builder errorDetailBuilder =
ErrorDetail.builder()
.setId(video.getIdempotentId())
.setTitle(video.getName())
.setException(
String.format(
"java.io.IOException: %s Fail to get upload url, errorCode:%d",
ApplePhotosConstants.APPLE_PHOTOS_IMPORT_ERROR_PREFIX,
SC_INTERNAL_SERVER_ERROR));
if (i < errorCountGetUploadURL) {
errorDetailBuilder.setException(
String.format(
"java.io.IOException: %s Fail to get upload url, errorCode:%d",
ApplePhotosConstants.APPLE_PHOTOS_IMPORT_ERROR_PREFIX,
SC_INTERNAL_SERVER_ERROR));
} else if (i < errorCountGetUploadURL + errorCountUploadContent) {
errorDetailBuilder.setException(String.format(
"java.io.IOException: %s Fail to upload content",
ApplePhotosConstants.APPLE_PHOTOS_IMPORT_ERROR_PREFIX));
} else {
errorDetailBuilder.setException(
String.format(
"java.io.IOException: %s Fail to create media, errorCode:%d",
ApplePhotosConstants.APPLE_PHOTOS_IMPORT_ERROR_PREFIX,
SC_INTERNAL_SERVER_ERROR));
}
expectedErrors.add(errorDetailBuilder.build());
}
checkErrors(expectedErrors);
checkRecentErrors(expectedErrors);
}
|
@SuppressWarnings("unchecked")
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
if (inner != null) {
log.error("Could not configure ListSerializer as the parameter has already been set -- inner: {}", inner);
throw new ConfigException("List serializer was already initialized using a non-default constructor");
}
final String innerSerdePropertyName = isKey ? CommonClientConfigs.DEFAULT_LIST_KEY_SERDE_INNER_CLASS : CommonClientConfigs.DEFAULT_LIST_VALUE_SERDE_INNER_CLASS;
final Object innerSerdeClassOrName = configs.get(innerSerdePropertyName);
if (innerSerdeClassOrName == null) {
throw new ConfigException("Not able to determine the serializer class because it was neither passed via the constructor nor set in the config.");
}
try {
if (innerSerdeClassOrName instanceof String) {
inner = Utils.newInstance((String) innerSerdeClassOrName, Serde.class).serializer();
} else if (innerSerdeClassOrName instanceof Class) {
inner = (Serializer<Inner>) ((Serde) Utils.newInstance((Class) innerSerdeClassOrName)).serializer();
} else {
throw new KafkaException("Could not create a serializer class instance using \"" + innerSerdePropertyName + "\" property.");
}
inner.configure(configs, isKey);
serStrategy = FIXED_LENGTH_SERIALIZERS.contains(inner.getClass()) ? SerializationStrategy.CONSTANT_SIZE : SerializationStrategy.VARIABLE_SIZE;
} catch (final ClassNotFoundException e) {
throw new ConfigException(innerSerdePropertyName, innerSerdeClassOrName, "Serializer class " + innerSerdeClassOrName + " could not be found.");
}
}
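The String-vs-Class branching above is a common pattern for configs that may carry either a class name or a Class object. A minimal standalone sketch of that dispatch with plain reflection (Kafka's Utils.newInstance additionally handles classloaders, which this sketch does not attempt):
public class ConfigDispatchSketch {
static Object instantiate(Object classOrName) throws Exception {
if (classOrName instanceof String) {
// The config carried a fully-qualified class name.
return Class.forName((String) classOrName).getDeclaredConstructor().newInstance();
} else if (classOrName instanceof Class) {
// The config carried the Class object itself.
return ((Class<?>) classOrName).getDeclaredConstructor().newInstance();
}
throw new IllegalArgumentException("Expected a String or Class, got: " + classOrName);
}
public static void main(String[] args) throws Exception {
System.out.println(instantiate("java.util.ArrayList").getClass()); // class java.util.ArrayList
System.out.println(instantiate(java.util.HashMap.class).getClass()); // class java.util.HashMap
}
}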
|
@Test
public void testListValueSerializerNoArgConstructorsShouldThrowKafkaExceptionDueClassNotFound() {
props.put(CommonClientConfigs.DEFAULT_LIST_VALUE_SERDE_INNER_CLASS, nonExistingClass);
final KafkaException exception = assertThrows(
KafkaException.class,
() -> listSerializer.configure(props, false)
);
assertEquals("Invalid value non.existing.class for configuration " + CommonClientConfigs.DEFAULT_LIST_VALUE_SERDE_INNER_CLASS + ": Serializer class " + nonExistingClass + " could not be found.", exception.getMessage());
}
|
static void validateDependencies(Set<Artifact> dependencies, Set<String> allowedRules, boolean failOnUnmatched)
throws EnforcerRuleException {
SortedSet<Artifact> unmatchedArtifacts = new TreeSet<>();
Set<String> matchedRules = new HashSet<>();
for (Artifact dependency : dependencies) {
boolean matches = false;
for (String rule : allowedRules) {
if (matches(dependency, rule)){
matchedRules.add(rule);
matches = true;
break;
}
}
if (!matches) {
unmatchedArtifacts.add(dependency);
}
}
SortedSet<String> unmatchedRules = new TreeSet<>(allowedRules);
unmatchedRules.removeAll(matchedRules);
if (!unmatchedArtifacts.isEmpty() || (failOnUnmatched && !unmatchedRules.isEmpty())) {
StringBuilder errorMessage = new StringBuilder("Vespa dependency enforcer failed:\n");
if (!unmatchedArtifacts.isEmpty()) {
errorMessage.append("Dependencies not matching any rule:\n");
unmatchedArtifacts.forEach(a -> errorMessage.append(" - ").append(a.toString()).append('\n'));
}
if (failOnUnmatched && !unmatchedRules.isEmpty()) {
errorMessage.append("Rules not matching any dependency:\n");
unmatchedRules.forEach(p -> errorMessage.append(" - ").append(p).append('\n'));
}
throw new EnforcerRuleException(errorMessage.toString());
}
}
|
@Test
void fails_on_scope_mismatch() {
Set<Artifact> dependencies = Set.of(
artifact("com.yahoo.vespa", "testutils", "8.0.0", "test"));
Set<String> rules = Set.of(
"com.yahoo.vespa:testutils:jar:8.0.0:provided");
EnforcerRuleException exception = assertThrows(
EnforcerRuleException.class,
() -> EnforceDependencies.validateDependencies(dependencies, rules, true));
String expectedErrorMessage =
"""
Vespa dependency enforcer failed:
Dependencies not matching any rule:
- com.yahoo.vespa:testutils:jar:8.0.0:test
Rules not matching any dependency:
- com.yahoo.vespa:testutils:jar:8.0.0:provided
""";
assertEquals(expectedErrorMessage, exception.getMessage());
}
|
static String metadataVersionAtDowngrade(Reconciliation reconciliation, String currentMetadataVersion, KafkaVersion versionTo) {
if (currentMetadataVersion != null) {
if (compareDottedIVVersions(currentMetadataVersion, versionTo.metadataVersion()) > 0) {
// The current metadata version is newer than the version we are downgrading to
// => something went completely wrong, and we should just throw an error
LOGGER.warnCr(reconciliation, "The current metadata version ({}) has to be lower or equal to the Kafka broker version we are downgrading to ({})", currentMetadataVersion, versionTo.version());
throw new KafkaUpgradeException("The current metadata version (" + currentMetadataVersion + ") has to be lower or equal to the Kafka broker version we are downgrading to (" + versionTo.version() + ")");
} else {
// We stick with the current metadata version for the first phase of the downgrade
// => it will be changed in the next phase (next reconciliation)
LOGGER.infoCr(reconciliation, "The current metadata version {} will be used in the first phase of the downgrade", currentMetadataVersion);
return currentMetadataVersion;
}
} else {
// Current metadata version is missing. This should normally not happen in downgrade as it suggests
// we are downgrading without the previous version being properly deployed. But in case it happens,
// we use the metadata version from the older version first.
LOGGER.warnCr(reconciliation, "The current metadata version seems to be missing during upgrade which is unexpected. The metadata version {} of the Kafka we are upgrading from will be used.", versionTo.metadataVersion());
return versionTo.metadataVersion();
}
}
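The decision above hinges on compareDottedIVVersions. A hypothetical re-implementation, purely for illustration, assuming the major.minor-IVn format seen in the test values below (Strimzi's actual comparison may differ):
public class DottedIvCompareSketch {
// Parses "3.6-IV2" into {3, 6, 2}; a missing "-IVn" suffix counts as IV0.
static int[] parse(String version) {
String[] mainAndIv = version.split("-IV", 2);
String[] majorMinor = mainAndIv[0].split("\\.");
int iv = mainAndIv.length > 1 ? Integer.parseInt(mainAndIv[1]) : 0;
return new int[] {Integer.parseInt(majorMinor[0]), Integer.parseInt(majorMinor[1]), iv};
}
static int compare(String a, String b) {
int[] pa = parse(a);
int[] pb = parse(b);
for (int i = 0; i < 3; i++) {
if (pa[i] != pb[i]) {
return Integer.compare(pa[i], pb[i]);
}
}
return 0;
}
public static void main(String[] args) {
System.out.println(compare("3.6-IV2", "3.4-IV2") > 0); // true: 3.6 is newer than 3.4
System.out.println(compare("3.6", "3.6-IV2") < 0); // true: no IV suffix sorts below IV2
}
}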
|
@Test
public void testMetadataVersionAtDowngrade() {
assertThat(KRaftVersionChangeCreator.metadataVersionAtDowngrade(Reconciliation.DUMMY_RECONCILIATION, "3.6-IV2", VERSIONS.defaultVersion()),
is("3.6-IV2"));
assertThat(KRaftVersionChangeCreator.metadataVersionAtDowngrade(Reconciliation.DUMMY_RECONCILIATION, "3.6", VERSIONS.defaultVersion()),
is("3.6"));
assertThat(KRaftVersionChangeCreator.metadataVersionAtDowngrade(Reconciliation.DUMMY_RECONCILIATION, "3.4-IV2", VERSIONS.defaultVersion()),
is("3.4-IV2"));
assertThat(KRaftVersionChangeCreator.metadataVersionAtDowngrade(Reconciliation.DUMMY_RECONCILIATION, null, VERSIONS.defaultVersion()),
is(VERSIONS.defaultVersion().metadataVersion()));
KafkaUpgradeException ex = assertThrows(KafkaUpgradeException.class, () -> KRaftVersionChangeCreator.metadataVersionAtDowngrade(Reconciliation.DUMMY_RECONCILIATION, "5.11-IV2", VERSIONS.defaultVersion()));
assertThat(ex.getMessage(), is("The current metadata version (5.11-IV2) has to be lower or equal to the Kafka broker version we are downgrading to (" + VERSIONS.defaultVersion().version() + ")"));
}
|
public static String getDataSourceUnitsNode(final String databaseName) {
return String.join("/", getMetaDataNode(), databaseName, DATA_SOURCES_NODE, DATA_SOURCE_UNITS_NODE);
}
|
@Test
void assertGetDataSourceUnitsNode() {
assertThat(DataSourceMetaDataNode.getDataSourceUnitsNode("foo_db"), is("/metadata/foo_db/data_sources/units"));
}
|
public Map<String, String> getIdentityContext(RequestResource resource) {
Map<String, String> header = new HashMap<>(1);
for (ClientAuthService clientAuthService : clientAuthPluginManager.getAuthServiceSpiImplSet()) {
LoginIdentityContext loginIdentityContext = clientAuthService.getLoginIdentityContext(resource);
for (String key : loginIdentityContext.getAllKey()) {
header.put(key, loginIdentityContext.getParameter(key));
}
}
return header;
}
|
@Test
void testGetIdentityContext() {
Properties properties = new Properties();
properties.setProperty(PropertyKeyConst.USERNAME, "aaa");
properties.setProperty(PropertyKeyConst.PASSWORD, "123456");
securityProxy.login(properties);
//when
Map<String, String> keyMap = securityProxy.getIdentityContext(null);
//then
assertEquals("ttttttttttttttttt", keyMap.get(NacosAuthLoginConstant.ACCESSTOKEN));
}
|
MapPSet<E> underlying() {
return this.underlying;
}
|
@Test
public void testUnderlying() {
assertSame(SINGLETON_SET, new PCollectionsImmutableSet<>(SINGLETON_SET).underlying());
}
|
@Override
public void executeSystemTask(WorkflowSystemTask systemTask, String taskId, int callbackTime) {
try {
Task task = executionDAOFacade.getTaskById(taskId);
if (task == null) {
LOG.error("TaskId: {} could not be found while executing SystemTask", taskId);
return;
}
LOG.debug("Task: {} fetched from execution DAO for taskId: {}", task, taskId);
String queueName = QueueUtils.getQueueName(task);
if (task.getStatus().isTerminal()) {
// Tune the SystemTaskWorkerCoordinator's queues - if the queue size is very big this can
// happen!
LOG.info("Task {}/{} was already completed.", task.getTaskType(), task.getTaskId());
queueDAO.remove(queueName, task.getTaskId());
return;
}
String workflowId = task.getWorkflowInstanceId();
Workflow workflow = executionDAOFacade.getWorkflowById(workflowId, true);
if (task.getStartTime() == 0) {
task.setStartTime(System.currentTimeMillis());
executionDAOFacade.updateTask(task);
Monitors.recordQueueWaitTime(task.getTaskDefName(), task.getQueueWaitTime());
}
if (workflow.getStatus().isTerminal()) {
LOG.info(
"Workflow {} has been completed for {}/{}",
workflow.getWorkflowId(),
systemTask.getName(),
task.getTaskId());
if (!task.getStatus().isTerminal()) {
task.setStatus(CANCELED);
}
executionDAOFacade.updateTask(task);
queueDAO.remove(queueName, task.getTaskId());
return;
}
LOG.debug("Executing {}/{}-{}", task.getTaskType(), task.getTaskId(), task.getStatus());
if (task.getStatus() == SCHEDULED || !systemTask.isAsyncComplete(task)) {
task.setPollCount(task.getPollCount() + 1);
// removed poll count DB update here
}
deciderService.populateTaskData(task);
// Stop polling for asyncComplete system tasks that are not in SCHEDULED state
if (systemTask.isAsyncComplete(task) && task.getStatus() != SCHEDULED) {
queueDAO.remove(QueueUtils.getQueueName(task), task.getTaskId());
return;
}
taskRunner.runMaestroTask(this, workflow, task, systemTask);
if (!task.getStatus().isTerminal()) {
task.setCallbackAfterSeconds(callbackTime);
try {
configureCallbackInterval(task); // overwrite if needed
} catch (Exception e) {
LOG.error(
"Error configuring callback interval for task [{}]. Please investigate it",
task.getTaskId(),
e);
}
}
updateTask(new TaskResult(task));
LOG.debug(
"Done Executing {}/{}-{} output={}",
task.getTaskType(),
task.getTaskId(),
task.getStatus(),
task.getOutputData());
} catch (Exception e) {
Monitors.error("MaestroWorkflowExecutor", "executeSystemTask");
LOG.error("Error executing system task - {}, with id: {}", systemTask, taskId, e);
}
}
|
@Test
public void testExecuteSystemTaskPersistStartTime() {
String workflowId = "workflow-id";
String taskId = "task-id-1";
Task maestroTask = new Task();
maestroTask.setTaskType(Constants.MAESTRO_TASK_NAME);
maestroTask.setReferenceTaskName("maestroTask");
maestroTask.setWorkflowInstanceId(workflowId);
maestroTask.setScheduledTime(System.currentTimeMillis());
maestroTask.setTaskId(taskId);
maestroTask.setStatus(Task.Status.IN_PROGRESS);
maestroTask.setStartTime(0);
maestroTask.setCallbackAfterSeconds(0);
Workflow workflow = new Workflow();
workflow.setWorkflowId(workflowId);
workflow.setStatus(Workflow.WorkflowStatus.RUNNING);
when(executionDAOFacade.getTaskById(anyString())).thenReturn(maestroTask);
when(executionDAOFacade.getWorkflowById(anyString(), anyBoolean())).thenReturn(workflow);
maestroWorkflowExecutor.executeSystemTask(task2, taskId, 30);
assertEquals(Task.Status.IN_PROGRESS, maestroTask.getStatus());
assertEquals(1, maestroTask.getPollCount());
verify(executionDAOFacade, times(1)).updateTask(any());
assertTrue(maestroTask.getStartTime() > 0);
assertEquals(30, maestroTask.getCallbackAfterSeconds());
}
|
public static byte[][] readArrowBatches(ReadableByteChannel channel) throws IOException {
List<byte[]> results = new ArrayList<>();
byte[] batch;
while ((batch = readNextBatch(channel)) != null) {
results.add(batch);
}
return results.toArray(new byte[0][]);
}
|
@Test
void testReadArrowBatches() throws IOException {
VectorSchemaRoot root =
VectorSchemaRoot.create(ArrowUtils.toArrowSchema(rowType), allocator);
ArrowWriter<RowData> arrowWriter = ArrowUtils.createRowDataArrowWriter(root, rowType);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
ArrowStreamWriter arrowStreamWriter = new ArrowStreamWriter(root, null, baos);
arrowStreamWriter.start();
List<RowData> testData =
Arrays.asList(
new GenericRowData(rowType.getFieldCount()),
new GenericRowData(rowType.getFieldCount()),
new GenericRowData(rowType.getFieldCount()),
new GenericRowData(rowType.getFieldCount()),
new GenericRowData(rowType.getFieldCount()));
int batches = 3;
List<List<RowData>> subLists = Lists.partition(testData, testData.size() / batches + 1);
for (List<RowData> subList : subLists) {
for (RowData value : subList) {
arrowWriter.write(value);
}
arrowWriter.finish();
arrowStreamWriter.writeBatch();
arrowWriter.reset();
}
assertThat(
ArrowUtils.readArrowBatches(
Channels.newChannel(
new ByteArrayInputStream(baos.toByteArray())))
.length)
.isEqualTo(batches);
}
|
@Override
protected Optional<ErrorResponse> filter(DiscFilterRequest req) {
var now = clock.instant();
var bearerToken = requestBearerToken(req).orElse(null);
if (bearerToken == null) {
log.fine("Missing bearer token");
return Optional.of(new ErrorResponse(Response.Status.UNAUTHORIZED, "Unauthorized"));
}
var permission = Permission.getRequiredPermission(req).orElse(null);
if (permission == null) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
var requestTokenHash = requestTokenHash(bearerToken);
var clientIds = new TreeSet<String>();
var permissions = EnumSet.noneOf(Permission.class);
var matchedTokens = new HashSet<TokenVersion>();
for (Client c : allowedClients) {
if (!c.permissions().contains(permission)) continue;
var matchedToken = c.tokens().get(requestTokenHash);
if (matchedToken == null) continue;
var expiration = matchedToken.expiration().orElse(null);
if (expiration != null && now.isAfter(expiration)) continue;
matchedTokens.add(matchedToken);
clientIds.add(c.id());
permissions.addAll(c.permissions());
}
if (clientIds.isEmpty()) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
if (matchedTokens.size() > 1) {
log.warning("Multiple tokens matched for request %s"
.formatted(matchedTokens.stream().map(TokenVersion::id).toList()));
return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
}
var matchedToken = matchedTokens.stream().findAny().get();
addAccessLogEntry(req, "token.id", matchedToken.id());
addAccessLogEntry(req, "token.hash", matchedToken.fingerprint().toDelimitedHexString());
addAccessLogEntry(req, "token.exp", matchedToken.expiration().map(Instant::toString).orElse("<none>"));
ClientPrincipal.attachToRequest(req, clientIds, permissions);
return Optional.empty();
}
|
@Test
void rejects_missing_tokens_on_empty_clients() {
var req = FilterTestUtils.newRequestBuilder()
.withMethod(Method.GET)
.build();
var responseHandler = new MockResponseHandler();
newFilterWithEmptyClientsConfig().filter(req, responseHandler);
assertNotNull(responseHandler.getResponse());
assertEquals(UNAUTHORIZED, responseHandler.getResponse().getStatus());
}
|
static void cleanStackTrace(Throwable throwable) {
new StackTraceCleaner(throwable).clean(Sets.<Throwable>newIdentityHashSet());
}
|
@Test
public void cyclesAreHandled() {
SelfReferencingThrowable selfReferencingThrowable =
new SelfReferencingThrowable("com.example.Foo", "org.junit.FilterMe");
StackTraceCleaner.cleanStackTrace(selfReferencingThrowable);
assertThat(selfReferencingThrowable.getStackTrace())
.isEqualTo(createStackTrace("com.example.Foo"));
}
|
public Span newChild(TraceContext parent) {
if (parent == null) throw new NullPointerException("parent == null");
return _toSpan(parent, decorateContext(parent, parent.spanId()));
}
|
@Test void newChild_resultantSpanIsLocalRoot() {
Span span = tracer.newChild(context);
assertThat(span.context().spanId()).isEqualTo(span.context().localRootId()); // Sanity check
assertThat(span.context().isLocalRoot()).isTrue();
// Check we don't always make children local roots
Span child = tracer.newChild(span.context());
assertThat(child.context().localRootId()).isEqualTo(span.context().localRootId());
assertThat(child.context().isLocalRoot()).isFalse();
}
|
static <T, W extends BoundedWindow> AssignWindowsRunner<T, W> create(
WindowFn<? super T, W> windowFn) {
// Safe contravariant cast
WindowFn<T, W> typedWindowFn = (WindowFn<T, W>) windowFn;
return new AssignWindowsRunner<>(typedWindowFn);
}
|
@Test
public void factoryCreatesFromKnownWindowFn() throws Exception {
SdkComponents components = SdkComponents.create();
components.registerEnvironment(Environments.createDockerEnvironment("java"));
PTransform windowPTransform =
PTransform.newBuilder()
.putInputs("in", "input")
.putOutputs("out", "output")
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.ASSIGN_WINDOWS_TRANSFORM_URN)
.setPayload(
WindowIntoPayload.newBuilder()
.setWindowFn(
WindowingStrategyTranslation.toProto(
Sessions.withGapDuration(Duration.standardMinutes(12L)),
components))
.build()
.toByteString())
.build())
.build();
ThrowingFunction<WindowedValue<?>, WindowedValue<?>> fn =
(ThrowingFunction) factory.forPTransform("transform", windowPTransform);
WindowedValue<?> output =
fn.apply(
WindowedValue.of(
22L,
new Instant(5),
new IntervalWindow(new Instant(0L), new Instant(20027L)),
PaneInfo.ON_TIME_AND_ONLY_FIRING));
assertThat(
output,
equalTo(
WindowedValue.of(
22L,
new Instant(5),
new IntervalWindow(new Instant(5L), Duration.standardMinutes(12L)),
PaneInfo.ON_TIME_AND_ONLY_FIRING)));
}
|
@Override
public Result apply(String action, Class<? extends Validatable> aClass, String resource, String resourceToOperateWithin) {
if (matchesAction(action) && matchesType(aClass) && matchesResource(resource)) {
return Result.DENY;
}
if (isRequestForElasticAgentProfiles(aClass) && matchesAction(action) && matchesResource(resourceToOperateWithin)) {
return Result.DENY;
}
return Result.SKIP;
}
|
@Test
void forViewOfWildcardDefinedClusterProfile() {
Deny directive = new Deny("view", "cluster_profile", "team1_*");
Result viewAllElasticAgentProfiles = directive.apply("view", ElasticProfile.class, "*", null);
Result viewAllElasticAgentProfilesUnderTeam1 = directive.apply("view", ElasticProfile.class, "*", "team1_uat");
Result viewAllElasticAgentProfilesUnderTeam2 = directive.apply("view", ElasticProfile.class, "*", "team2_uat");
Result viewAllClusterProfiles = directive.apply("view", ClusterProfile.class, "*", null);
Result viewTeam1ClusterProfile = directive.apply("view", ClusterProfile.class, "team1_uat", null);
Result viewTeam2ClusterProfile = directive.apply("view", ClusterProfile.class, "team2_uat", null);
Result administerAllElasticAgentProfiles = directive.apply("administer", ElasticProfile.class, "*", null);
Result administerAllElasticAgentProfilesUnderTeam1 = directive.apply("administer", ElasticProfile.class, "*", "team1_uat");
Result administerAllElasticAgentProfilesUnderTeam2 = directive.apply("administer", ElasticProfile.class, "*", "team2_uat");
Result administerAllClusterProfiles = directive.apply("administer", ClusterProfile.class, "*", null);
Result administerTeam1ClusterProfile = directive.apply("administer", ClusterProfile.class, "team1_uat", null);
Result administerTeam2ClusterProfile = directive.apply("administer", ClusterProfile.class, "team2_uat", null);
assertThat(viewAllElasticAgentProfiles).isEqualTo(Result.SKIP);
assertThat(viewAllElasticAgentProfilesUnderTeam1).isEqualTo(Result.DENY);
assertThat(viewAllElasticAgentProfilesUnderTeam2).isEqualTo(Result.SKIP);
assertThat(viewAllClusterProfiles).isEqualTo(Result.SKIP);
assertThat(viewTeam1ClusterProfile).isEqualTo(Result.DENY);
assertThat(viewTeam2ClusterProfile).isEqualTo(Result.SKIP);
assertThat(administerAllElasticAgentProfiles).isEqualTo(Result.SKIP);
assertThat(administerAllElasticAgentProfilesUnderTeam1).isEqualTo(Result.SKIP);
assertThat(administerAllElasticAgentProfilesUnderTeam2).isEqualTo(Result.SKIP);
assertThat(administerAllClusterProfiles).isEqualTo(Result.SKIP);
assertThat(administerTeam1ClusterProfile).isEqualTo(Result.SKIP);
assertThat(administerTeam2ClusterProfile).isEqualTo(Result.SKIP);
}
|
public static MetadataUpdate fromJson(String json) {
return JsonUtil.parse(json, MetadataUpdateParser::fromJson);
}
|
@Test
public void testSetDefaultSortOrderFromJson() {
String action = MetadataUpdateParser.SET_DEFAULT_SORT_ORDER;
int sortOrderId = 2;
String json = String.format("{\"action\":\"%s\",\"sort-order-id\":%d}", action, sortOrderId);
MetadataUpdate expected = new MetadataUpdate.SetDefaultSortOrder(sortOrderId);
assertEquals(action, expected, MetadataUpdateParser.fromJson(json));
}
|
public boolean write(final int msgTypeId, final DirectBuffer srcBuffer, final int offset, final int length)
{
checkTypeId(msgTypeId);
checkMsgLength(length);
final AtomicBuffer buffer = this.buffer;
final int recordLength = length + HEADER_LENGTH;
final int recordIndex = claimCapacity(buffer, recordLength);
if (INSUFFICIENT_CAPACITY == recordIndex)
{
return false;
}
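// Commit protocol: publish the length as a negative value first so that
// concurrent readers treat the record as "in progress", then copy the
// payload, and only then publish the positive length with ordered semantics.
// A reader polling the length field never observes a partially written record.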
buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength);
MemoryAccess.releaseFence();
buffer.putBytes(encodedMsgOffset(recordIndex), srcBuffer, offset, length);
buffer.putInt(typeOffset(recordIndex), msgTypeId);
buffer.putIntOrdered(lengthOffset(recordIndex), recordLength);
return true;
}
|
@Test
void shouldInsertPaddingRecordPlusMessageOnBufferWrapWithHeadEqualToTail()
{
final int length = 200;
final int recordLength = length + HEADER_LENGTH;
final int alignedRecordLength = align(recordLength, ALIGNMENT);
final long tail = CAPACITY - HEADER_LENGTH;
final long head = tail;
when(buffer.getLongVolatile(HEAD_COUNTER_INDEX)).thenReturn(head);
when(buffer.getLong(TAIL_COUNTER_INDEX)).thenReturn(tail);
final UnsafeBuffer srcBuffer = new UnsafeBuffer(allocateDirect(1024));
final int srcIndex = 0;
assertTrue(ringBuffer.write(MSG_TYPE_ID, srcBuffer, srcIndex, length));
final InOrder inOrder = inOrder(buffer);
inOrder.verify(buffer).putLongOrdered(TAIL_COUNTER_INDEX, tail + alignedRecordLength + HEADER_LENGTH);
inOrder.verify(buffer).putLong(0, 0L);
inOrder.verify(buffer).putIntOrdered(lengthOffset((int)tail), -HEADER_LENGTH);
inOrder.verify(buffer).putInt(typeOffset((int)tail), PADDING_MSG_TYPE_ID);
inOrder.verify(buffer).putIntOrdered(lengthOffset((int)tail), HEADER_LENGTH);
inOrder.verify(buffer).putLong(alignedRecordLength, 0L);
inOrder.verify(buffer).putIntOrdered(lengthOffset(0), -recordLength);
inOrder.verify(buffer).putBytes(encodedMsgOffset(0), srcBuffer, srcIndex, length);
inOrder.verify(buffer).putInt(typeOffset(0), MSG_TYPE_ID);
inOrder.verify(buffer).putIntOrdered(lengthOffset(0), recordLength);
}
|
public boolean unregisterOutput(String id) {
final PartialWatermark output = watermarkPerOutputId.remove(id);
if (output != null) {
combinedWatermarkStatus.remove(output);
return true;
} else {
return false;
}
}
|
@Test
void testRemoveNotRegisteredReturnValue() {
final TestingWatermarkOutput underlyingWatermarkOutput = createTestingWatermarkOutput();
final WatermarkOutputMultiplexer multiplexer =
new WatermarkOutputMultiplexer(underlyingWatermarkOutput);
final boolean unregistered = multiplexer.unregisterOutput("does-not-exist");
assertThat(unregistered).isFalse();
}
|
@SuppressWarnings("unused")
@Restricted(NoExternalUse.class)
public static String getSymbol(String name, String title, String tooltip, String htmlTooltip, String classes, String pluginName, String id) {
return Symbol.get(new SymbolRequest.Builder()
.withName(IconSet.cleanName(name))
.withTitle(title)
.withTooltip(tooltip)
.withHtmlTooltip(htmlTooltip)
.withClasses(classes)
.withPluginName(pluginName)
.withId(id)
.build()
);
}
|
@Test
void getSymbol_cachedSymbolDoesntReturnAttributes() {
IconSet.getSymbol("download", "Title", "Tooltip", "", "class1 class2", "", "id");
String symbol = IconSet.getSymbol("download", "", "", "", "", "", "");
assertThat(symbol, not(containsString("<span class=\"jenkins-visually-hidden\">Title</span>")));
assertThat(symbol, not(containsString("tooltip=\"Tooltip\"")));
assertThat(symbol, not(containsString("class=\"class1 class2\"")));
assertThat(symbol, not(containsString("id=\"id\"")));
}
|
public static String buildErrorMessage(final Throwable throwable) {
if (throwable == null) {
return "";
}
final List<String> messages = dedup(getErrorMessages(throwable));
final String msg = messages.remove(0);
final String causeMsg = messages.stream()
.filter(s -> !s.isEmpty())
.map(cause -> WordUtils.wrap(PREFIX + cause, 80, "\n\t", true))
.collect(Collectors.joining(System.lineSeparator()));
return causeMsg.isEmpty() ? msg : msg + System.lineSeparator() + causeMsg;
}
|
@Test
public void shouldNotDeduplicateMessageIfNextMessageIsLonger() {
final Throwable cause = new TestException("Something went wrong");
final Throwable subLevel1 = new TestException("Some Message with more detail", cause);
final Throwable e = new TestException("Some Message", subLevel1);
assertThat(
buildErrorMessage(e),
is("Some Message" + System.lineSeparator()
+ "Caused by: Some Message with more detail" + System.lineSeparator()
+ "Caused by: Something went wrong")
);
}
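// The helpers getErrorMessages and dedup are not shown above; this standalone sketch
// mirrors their apparent contract, with the dedup rule inferred from the test (a cause
// message is dropped only when it is contained in its parent's message).
import java.util.ArrayList;
import java.util.List;

final class ErrorMessageChain {
    static List<String> messages(Throwable t) {
        List<String> out = new ArrayList<>();
        for (Throwable cur = t; cur != null; cur = cur.getCause()) {
            String msg = cur.getMessage() == null ? "" : cur.getMessage();
            // inferred dedup rule: skip a cause fully contained in its parent's message
            if (out.isEmpty() || !out.get(out.size() - 1).contains(msg)) {
                out.add(msg);
            }
        }
        return out;
    }

    public static void main(String[] args) {
        Throwable cause = new RuntimeException("Something went wrong");
        Throwable mid = new RuntimeException("Some Message with more detail", cause);
        System.out.println(messages(new RuntimeException("Some Message", mid)));
        // [Some Message, Some Message with more detail, Something went wrong]
    }
}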
|
@SuppressWarnings("unchecked")
@SneakyThrows(ReflectiveOperationException.class)
public static <T> T getStaticFieldValue(final Class<?> target, final String fieldName) {
Field field = target.getDeclaredField(fieldName);
boolean accessible = field.isAccessible();
if (!accessible) {
field.setAccessible(true);
}
T result = (T) field.get(target);
if (!accessible) {
field.setAccessible(false);
}
return result;
}
|
@Test
void assertGetStaticFieldValue() {
assertThat(ReflectionUtils.getStaticFieldValue(ReflectionFixture.class, "staticValue"), is("static_value"));
}
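// A standalone illustration of the accessibility save-and-restore pattern used by
// getStaticFieldValue above, without the Lombok @SneakyThrows wrapper; class and
// field names here are invented for the example.
import java.lang.reflect.Field;

final class StaticFieldPeek {
    private static final String SECRET = "hidden";

    public static void main(String[] args) throws ReflectiveOperationException {
        Field field = StaticFieldPeek.class.getDeclaredField("SECRET");
        boolean accessible = field.isAccessible();
        if (!accessible) {
            field.setAccessible(true);   // temporarily open the private field
        }
        Object value = field.get(null);  // null receiver: static field
        if (!accessible) {
            field.setAccessible(false);  // restore the original accessibility
        }
        System.out.println(value);       // prints "hidden"
    }
}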
|
public final void containsEntry(@Nullable Object key, @Nullable Object value) {
Map.Entry<@Nullable Object, @Nullable Object> entry = immutableEntry(key, value);
checkNotNull(actual);
if (!actual.entrySet().contains(entry)) {
List<@Nullable Object> keyList = singletonList(key);
List<@Nullable Object> valueList = singletonList(value);
if (actual.containsKey(key)) {
Object actualValue = actual.get(key);
/*
* In the case of a null expected or actual value, clarify that the key *is* present and
* *is* expected to be present. That is, get() isn't returning null to indicate that the key
* is missing, and the user isn't making an assertion that the key is missing.
*/
StandardSubjectBuilder check = check("get(%s)", key);
if (value == null || actualValue == null) {
check = check.withMessage("key is present but with a different value");
}
// See the comment on IterableSubject's use of failEqualityCheckForEqualsWithoutDescription.
check.that(actualValue).failEqualityCheckForEqualsWithoutDescription(value);
} else if (hasMatchingToStringPair(actual.keySet(), keyList)) {
failWithoutActual(
fact("expected to contain entry", entry),
fact("an instance of", objectToTypeName(entry)),
simpleFact("but did not"),
fact(
"though it did contain keys",
countDuplicatesAndAddTypeInfo(
retainMatchingToString(actual.keySet(), /* itemsToCheck= */ keyList))),
fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
} else if (actual.containsValue(value)) {
Set<@Nullable Object> keys = new LinkedHashSet<>();
for (Map.Entry<?, ?> actualEntry : actual.entrySet()) {
if (Objects.equal(actualEntry.getValue(), value)) {
keys.add(actualEntry.getKey());
}
}
failWithoutActual(
fact("expected to contain entry", entry),
simpleFact("but did not"),
fact("though it did contain keys with that value", keys),
fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
} else if (hasMatchingToStringPair(actual.values(), valueList)) {
failWithoutActual(
fact("expected to contain entry", entry),
fact("an instance of", objectToTypeName(entry)),
simpleFact("but did not"),
fact(
"though it did contain values",
countDuplicatesAndAddTypeInfo(
retainMatchingToString(actual.values(), /* itemsToCheck= */ valueList))),
fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
} else {
failWithActual("expected to contain entry", entry);
}
}
}
|
@Test
public void failMapContainsKeyWithNullValuePresentExpected() {
Map<String, String> actual = Maps.newHashMap();
actual.put("a", null);
expectFailureWhenTestingThat(actual).containsEntry("a", "A");
assertFailureValue("value of", "map.get(a)");
assertFailureValue("expected", "A");
assertFailureValue("but was", "null");
assertFailureValue("map was", "{a=null}");
assertThat(expectFailure.getFailure())
.hasMessageThat()
.contains(KEY_IS_PRESENT_WITH_DIFFERENT_VALUE);
}
|
public static Ip6Prefix valueOf(byte[] address, int prefixLength) {
return new Ip6Prefix(Ip6Address.valueOf(address), prefixLength);
}
|
@Test(expected = NullPointerException.class)
public void testInvalidValueOfNullString() {
Ip6Prefix ipPrefix;
String fromString;
fromString = null;
ipPrefix = Ip6Prefix.valueOf(fromString);
}
|
void start() throws IOException, InterruptedException {
startUpdater();
Options options = new Options()
.withHost(null) // wildcard any-address binding
.withPort(port)
.withReuseAddr(true)
.withReusePort(true)
.withAcceptLength(8_192)
.withMaxRequestSize(1_024 * 1_024)
.withReadBufferSize(1_024 * 64)
.withResolution(Duration.ofMillis(1_000))
.withRequestTimeout(Duration.ofSeconds(90));
EventLoop eventLoop = new EventLoop(options, new DisabledLogger(), this::handle);
eventLoop.start();
eventLoop.join();
}
|
@Test
void plainTextAndJson() throws IOException, InterruptedException {
HelloWebServer server = new HelloWebServer(8080);
Runnable task = () -> {
try {
server.start();
} catch (IOException e) {
throw new RuntimeException(e);
}
};
Thread thread = new Thread(task);
thread.setDaemon(true);
thread.start();
HttpClient client = HttpClient.newBuilder()
.version(HttpClient.Version.HTTP_1_1)
.build();
verifyPlainText(client);
verifyJson(client);
verifyOther(client);
}
|
public static byte[] getNullableSizePrefixedArray(final ByteBuffer buffer) {
final int size = buffer.getInt();
return getNullableArray(buffer, size);
}
|
@Test
public void getNullableSizePrefixedArrayExactEmpty() {
byte[] input = {0, 0, 0, 0};
final ByteBuffer buffer = ByteBuffer.wrap(input);
final byte[] array = Utils.getNullableSizePrefixedArray(buffer);
assertArrayEquals(new byte[] {}, array);
assertEquals(4, buffer.position());
assertFalse(buffer.hasRemaining());
}
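// getNullableArray is not shown above; this self-contained sketch shows the
// size-prefix convention it appears to implement, assuming (as is common) that
// a negative size prefix encodes null.
import java.nio.ByteBuffer;

final class SizePrefixedArrays {
    static ByteBuffer encode(byte[] value) {
        if (value == null) {
            return (ByteBuffer) ByteBuffer.allocate(4).putInt(-1).flip(); // null marker
        }
        return (ByteBuffer) ByteBuffer.allocate(4 + value.length)
                .putInt(value.length).put(value).flip();
    }

    static byte[] decode(ByteBuffer buffer) {
        int size = buffer.getInt();  // size prefix
        if (size < 0) {
            return null;             // negative size means null
        }
        byte[] out = new byte[size];
        buffer.get(out);
        return out;
    }

    public static void main(String[] args) {
        System.out.println(decode(encode(null)));                     // null
        System.out.println(decode(encode(new byte[] {1, 2})).length); // 2
    }
}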
|
@Override
public <VO, VR> KStream<K, VR> leftJoin(final KStream<K, VO> otherStream,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final JoinWindows windows) {
return leftJoin(otherStream, toValueJoinerWithKey(joiner), windows);
}
|
@Test
public void shouldNotAllowNullMapperOnLeftJoinWithGlobalTableWithNamed() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.leftJoin(
testGlobalTable,
null,
MockValueJoiner.TOSTRING_JOINER,
Named.as("name")));
assertThat(exception.getMessage(), equalTo("keySelector can't be null"));
}
|
@Override
public PluginDescriptor find(Path pluginPath) {
for (PluginDescriptorFinder finder : finders) {
if (finder.isApplicable(pluginPath)) {
log.debug("'{}' is applicable for plugin '{}'", finder, pluginPath);
try {
PluginDescriptor pluginDescriptor = finder.find(pluginPath);
if (pluginDescriptor != null) {
return pluginDescriptor;
}
} catch (Exception e) {
if (finders.indexOf(finder) == finders.size() - 1) {
// it's the last finder
log.error(e.getMessage(), e);
} else {
// log the exception and continue with the next finder
log.debug(e.getMessage());
log.debug("Try to continue with the next finder");
}
}
} else {
log.debug("'{}' is not applicable for plugin '{}'", finder, pluginPath);
}
}
throw new PluginRuntimeException("No PluginDescriptorFinder for plugin '{}'", pluginPath);
}
|
@Test
public void find() throws Exception {
Path pluginPath = Files.createDirectories(pluginsPath.resolve("test-plugin-1"));
storePropertiesToPath(getPlugin1Properties(), pluginPath);
PluginDescriptorFinder descriptorFinder = new CompoundPluginDescriptorFinder()
.add(new PropertiesPluginDescriptorFinder());
PluginDescriptor pluginDescriptor = descriptorFinder.find(pluginPath);
assertNotNull(pluginDescriptor);
assertEquals("test-plugin-1", pluginDescriptor.getPluginId());
assertEquals("0.0.1", pluginDescriptor.getVersion());
}
|
static void checkValidCollectionName(String databaseName, String collectionName) {
String fullCollectionName = databaseName + "." + collectionName;
if (collectionName.length() < MIN_COLLECTION_NAME_LENGTH) {
throw new IllegalArgumentException("Collection name cannot be empty.");
}
if (fullCollectionName.length() > MAX_COLLECTION_NAME_LENGTH) {
throw new IllegalArgumentException(
"Collection name "
+ fullCollectionName
+ " cannot be longer than "
+ MAX_COLLECTION_NAME_LENGTH
+ " characters, including the database name and dot.");
}
if (ILLEGAL_COLLECTION_CHARS.matcher(collectionName).find()) {
throw new IllegalArgumentException(
"Collection name "
+ collectionName
+ " is not a valid name. Only letters, numbers, hyphens, underscores and exclamation points are allowed.");
}
if (collectionName.charAt(0) != '_' && !Character.isLetter(collectionName.charAt(0))) {
throw new IllegalArgumentException(
"Collection name " + collectionName + " must start with a letter or an underscore.");
}
String illegalKeyword = "system.";
if (collectionName.startsWith(illegalKeyword)) {
throw new IllegalArgumentException(
"Collection name "
+ collectionName
+ " cannot start with the prefix \""
+ illegalKeyword
+ "\".");
}
}
|
@Test
public void testCheckValidCollectionNameThrowsErrorWhenNameBeginsWithSystemKeyword() {
assertThrows(
IllegalArgumentException.class,
() -> checkValidCollectionName("test-database", "system.test-collection"));
}
|
public int getNumber4()
{
checkAvailable(4);
return get(Wire::getUInt32, 4);
}
|
@Test
public void testGetInt()
{
ZFrame frame = new ZFrame(new byte[4]);
ZNeedle needle = new ZNeedle(frame);
assertThat(needle.getNumber4(), is(0));
}
|
public boolean matches(KeyEvent e)
{
return matches(e, false);
}
|
@Test
public void testUnknownKey()
{
Keybind keybind = new Keybind(VK_UNDEFINED, CTRL_DOWN_MASK);
keybind.matches(createKeyEvent(KEY_RELEASED, 0, VK_UNDEFINED));
}
|
@PostMapping("/token")
@PermitAll
@Operation(summary = "获得访问令牌", description = "适合 code 授权码模式,或者 implicit 简化模式;在 sso.vue 单点登录界面被【获取】调用")
@Parameters({
@Parameter(name = "grant_type", required = true, description = "授权类型", example = "code"),
@Parameter(name = "code", description = "授权范围", example = "userinfo.read"),
@Parameter(name = "redirect_uri", description = "重定向 URI", example = "https://www.iocoder.cn"),
@Parameter(name = "state", description = "状态", example = "1"),
@Parameter(name = "username", example = "tudou"),
@Parameter(name = "password", example = "cai"), // 多个使用空格分隔
@Parameter(name = "scope", example = "user_info"),
@Parameter(name = "refresh_token", example = "123424233"),
})
public CommonResult<OAuth2OpenAccessTokenRespVO> postAccessToken(HttpServletRequest request,
@RequestParam("grant_type") String grantType,
@RequestParam(value = "code", required = false) String code, // 授权码模式
@RequestParam(value = "redirect_uri", required = false) String redirectUri, // 授权码模式
@RequestParam(value = "state", required = false) String state, // 授权码模式
@RequestParam(value = "username", required = false) String username, // 密码模式
@RequestParam(value = "password", required = false) String password, // 密码模式
@RequestParam(value = "scope", required = false) String scope, // 密码模式
@RequestParam(value = "refresh_token", required = false) String refreshToken) { // 刷新模式
List<String> scopes = OAuth2Utils.buildScopes(scope);
// 1.1 校验授权类型
OAuth2GrantTypeEnum grantTypeEnum = OAuth2GrantTypeEnum.getByGrantType(grantType);
if (grantTypeEnum == null) {
throw exception0(BAD_REQUEST.getCode(), StrUtil.format("未知授权类型({})", grantType));
}
if (grantTypeEnum == OAuth2GrantTypeEnum.IMPLICIT) {
throw exception0(BAD_REQUEST.getCode(), "Token 接口不支持 implicit 授权模式");
}
// 1.2 校验客户端
String[] clientIdAndSecret = obtainBasicAuthorization(request);
OAuth2ClientDO client = oauth2ClientService.validOAuthClientFromCache(clientIdAndSecret[0], clientIdAndSecret[1],
grantType, scopes, redirectUri);
// 2. 根据授权模式,获取访问令牌
OAuth2AccessTokenDO accessTokenDO;
switch (grantTypeEnum) {
case AUTHORIZATION_CODE:
accessTokenDO = oauth2GrantService.grantAuthorizationCodeForAccessToken(client.getClientId(), code, redirectUri, state);
break;
case PASSWORD:
accessTokenDO = oauth2GrantService.grantPassword(username, password, client.getClientId(), scopes);
break;
case CLIENT_CREDENTIALS:
accessTokenDO = oauth2GrantService.grantClientCredentials(client.getClientId(), scopes);
break;
case REFRESH_TOKEN:
accessTokenDO = oauth2GrantService.grantRefreshToken(refreshToken, client.getClientId());
break;
default:
throw new IllegalArgumentException("未知授权类型:" + grantType);
}
Assert.notNull(accessTokenDO, "访问令牌不能为空"); // 防御性检查
return success(OAuth2OpenConvert.INSTANCE.convert(accessTokenDO));
}
|
@Test
public void testPostAccessToken_implicit() {
    // invoke and assert
assertServiceException(() -> oauth2OpenController.postAccessToken(null,
OAuth2GrantTypeEnum.IMPLICIT.getGrantType(), null, null, null,
null, null, null, null),
            new ErrorCode(400, "The token endpoint does not support the implicit grant type"));
}
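// For reference, a hypothetical HTTP-level password-grant call against the endpoint
// above. The host, context path, and client credentials are illustrative assumptions;
// the form field names follow the @RequestParam bindings, and username/password reuse
// the documented examples (tudou/cai).
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.Base64;

public final class TokenRequestExample {
    public static void main(String[] args) throws Exception {
        String basic = Base64.getEncoder()
                .encodeToString("client-id:client-secret".getBytes()); // assumed credentials
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:8080/oauth2/token")) // assumed base URL
                .header("Authorization", "Basic " + basic)
                .header("Content-Type", "application/x-www-form-urlencoded")
                .POST(HttpRequest.BodyPublishers.ofString(
                        "grant_type=password&username=tudou&password=cai&scope=user_info"))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body()); // CommonResult wrapping the access token
    }
}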
|
public void getConfig(StorDistributionConfig.Group.Builder builder) {
builder.index(index == null ? "invalid" : index);
builder.name(name == null ? "invalid" : name);
partitions.ifPresent(builder::partitions);
for (StorageNode node : nodes) {
StorDistributionConfig.Group.Nodes.Builder nb = new StorDistributionConfig.Group.Nodes.Builder();
nb.index(node.getDistributionKey());
nb.retired(node.isRetired());
builder.nodes.add(nb);
}
builder.capacity(getCapacity());
}
|
@Test
void testNestedGroups() throws Exception {
ContentCluster cluster = parse(
"<content version=\"1.0\" id=\"storage\">\n" +
" <redundancy>4</redundancy>" +
" <documents/>" +
" <group>\n" +
" <distribution partitions=\"1|*\"/>\n" +
" <group distribution-key=\"0\" name=\"sub1\">\n" +
" <node hostalias=\"mockhost\" distribution-key=\"0\"/>\n" +
" <node hostalias=\"mockhost\" distribution-key=\"1\"/>\n" +
" </group>\n" +
" <group distribution-key=\"1\" name=\"sub2\">\n" +
" <distribution partitions=\"1|*\"/>\n" +
" <group distribution-key=\"0\" name=\"sub3\">\n" +
" <node hostalias=\"mockhost\" distribution-key=\"2\"/>\n" +
" <node hostalias=\"mockhost\" distribution-key=\"3\"/>\n" +
" </group>\n" +
" <group distribution-key=\"1\" name=\"sub4\">\n" +
" <node hostalias=\"mockhost\" distribution-key=\"4\"/>\n" +
" <node hostalias=\"mockhost\" distribution-key=\"5\"/>\n" +
" </group>\n" +
" </group>\n" +
" </group>\n" +
"</content>"
);
StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder();
cluster.getConfig(builder);
StorDistributionConfig config = new StorDistributionConfig(builder);
assertEquals(5, config.group().size());
assertEquals("invalid", config.group(0).index());
assertEquals("0", config.group(1).index());
assertEquals("1", config.group(2).index());
assertEquals("1.0", config.group(3).index());
assertEquals("1.1", config.group(4).index());
assertEquals("invalid", config.group(0).name());
assertEquals("sub1", config.group(1).name());
assertEquals("sub2", config.group(2).name());
assertEquals("sub3", config.group(3).name());
assertEquals("sub4", config.group(4).name());
assertEquals(2, config.group(1).nodes().size());
assertEquals(0, config.group(1).nodes(0).index());
assertEquals(1, config.group(1).nodes(1).index());
assertEquals(0, config.group(2).nodes().size());
assertEquals(2, config.group(3).nodes().size());
assertEquals(2, config.group(3).nodes(0).index());
assertEquals(3, config.group(3).nodes(1).index());
assertEquals(2, config.group(4).nodes().size());
assertEquals(4, config.group(4).nodes(0).index());
assertEquals(5, config.group(4).nodes(1).index());
assertEquals("1|*", config.group(0).partitions());
DistributionConfig.Builder distributionBuilder = new DistributionConfig.Builder();
cluster.getConfig(distributionBuilder);
DistributionConfig.Cluster clusterConfig = distributionBuilder.build().cluster("storage");
assertEquals(5, clusterConfig.group().size());
assertEquals("invalid", clusterConfig.group(0).index());
assertEquals("0", clusterConfig.group(1).index());
assertEquals("1", clusterConfig.group(2).index());
assertEquals("1.0", clusterConfig.group(3).index());
assertEquals("1.1", clusterConfig.group(4).index());
assertEquals("invalid", clusterConfig.group(0).name());
assertEquals("sub1", clusterConfig.group(1).name());
assertEquals("sub2", clusterConfig.group(2).name());
assertEquals("sub3", clusterConfig.group(3).name());
assertEquals("sub4", clusterConfig.group(4).name());
assertEquals(2, clusterConfig.group(1).nodes().size());
assertEquals(0, clusterConfig.group(1).nodes(0).index());
assertEquals(1, clusterConfig.group(1).nodes(1).index());
assertEquals(0, clusterConfig.group(2).nodes().size());
assertEquals(2, clusterConfig.group(3).nodes().size());
assertEquals(2, clusterConfig.group(3).nodes(0).index());
assertEquals(3, clusterConfig.group(3).nodes(1).index());
assertEquals(2, clusterConfig.group(4).nodes().size());
assertEquals(4, clusterConfig.group(4).nodes(0).index());
assertEquals(5, clusterConfig.group(4).nodes(1).index());
assertEquals("1|*", clusterConfig.group(0).partitions());
}
|
public Set<String> extractPlaceholderKeys(String propertyString) {
Set<String> placeholderKeys = Sets.newHashSet();
if (!isPlaceholder(propertyString)) {
return placeholderKeys;
}
Stack<String> stack = new Stack<>();
stack.push(propertyString);
while (!stack.isEmpty()) {
String strVal = stack.pop();
int startIndex = strVal.indexOf(PLACEHOLDER_PREFIX);
if (startIndex == -1) {
placeholderKeys.add(strVal);
continue;
}
int endIndex = findPlaceholderEndIndex(strVal, startIndex);
if (endIndex == -1) {
// invalid placeholder?
continue;
}
String placeholderCandidate = strVal.substring(startIndex + PLACEHOLDER_PREFIX.length(), endIndex);
// ${some.key:other.key}
if (placeholderCandidate.startsWith(PLACEHOLDER_PREFIX)) {
stack.push(placeholderCandidate);
}
else {
// some.key:${some.other.key:100}
int separatorIndex = placeholderCandidate.indexOf(VALUE_SEPARATOR);
if (separatorIndex == -1) {
stack.push(placeholderCandidate);
}
else {
stack.push(placeholderCandidate.substring(0, separatorIndex));
String defaultValuePart =
normalizeToPlaceholder(placeholderCandidate.substring(separatorIndex + VALUE_SEPARATOR.length()));
if (!Strings.isNullOrEmpty(defaultValuePart)) {
stack.push(defaultValuePart);
}
}
}
// has remaining part, e.g. ${a}.${b}
if (endIndex + PLACEHOLDER_SUFFIX.length() < strVal.length() - 1) {
String remainingPart = normalizeToPlaceholder(strVal.substring(endIndex + PLACEHOLDER_SUFFIX.length()));
if (!Strings.isNullOrEmpty(remainingPart)) {
stack.push(remainingPart);
}
}
}
return placeholderKeys;
}
|
@Test
public void extractNormalPlaceholderKeysTest() {
final String placeholderCase = "${some.key}";
final String placeholderCase1 = "${some.key:${some.other.key:100}}";
final String placeholderCase2 = "${${some.key}}";
final String placeholderCase3 = "${${some.key:other.key}}";
final String placeholderCase4 = "${${some.key}:${another.key}}";
final String placeholderCase5 = "#{new java.text.SimpleDateFormat('${some.key}').parse('${another.key}')}";
Set<String> placeholderKeys = PLACEHOLDER_HELPER.extractPlaceholderKeys(placeholderCase);
assertThat(placeholderKeys.size()).isEqualTo(1);
assertThat(placeholderKeys).contains("some.key");
Set<String> placeholderKeys1 = PLACEHOLDER_HELPER.extractPlaceholderKeys(placeholderCase1);
assertThat(placeholderKeys1.size()).isEqualTo(2);
assertThat(placeholderKeys1).contains("some.key");
assertThat(placeholderKeys1).contains("some.other.key");
Set<String> placeholderKeys2 = PLACEHOLDER_HELPER.extractPlaceholderKeys(placeholderCase2);
assertThat(placeholderKeys2.size()).isEqualTo(1);
assertThat(placeholderKeys2).contains("some.key");
Set<String> placeholderKeys3 = PLACEHOLDER_HELPER.extractPlaceholderKeys(placeholderCase3);
assertThat(placeholderKeys3.size()).isEqualTo(1);
assertThat(placeholderKeys3).contains("some.key");
Set<String> placeholderKeys4 = PLACEHOLDER_HELPER.extractPlaceholderKeys(placeholderCase4);
assertThat(placeholderKeys4.size()).isEqualTo(2);
assertThat(placeholderKeys4).contains("some.key");
assertThat(placeholderKeys4).contains("another.key");
Set<String> placeholderKeys5 = PLACEHOLDER_HELPER.extractPlaceholderKeys(placeholderCase5);
assertThat(placeholderKeys5.size()).isEqualTo(2);
assertThat(placeholderKeys5).contains("some.key");
assertThat(placeholderKeys5).contains("another.key");
}
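// findPlaceholderEndIndex is not shown above; this is a minimal sketch of the
// balanced-brace scan it presumably performs. Details such as escape handling
// are assumptions, and the method name is invented for the example.
final class PlaceholderEndIndexSketch {
    static int findEndIndex(String value, int startIndex) {
        int depth = 0;
        for (int i = startIndex + "${".length(); i < value.length(); i++) {
            if (value.startsWith("${", i)) {
                depth++;                  // a nested placeholder opens
            } else if (value.charAt(i) == '}') {
                if (depth == 0) {
                    return i;             // matching brace of the outer ${
                }
                depth--;                  // a nested placeholder closes
            }
        }
        return -1;                        // unbalanced: no matching brace
    }

    public static void main(String[] args) {
        System.out.println(findEndIndex("${some.key:${other:1}}", 0)); // 21
    }
}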
|
public static Class<?> desc2class(String desc) throws ClassNotFoundException {
return desc2class(ClassUtils.getClassLoader(), desc);
}
|
@Test
void testDesc2Class() throws Exception {
assertEquals(void.class, ReflectUtils.desc2class("V"));
assertEquals(boolean.class, ReflectUtils.desc2class("Z"));
assertEquals(boolean[].class, ReflectUtils.desc2class("[Z"));
assertEquals(byte.class, ReflectUtils.desc2class("B"));
assertEquals(char.class, ReflectUtils.desc2class("C"));
assertEquals(double.class, ReflectUtils.desc2class("D"));
assertEquals(float.class, ReflectUtils.desc2class("F"));
assertEquals(int.class, ReflectUtils.desc2class("I"));
assertEquals(long.class, ReflectUtils.desc2class("J"));
assertEquals(short.class, ReflectUtils.desc2class("S"));
assertEquals(String.class, ReflectUtils.desc2class("Ljava.lang.String;"));
assertEquals(int[][].class, ReflectUtils.desc2class(ReflectUtils.getDesc(int[][].class)));
assertEquals(ReflectUtilsTest[].class, ReflectUtils.desc2class(ReflectUtils.getDesc(ReflectUtilsTest[].class)));
String desc;
Class<?>[] cs;
cs = new Class<?>[] {int.class, getClass(), String.class, int[][].class, boolean[].class};
desc = ReflectUtils.getDesc(cs);
assertSame(cs, ReflectUtils.desc2classArray(desc));
cs = new Class<?>[] {};
desc = ReflectUtils.getDesc(cs);
assertSame(cs, ReflectUtils.desc2classArray(desc));
cs = new Class<?>[] {void.class, String[].class, int[][].class, ReflectUtilsTest[][].class};
desc = ReflectUtils.getDesc(cs);
assertSame(cs, ReflectUtils.desc2classArray(desc));
}
|
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
if ("equals".equals(method.getName())) {
try {
Object otherHandler =
args.length > 0 && args[0] != null ? Proxy.getInvocationHandler(args[0]) : null;
return equals(otherHandler);
} catch (IllegalArgumentException e) {
return false;
}
} else if ("hashCode".equals(method.getName())) {
return hashCode();
} else if ("toString".equals(method.getName())) {
return toString();
} else if (!dispatch.containsKey(method)) {
throw new UnsupportedOperationException(
String.format("Method \"%s\" should not be called", method.getName()));
}
return this.invoke(method, this.dispatch.get(method), args);
}
|
@Test
void invokeFailureReactor() throws Throwable {
given(this.methodHandler.invoke(any())).willThrow(new IOException("Could Not Decode"));
ReactorInvocationHandler handler = new ReactorInvocationHandler(this.target,
Collections.singletonMap(this.method, this.methodHandler), Schedulers.boundedElastic());
Object result = handler.invoke(this.method, this.methodHandler, new Object[] {});
assertThat(result).isInstanceOf(Mono.class);
verifyNoInteractions(this.methodHandler);
/* subscribe and execute the method, should result in an error */
StepVerifier.create((Mono) result)
.expectError(IOException.class)
.verify();
verify(this.methodHandler, times(1)).invoke(any());
}
|
@Override
public TypeMapping createTypeMapping(IndexMainType mainType) {
checkState(this.mainType == null, "Main type can only be defined once");
this.mainType = mainType;
return super.createTypeMapping(mainType);
}
|
@Test
public void createTypeMapping_with_IndexRelationType_fails_with_ISE_if_called_before_createType_with_IndexMainType() {
Index index = Index.withRelations(SOME_INDEX_NAME);
NewRegularIndex underTest = new NewRegularIndex(index, defaultSettingsConfiguration);
assertThatThrownBy(() -> underTest.createTypeMapping(IndexType.relation(IndexType.main(index, "foo"), "bar")))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Mapping for main type must be created first");
}
|
@Override
public <T> @NonNull Schema schemaFor(TypeDescriptor<T> typeDescriptor) {
return schemaFor(typeDescriptor.getRawType());
}
|
@Test
public void testMainStructSchemaWithContainerTypedefRegistered() {
final Schema schema = customSchemaProvider.schemaFor(TypeDescriptor.of(TestThriftStruct.class));
assertNotNull(schema);
assertEquals(TypeName.BOOLEAN, schema.getField("testBool").getType().getTypeName());
assertEquals(TypeName.BYTE, schema.getField("testByte").getType().getTypeName());
assertEquals(TypeName.INT16, schema.getField("testShort").getType().getTypeName());
assertEquals(TypeName.INT32, schema.getField("testInt").getType().getTypeName());
assertEquals(TypeName.INT64, schema.getField("testLong").getType().getTypeName());
assertEquals(TypeName.DOUBLE, schema.getField("testDouble").getType().getTypeName());
assertEquals(TypeName.BYTES, schema.getField("testBinary").getType().getTypeName());
assertEquals(TypeName.MAP, schema.getField("stringIntMap").getType().getTypeName());
assertEquals(TypeName.LOGICAL_TYPE, schema.getField("testEnum").getType().getTypeName());
assertEquals(
EnumerationType.IDENTIFIER,
schema.getField("testEnum").getType().getLogicalType().getIdentifier());
assertEquals(TypeName.ARRAY, schema.getField("testList").getType().getTypeName());
assertEquals(TypeName.ROW, schema.getField("testNested").getType().getTypeName());
assertEquals(
TypeName.ITERABLE, schema.getField("testStringSetTypedef").getType().getTypeName());
}
|
public static Builder builder() {
return new Builder();
}
|
@Test
// Test cases that can't be constructed with our Builder class but that will parse correctly
public void testCanDeserializeWithoutDefaultValues() throws JsonProcessingException {
    // Name and schema are the only two required fields
String jsonOnlyRequiredFieldsMissingDefaults =
String.format(
"{\"name\":\"%s\",\"schema\":%s}", SAMPLE_NAME, SchemaParser.toJson(SAMPLE_SCHEMA));
CreateTableRequest reqOnlyRequiredFieldsMissingDefaults =
CreateTableRequest.builder().withName(SAMPLE_NAME).withSchema(SAMPLE_SCHEMA).build();
assertEquals(
deserialize(jsonOnlyRequiredFieldsMissingDefaults), reqOnlyRequiredFieldsMissingDefaults);
}
|
@VisibleForTesting
public static void validateIngestionConfig(TableConfig tableConfig, @Nullable Schema schema) {
IngestionConfig ingestionConfig = tableConfig.getIngestionConfig();
if (ingestionConfig != null) {
String tableNameWithType = tableConfig.getTableName();
// Batch
if (ingestionConfig.getBatchIngestionConfig() != null) {
BatchIngestionConfig cfg = ingestionConfig.getBatchIngestionConfig();
List<Map<String, String>> batchConfigMaps = cfg.getBatchConfigMaps();
try {
if (CollectionUtils.isNotEmpty(batchConfigMaps)) {
// Validate that BatchConfig can be created
batchConfigMaps.forEach(b -> new BatchConfig(tableNameWithType, b));
}
} catch (Exception e) {
throw new IllegalStateException("Could not create BatchConfig using the batchConfig map", e);
}
if (tableConfig.isDimTable()) {
Preconditions.checkState(cfg.getSegmentIngestionType().equalsIgnoreCase("REFRESH"),
"Dimension tables must have segment ingestion type REFRESH");
}
}
if (tableConfig.isDimTable()) {
Preconditions.checkState(ingestionConfig.getBatchIngestionConfig() != null,
"Dimension tables must have batch ingestion configuration");
}
// Stream
// stream config map can either be in ingestion config or indexing config. cannot be in both places
if (ingestionConfig.getStreamIngestionConfig() != null) {
IndexingConfig indexingConfig = tableConfig.getIndexingConfig();
Preconditions.checkState(indexingConfig == null || MapUtils.isEmpty(indexingConfig.getStreamConfigs()),
"Should not use indexingConfig#getStreamConfigs if ingestionConfig#StreamIngestionConfig is provided");
List<Map<String, String>> streamConfigMaps = ingestionConfig.getStreamIngestionConfig().getStreamConfigMaps();
Preconditions.checkState(streamConfigMaps.size() == 1, "Only 1 stream is supported in REALTIME table");
}
// Filter config
FilterConfig filterConfig = ingestionConfig.getFilterConfig();
if (filterConfig != null) {
String filterFunction = filterConfig.getFilterFunction();
if (filterFunction != null) {
if (_disableGroovy && FunctionEvaluatorFactory.isGroovyExpression(filterFunction)) {
throw new IllegalStateException(
"Groovy filter functions are disabled for table config. Found '" + filterFunction + "'");
}
try {
FunctionEvaluatorFactory.getExpressionEvaluator(filterFunction);
} catch (Exception e) {
throw new IllegalStateException("Invalid filter function " + filterFunction, e);
}
}
}
// Aggregation configs
List<AggregationConfig> aggregationConfigs = ingestionConfig.getAggregationConfigs();
Set<String> aggregationSourceColumns = new HashSet<>();
if (!CollectionUtils.isEmpty(aggregationConfigs)) {
Preconditions.checkState(!tableConfig.getIndexingConfig().isAggregateMetrics(),
"aggregateMetrics cannot be set with AggregationConfig");
Set<String> aggregationColumns = new HashSet<>();
for (AggregationConfig aggregationConfig : aggregationConfigs) {
String columnName = aggregationConfig.getColumnName();
String aggregationFunction = aggregationConfig.getAggregationFunction();
if (columnName == null || aggregationFunction == null) {
throw new IllegalStateException(
"columnName/aggregationFunction cannot be null in AggregationConfig " + aggregationConfig);
}
FieldSpec fieldSpec = null;
if (schema != null) {
fieldSpec = schema.getFieldSpecFor(columnName);
Preconditions.checkState(fieldSpec != null, "The destination column '" + columnName
+ "' of the aggregation function must be present in the schema");
Preconditions.checkState(fieldSpec.getFieldType() == FieldSpec.FieldType.METRIC,
"The destination column '" + columnName + "' of the aggregation function must be a metric column");
}
if (!aggregationColumns.add(columnName)) {
throw new IllegalStateException("Duplicate aggregation config found for column '" + columnName + "'");
}
ExpressionContext expressionContext;
try {
expressionContext = RequestContextUtils.getExpression(aggregationConfig.getAggregationFunction());
} catch (Exception e) {
throw new IllegalStateException(
"Invalid aggregation function '" + aggregationFunction + "' for column '" + columnName + "'", e);
}
Preconditions.checkState(expressionContext.getType() == ExpressionContext.Type.FUNCTION,
"aggregation function must be a function for: %s", aggregationConfig);
FunctionContext functionContext = expressionContext.getFunction();
AggregationFunctionType functionType =
AggregationFunctionType.getAggregationFunctionType(functionContext.getFunctionName());
validateIngestionAggregation(functionType);
List<ExpressionContext> arguments = functionContext.getArguments();
int numArguments = arguments.size();
if (functionType == DISTINCTCOUNTHLL) {
Preconditions.checkState(numArguments >= 1 && numArguments <= 2,
"DISTINCT_COUNT_HLL can have at most two arguments: %s", aggregationConfig);
if (numArguments == 2) {
ExpressionContext secondArgument = arguments.get(1);
Preconditions.checkState(secondArgument.getType() == ExpressionContext.Type.LITERAL,
"Second argument of DISTINCT_COUNT_HLL must be literal: %s", aggregationConfig);
String literal = secondArgument.getLiteral().getStringValue();
Preconditions.checkState(StringUtils.isNumeric(literal),
"Second argument of DISTINCT_COUNT_HLL must be a number: %s", aggregationConfig);
}
if (fieldSpec != null) {
DataType dataType = fieldSpec.getDataType();
Preconditions.checkState(dataType == DataType.BYTES,
"Result type for DISTINCT_COUNT_HLL must be BYTES: %s", aggregationConfig);
}
} else if (functionType == DISTINCTCOUNTHLLPLUS) {
Preconditions.checkState(numArguments >= 1 && numArguments <= 3,
"DISTINCT_COUNT_HLL_PLUS can have at most three arguments: %s", aggregationConfig);
if (numArguments == 2) {
ExpressionContext secondArgument = arguments.get(1);
Preconditions.checkState(secondArgument.getType() == ExpressionContext.Type.LITERAL,
"Second argument of DISTINCT_COUNT_HLL_PLUS must be literal: %s", aggregationConfig);
String literal = secondArgument.getLiteral().getStringValue();
Preconditions.checkState(StringUtils.isNumeric(literal),
"Second argument of DISTINCT_COUNT_HLL_PLUS must be a number: %s", aggregationConfig);
}
if (numArguments == 3) {
ExpressionContext thirdArgument = arguments.get(2);
Preconditions.checkState(thirdArgument.getType() == ExpressionContext.Type.LITERAL,
"Third argument of DISTINCT_COUNT_HLL_PLUS must be literal: %s", aggregationConfig);
String literal = thirdArgument.getLiteral().getStringValue();
Preconditions.checkState(StringUtils.isNumeric(literal),
"Third argument of DISTINCT_COUNT_HLL_PLUS must be a number: %s", aggregationConfig);
}
if (fieldSpec != null) {
DataType dataType = fieldSpec.getDataType();
Preconditions.checkState(dataType == DataType.BYTES,
"Result type for DISTINCT_COUNT_HLL_PLUS must be BYTES: %s", aggregationConfig);
}
} else if (functionType == SUMPRECISION) {
Preconditions.checkState(numArguments >= 2 && numArguments <= 3,
"SUM_PRECISION must specify precision (required), scale (optional): %s", aggregationConfig);
ExpressionContext secondArgument = arguments.get(1);
Preconditions.checkState(secondArgument.getType() == ExpressionContext.Type.LITERAL,
"Second argument of SUM_PRECISION must be literal: %s", aggregationConfig);
String literal = secondArgument.getLiteral().getStringValue();
Preconditions.checkState(StringUtils.isNumeric(literal),
"Second argument of SUM_PRECISION must be a number: %s", aggregationConfig);
if (fieldSpec != null) {
DataType dataType = fieldSpec.getDataType();
            Preconditions.checkState(dataType == DataType.BIG_DECIMAL || dataType == DataType.BYTES,
                "Result type for SUM_PRECISION must be BIG_DECIMAL or BYTES: %s", aggregationConfig);
}
} else {
Preconditions.checkState(numArguments == 1, "%s can only have one argument: %s", functionType,
aggregationConfig);
}
ExpressionContext firstArgument = arguments.get(0);
Preconditions.checkState(firstArgument.getType() == ExpressionContext.Type.IDENTIFIER,
"First argument of aggregation function: %s must be identifier, got: %s", functionType,
firstArgument.getType());
aggregationSourceColumns.add(firstArgument.getIdentifier());
}
if (schema != null) {
Preconditions.checkState(new HashSet<>(schema.getMetricNames()).equals(aggregationColumns),
"all metric columns must be aggregated");
}
// This is required by MutableSegmentImpl.enableMetricsAggregationIfPossible().
      // That code will disable ingestion aggregation unless all metric columns are noDictionaryColumns.
// But if you do that after the table is already created, all future aggregations will
// just be the default value.
Map<String, DictionaryIndexConfig> configPerCol = StandardIndexes.dictionary().getConfig(tableConfig, schema);
aggregationColumns.forEach(column -> {
DictionaryIndexConfig dictConfig = configPerCol.get(column);
Preconditions.checkState(dictConfig != null && dictConfig.isDisabled(),
"Aggregated column: %s must be a no-dictionary column", column);
});
}
// Enrichment configs
List<EnrichmentConfig> enrichmentConfigs = ingestionConfig.getEnrichmentConfigs();
if (enrichmentConfigs != null) {
for (EnrichmentConfig enrichmentConfig : enrichmentConfigs) {
RecordEnricherRegistry.validateEnrichmentConfig(enrichmentConfig,
new RecordEnricherValidationConfig(_disableGroovy));
}
}
// Transform configs
List<TransformConfig> transformConfigs = ingestionConfig.getTransformConfigs();
if (transformConfigs != null) {
Set<String> transformColumns = new HashSet<>();
for (TransformConfig transformConfig : transformConfigs) {
String columnName = transformConfig.getColumnName();
String transformFunction = transformConfig.getTransformFunction();
if (columnName == null || transformFunction == null) {
throw new IllegalStateException(
"columnName/transformFunction cannot be null in TransformConfig " + transformConfig);
}
if (!transformColumns.add(columnName)) {
throw new IllegalStateException("Duplicate transform config found for column '" + columnName + "'");
}
if (schema != null) {
Preconditions.checkState(
schema.getFieldSpecFor(columnName) != null || aggregationSourceColumns.contains(columnName),
"The destination column '" + columnName
+ "' of the transform function must be present in the schema or as a source column for "
+ "aggregations");
}
FunctionEvaluator expressionEvaluator;
if (_disableGroovy && FunctionEvaluatorFactory.isGroovyExpression(transformFunction)) {
throw new IllegalStateException(
"Groovy transform functions are disabled for table config. Found '" + transformFunction
+ "' for column '" + columnName + "'");
}
try {
expressionEvaluator = FunctionEvaluatorFactory.getExpressionEvaluator(transformFunction);
} catch (Exception e) {
throw new IllegalStateException(
"Invalid transform function '" + transformFunction + "' for column '" + columnName + "'", e);
}
List<String> arguments = expressionEvaluator.getArguments();
if (arguments.contains(columnName)) {
throw new IllegalStateException(
"Arguments of a transform function '" + arguments + "' cannot contain the destination column '"
+ columnName + "'");
}
}
}
// Complex configs
ComplexTypeConfig complexTypeConfig = ingestionConfig.getComplexTypeConfig();
if (complexTypeConfig != null && schema != null) {
Map<String, String> prefixesToRename = complexTypeConfig.getPrefixesToRename();
if (MapUtils.isNotEmpty(prefixesToRename)) {
Set<String> fieldNames = schema.getColumnNames();
for (String prefix : prefixesToRename.keySet()) {
for (String field : fieldNames) {
Preconditions.checkState(!field.startsWith(prefix),
"Fields in the schema may not begin with any prefix specified in the prefixesToRename"
+ " config. Name conflict with field: " + field + " and prefix: " + prefix);
}
}
}
}
SchemaConformingTransformerConfig schemaConformingTransformerConfig =
ingestionConfig.getSchemaConformingTransformerConfig();
if (null != schemaConformingTransformerConfig && null != schema) {
SchemaConformingTransformer.validateSchema(schema, schemaConformingTransformerConfig);
}
SchemaConformingTransformerV2Config schemaConformingTransformerV2Config =
ingestionConfig.getSchemaConformingTransformerV2Config();
if (null != schemaConformingTransformerV2Config && null != schema) {
SchemaConformingTransformerV2.validateSchema(schema, schemaConformingTransformerV2Config);
}
}
}
|
@Test
public void ingestionAggregationConfigsTest() {
Schema schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME)
.addDateTime("timeColumn", FieldSpec.DataType.TIMESTAMP, "1:MILLISECONDS:EPOCH", "1:MILLISECONDS").build();
IngestionConfig ingestionConfig = new IngestionConfig();
ingestionConfig.setAggregationConfigs(Collections.singletonList(new AggregationConfig("d1", "SUM(s1)")));
TableConfig tableConfig =
new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setTimeColumnName("timeColumn")
.setIngestionConfig(ingestionConfig).build();
try {
TableConfigUtils.validateIngestionConfig(tableConfig, schema);
Assert.fail("Should fail due to destination column not being in schema");
} catch (IllegalStateException e) {
// expected
}
schema.addField(new DimensionFieldSpec("d1", FieldSpec.DataType.DOUBLE, true));
tableConfig.getIndexingConfig().setAggregateMetrics(true);
try {
TableConfigUtils.validateIngestionConfig(tableConfig, schema);
Assert.fail("Should fail due to aggregateMetrics being set");
} catch (IllegalStateException e) {
// expected
}
tableConfig.getIndexingConfig().setAggregateMetrics(false);
try {
TableConfigUtils.validateIngestionConfig(tableConfig, schema);
Assert.fail("Should fail due to aggregation column being a dimension");
} catch (IllegalStateException e) {
// expected
}
schema.addField(new MetricFieldSpec("m1", FieldSpec.DataType.DOUBLE));
ingestionConfig.setAggregationConfigs(Collections.singletonList(new AggregationConfig(null, null)));
try {
TableConfigUtils.validateIngestionConfig(tableConfig, schema);
Assert.fail("Should fail due to null columnName/aggregationFunction");
} catch (IllegalStateException e) {
// expected
}
ingestionConfig.setAggregationConfigs(
Arrays.asList(new AggregationConfig("m1", "SUM(s1)"), new AggregationConfig("m1", "SUM(s2)")));
try {
TableConfigUtils.validateIngestionConfig(tableConfig, schema);
Assert.fail("Should fail due to duplicate destination column");
} catch (IllegalStateException e) {
// expected
}
ingestionConfig.setAggregationConfigs(Collections.singletonList(new AggregationConfig("m1", "SUM s1")));
try {
TableConfigUtils.validateIngestionConfig(tableConfig, schema);
Assert.fail("Should fail due to invalid aggregation function");
} catch (IllegalStateException e) {
// expected
}
ingestionConfig.setAggregationConfigs(Collections.singletonList(new AggregationConfig("m1", "SUM(s1 - s2)")));
try {
TableConfigUtils.validateIngestionConfig(tableConfig, schema);
Assert.fail("Should fail due to inner value not being a column");
} catch (IllegalStateException e) {
// expected
}
ingestionConfig.setAggregationConfigs(Collections.singletonList(new AggregationConfig("m1", "SUM(m1)")));
try {
TableConfigUtils.validateIngestionConfig(tableConfig, schema);
Assert.fail("Should fail due to noDictionaryColumns being null");
} catch (IllegalStateException e) {
// expected
}
IndexingConfig indexingConfig = new IndexingConfig();
indexingConfig.setNoDictionaryColumns(List.of());
tableConfig.setIndexingConfig(indexingConfig);
try {
TableConfigUtils.validateIngestionConfig(tableConfig, schema);
Assert.fail("Should fail due to noDictionaryColumns not containing m1");
} catch (IllegalStateException e) {
// expected
}
indexingConfig.setNoDictionaryColumns(List.of("m1"));
ingestionConfig.setAggregationConfigs(Collections.singletonList(new AggregationConfig("m1", "SUM(m1)")));
TableConfigUtils.validateIngestionConfig(tableConfig, schema);
ingestionConfig.setAggregationConfigs(Collections.singletonList(new AggregationConfig("m1", "SUM(s1)")));
TableConfigUtils.validateIngestionConfig(tableConfig, schema);
schema.addField(new MetricFieldSpec("m2", FieldSpec.DataType.DOUBLE));
try {
TableConfigUtils.validateIngestionConfig(tableConfig, schema);
Assert.fail("Should fail due to one metric column not being aggregated");
} catch (IllegalStateException e) {
// expected
}
schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).addMetric("d1", FieldSpec.DataType.BYTES).build();
    // distinctcounthllmv is not supported, so we expect this not to validate
List<AggregationConfig> aggregationConfigs = Arrays.asList(new AggregationConfig("d1", "DISTINCTCOUNTHLLMV(s1)"));
ingestionConfig.setAggregationConfigs(aggregationConfigs);
tableConfig =
new TableConfigBuilder(TableType.REALTIME).setTableName("myTable_REALTIME").setTimeColumnName("timeColumn")
.setIngestionConfig(ingestionConfig).setNoDictionaryColumns(List.of("d1")).build();
try {
TableConfigUtils.validateIngestionConfig(tableConfig, schema);
Assert.fail("Should fail due to not supported aggregation function");
} catch (IllegalStateException e) {
// expected
}
// distinctcounthll, expect that the function name in various forms (with and without underscores) still validates
schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).addMetric("d1", FieldSpec.DataType.BYTES)
.addMetric("d2", FieldSpec.DataType.BYTES).addMetric("d3", FieldSpec.DataType.BYTES)
.addMetric("d4", FieldSpec.DataType.BYTES).addMetric("d5", FieldSpec.DataType.BYTES).build();
aggregationConfigs = Arrays.asList(new AggregationConfig("d1", "distinct_count_hll(s1)"),
new AggregationConfig("d2", "DISTINCTCOUNTHLL(s1)"), new AggregationConfig("d3", "distinctcounthll(s1)"),
new AggregationConfig("d4", "DISTINCTCOUNT_HLL(s1)"), new AggregationConfig("d5", "DISTINCT_COUNT_HLL(s1)"));
ingestionConfig.setAggregationConfigs(aggregationConfigs);
tableConfig =
new TableConfigBuilder(TableType.REALTIME).setTableName("myTable_REALTIME").setTimeColumnName("timeColumn")
.setIngestionConfig(ingestionConfig).setNoDictionaryColumns(List.of("d1", "d2", "d3", "d4", "d5")).build();
try {
TableConfigUtils.validateIngestionConfig(tableConfig, schema);
} catch (IllegalStateException e) {
Assert.fail("Should not fail due to valid aggregation function", e);
}
// distinctcounthllplus, expect that the function name in various forms still validates
aggregationConfigs = Arrays.asList(new AggregationConfig("d1", "distinct_count_hll_plus(s1)"),
new AggregationConfig("d2", "DISTINCTCOUNTHLLPLUS(s1)"),
new AggregationConfig("d3", "distinctcounthllplus(s1)"),
new AggregationConfig("d4", "DISTINCTCOUNT_HLL_PLUS(s1)"),
new AggregationConfig("d5", "DISTINCT_COUNT_HLL_PLUS(s1)"));
ingestionConfig.setAggregationConfigs(aggregationConfigs);
tableConfig =
new TableConfigBuilder(TableType.REALTIME).setTableName("myTable_REALTIME").setTimeColumnName("timeColumn")
.setIngestionConfig(ingestionConfig).setNoDictionaryColumns(List.of("d1", "d2", "d3", "d4", "d5")).build();
try {
TableConfigUtils.validateIngestionConfig(tableConfig, schema);
} catch (IllegalStateException e) {
Assert.fail("Should not fail due to valid aggregation function", e);
}
    // distinctcounthll, expect an unspecified log2m argument to default to 8
schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).addMetric("d1", FieldSpec.DataType.BYTES).build();
aggregationConfigs = Arrays.asList(new AggregationConfig("d1", "DISTINCTCOUNTHLL(s1)"));
ingestionConfig.setAggregationConfigs(aggregationConfigs);
tableConfig =
new TableConfigBuilder(TableType.REALTIME).setTableName("myTable_REALTIME").setTimeColumnName("timeColumn")
.setIngestionConfig(ingestionConfig).setNoDictionaryColumns(List.of("d1")).build();
try {
TableConfigUtils.validateIngestionConfig(tableConfig, schema);
} catch (IllegalStateException e) {
Assert.fail("Log2m should have defaulted to 8", e);
}
aggregationConfigs = Arrays.asList(new AggregationConfig("d1", "s1 + s2"));
ingestionConfig.setAggregationConfigs(aggregationConfigs);
tableConfig =
new TableConfigBuilder(TableType.REALTIME).setTableName("myTable_REALTIME").setTimeColumnName("timeColumn")
.setIngestionConfig(ingestionConfig).setNoDictionaryColumns(List.of("d1")).build();
try {
TableConfigUtils.validateIngestionConfig(tableConfig, schema);
Assert.fail("Should fail due to multiple arguments");
} catch (IllegalArgumentException e) {
// expected
}
// sumprecision, expect that the function name in various forms (with and without underscores) still validates
schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME)
.addSingleValueDimension("s1", FieldSpec.DataType.BIG_DECIMAL).addMetric("d1", FieldSpec.DataType.BIG_DECIMAL)
.addMetric("d2", FieldSpec.DataType.BIG_DECIMAL).addMetric("d3", FieldSpec.DataType.BIG_DECIMAL)
.addMetric("d4", FieldSpec.DataType.BIG_DECIMAL).build();
aggregationConfigs = Arrays.asList(new AggregationConfig("d1", "sum_precision(s1, 10, 32)"),
new AggregationConfig("d2", "SUM_PRECISION(s1, 1)"), new AggregationConfig("d3", "sumprecision(s1, 2)"),
new AggregationConfig("d4", "SUMPRECISION(s1, 10, 99)"));
ingestionConfig.setAggregationConfigs(aggregationConfigs);
tableConfig =
new TableConfigBuilder(TableType.REALTIME).setTableName("myTable_REALTIME").setTimeColumnName("timeColumn")
.setIngestionConfig(ingestionConfig).setNoDictionaryColumns(List.of("d1", "d2", "d3", "d4", "d5")).build();
TableConfigUtils.validateIngestionConfig(tableConfig, schema);
// with too many arguments should fail
schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME)
.addSingleValueDimension("s1", FieldSpec.DataType.BIG_DECIMAL).addMetric("d1", FieldSpec.DataType.BIG_DECIMAL)
.build();
aggregationConfigs = Arrays.asList(new AggregationConfig("d1", "sum_precision(s1, 10, 32, 99)"));
ingestionConfig.setAggregationConfigs(aggregationConfigs);
tableConfig =
new TableConfigBuilder(TableType.REALTIME).setTableName("myTable_REALTIME").setTimeColumnName("timeColumn")
.setIngestionConfig(ingestionConfig).setNoDictionaryColumns(List.of("d1")).build();
try {
TableConfigUtils.validateIngestionConfig(tableConfig, schema);
Assert.fail("Should have failed with too many arguments but didn't");
} catch (IllegalStateException e) {
// Expected
}
}
|
@Override
public List<PinotTaskConfig> generateTasks(List<TableConfig> tableConfigs) {
String taskType = MergeRollupTask.TASK_TYPE;
List<PinotTaskConfig> pinotTaskConfigs = new ArrayList<>();
for (TableConfig tableConfig : tableConfigs) {
if (!validate(tableConfig, taskType)) {
continue;
}
String tableNameWithType = tableConfig.getTableName();
LOGGER.info("Start generating task configs for table: {} for task: {}", tableNameWithType, taskType);
// Get all segment metadata
List<SegmentZKMetadata> allSegments = getSegmentsZKMetadataForTable(tableNameWithType);
// Filter segments based on status
List<SegmentZKMetadata> preSelectedSegmentsBasedOnStatus
= filterSegmentsBasedOnStatus(tableConfig.getTableType(), allSegments);
// Select current segment snapshot based on lineage, filter out empty segments
SegmentLineage segmentLineage = _clusterInfoAccessor.getSegmentLineage(tableNameWithType);
Set<String> preSelectedSegmentsBasedOnLineage = new HashSet<>();
for (SegmentZKMetadata segment : preSelectedSegmentsBasedOnStatus) {
preSelectedSegmentsBasedOnLineage.add(segment.getSegmentName());
}
SegmentLineageUtils.filterSegmentsBasedOnLineageInPlace(preSelectedSegmentsBasedOnLineage, segmentLineage);
List<SegmentZKMetadata> preSelectedSegments = new ArrayList<>();
for (SegmentZKMetadata segment : preSelectedSegmentsBasedOnStatus) {
if (preSelectedSegmentsBasedOnLineage.contains(segment.getSegmentName()) && segment.getTotalDocs() > 0
&& MergeTaskUtils.allowMerge(segment)) {
preSelectedSegments.add(segment);
}
}
if (preSelectedSegments.isEmpty()) {
// Reset the watermark time if no segment found. This covers the case where the table is newly created or
// all segments for the existing table got deleted.
resetDelayMetrics(tableNameWithType);
LOGGER.info("Skip generating task: {} for table: {}, no segment is found.", taskType, tableNameWithType);
continue;
}
// Sort segments based on startTimeMs, endTimeMs and segmentName in ascending order
preSelectedSegments.sort((a, b) -> {
long aStartTime = a.getStartTimeMs();
long bStartTime = b.getStartTimeMs();
if (aStartTime != bStartTime) {
return Long.compare(aStartTime, bStartTime);
}
long aEndTime = a.getEndTimeMs();
long bEndTime = b.getEndTimeMs();
return aEndTime != bEndTime ? Long.compare(aEndTime, bEndTime)
: a.getSegmentName().compareTo(b.getSegmentName());
});
// Sort merge levels based on bucket time period
Map<String, String> taskConfigs = tableConfig.getTaskConfig().getConfigsForTaskType(taskType);
Map<String, Map<String, String>> mergeLevelToConfigs = MergeRollupTaskUtils.getLevelToConfigMap(taskConfigs);
List<Map.Entry<String, Map<String, String>>> sortedMergeLevelConfigs =
new ArrayList<>(mergeLevelToConfigs.entrySet());
sortedMergeLevelConfigs.sort(Comparator.comparingLong(
e -> TimeUtils.convertPeriodToMillis(e.getValue().get(MinionConstants.MergeTask.BUCKET_TIME_PERIOD_KEY))));
// Get incomplete merge levels
Set<String> inCompleteMergeLevels = new HashSet<>();
for (Map.Entry<String, TaskState> entry : TaskGeneratorUtils.getIncompleteTasks(taskType, tableNameWithType,
_clusterInfoAccessor).entrySet()) {
for (PinotTaskConfig taskConfig : _clusterInfoAccessor.getTaskConfigs(entry.getKey())) {
inCompleteMergeLevels.add(taskConfig.getConfigs().get(MergeRollupTask.MERGE_LEVEL_KEY));
}
}
// Get scheduling mode which is "processFromWatermark" by default. If "processAll" mode is enabled, there will be
// no watermark, and each round we pick the buckets in chronological order which have unmerged segments.
boolean processAll = MergeTask.PROCESS_ALL_MODE.equalsIgnoreCase(taskConfigs.get(MergeTask.MODE));
ZNRecord mergeRollupTaskZNRecord = _clusterInfoAccessor
.getMinionTaskMetadataZNRecord(MinionConstants.MergeRollupTask.TASK_TYPE, tableNameWithType);
int expectedVersion = mergeRollupTaskZNRecord != null ? mergeRollupTaskZNRecord.getVersion() : -1;
MergeRollupTaskMetadata mergeRollupTaskMetadata =
mergeRollupTaskZNRecord != null ? MergeRollupTaskMetadata.fromZNRecord(mergeRollupTaskZNRecord)
: new MergeRollupTaskMetadata(tableNameWithType, new TreeMap<>());
List<PinotTaskConfig> pinotTaskConfigsForTable = new ArrayList<>();
// Schedule tasks from lowest to highest merge level (e.g. Hourly -> Daily -> Monthly -> Yearly)
String mergeLevel = null;
for (Map.Entry<String, Map<String, String>> mergeLevelConfig : sortedMergeLevelConfigs) {
String lowerMergeLevel = mergeLevel;
mergeLevel = mergeLevelConfig.getKey();
Map<String, String> mergeConfigs = mergeLevelConfig.getValue();
// Skip scheduling if there's incomplete task for current mergeLevel
if (inCompleteMergeLevels.contains(mergeLevel)) {
LOGGER.info("Found incomplete task of merge level: {} for the same table: {}, Skipping task generation: {}",
mergeLevel, tableNameWithType, taskType);
continue;
}
// Get the bucket size, buffer size and maximum number of parallel buckets (by default 1)
String bucketPeriod = mergeConfigs.get(MergeTask.BUCKET_TIME_PERIOD_KEY);
long bucketMs = TimeUtils.convertPeriodToMillis(bucketPeriod);
if (bucketMs <= 0) {
LOGGER.error("Bucket time period: {} (table : {}, mergeLevel : {}) must be larger than 0", bucketPeriod,
tableNameWithType, mergeLevel);
continue;
}
String bufferPeriod = mergeConfigs.get(MergeTask.BUFFER_TIME_PERIOD_KEY);
long bufferMs = TimeUtils.convertPeriodToMillis(bufferPeriod);
if (bufferMs < 0) {
LOGGER.error("Buffer time period: {} (table : {}, mergeLevel : {}) must be larger or equal to 0",
bufferPeriod, tableNameWithType, mergeLevel);
continue;
}
String maxNumParallelBucketsStr = mergeConfigs.get(MergeTask.MAX_NUM_PARALLEL_BUCKETS);
int maxNumParallelBuckets = maxNumParallelBucketsStr != null ? Integer.parseInt(maxNumParallelBucketsStr)
: DEFAULT_NUM_PARALLEL_BUCKETS;
if (maxNumParallelBuckets <= 0) {
LOGGER.error("Maximum number of parallel buckets: {} (table : {}, mergeLevel : {}) must be larger than 0",
maxNumParallelBuckets, tableNameWithType, mergeLevel);
continue;
}
// Get bucket start/end time
long preSelectedSegStartTimeMs = preSelectedSegments.get(0).getStartTimeMs();
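        // Truncating integer division aligns the earliest segment start time down to a bucket boundary.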
long bucketStartMs = preSelectedSegStartTimeMs / bucketMs * bucketMs;
long watermarkMs = 0;
if (!processAll) {
// Get watermark from MergeRollupTaskMetadata ZNode
// bucketStartMs = watermarkMs
// bucketEndMs = bucketStartMs + bucketMs
watermarkMs = getWatermarkMs(preSelectedSegStartTimeMs, bucketMs, mergeLevel,
mergeRollupTaskMetadata);
bucketStartMs = watermarkMs;
}
long bucketEndMs = bucketStartMs + bucketMs;
if (lowerMergeLevel == null) {
long lowestLevelMaxValidBucketEndTimeMs = Long.MIN_VALUE;
for (SegmentZKMetadata preSelectedSegment : preSelectedSegments) {
// Compute lowestLevelMaxValidBucketEndTimeMs among segments that are ready for merge
long currentValidBucketEndTimeMs =
getValidBucketEndTimeMsForSegment(preSelectedSegment, bucketMs, bufferMs);
lowestLevelMaxValidBucketEndTimeMs =
Math.max(lowestLevelMaxValidBucketEndTimeMs, currentValidBucketEndTimeMs);
}
_tableLowestLevelMaxValidBucketEndTimeMs.put(tableNameWithType, lowestLevelMaxValidBucketEndTimeMs);
}
// Create metrics even if there's no task scheduled, this helps the case that the controller is restarted
// but the metrics are not available until the controller schedules a valid task
List<String> sortedMergeLevels =
sortedMergeLevelConfigs.stream().map(e -> e.getKey()).collect(Collectors.toList());
if (processAll) {
createOrUpdateNumBucketsToProcessMetrics(tableNameWithType, mergeLevel, lowerMergeLevel, bufferMs, bucketMs,
preSelectedSegments, sortedMergeLevels);
} else {
createOrUpdateDelayMetrics(tableNameWithType, mergeLevel, null, watermarkMs, bufferMs, bucketMs);
}
if (!isValidBucketEndTime(bucketEndMs, bufferMs, lowerMergeLevel, mergeRollupTaskMetadata, processAll)) {
LOGGER.info("Bucket with start: {} and end: {} (table : {}, mergeLevel : {}, mode : {}) cannot be merged yet",
bucketStartMs, bucketEndMs, tableNameWithType, mergeLevel, processAll ? MergeTask.PROCESS_ALL_MODE
: MergeTask.PROCESS_FROM_WATERMARK_MODE);
continue;
}
        // Find overlapping segments for each bucket, skip the buckets that have all segments merged
List<List<SegmentZKMetadata>> selectedSegmentsForAllBuckets = new ArrayList<>(maxNumParallelBuckets);
List<SegmentZKMetadata> selectedSegmentsForBucket = new ArrayList<>();
boolean hasUnmergedSegments = false;
boolean hasSpilledOverData = false;
boolean areAllSegmentsReadyToMerge = true;
        // The for loop terminates in the following cases:
// 1. Found buckets with unmerged segments:
// For each bucket find all segments overlapping with the target bucket, skip the bucket if all overlapping
        // segments are merged. Schedule k (numParallelBuckets) buckets at most, and stop at the first bucket that
// contains spilled over data.
// One may wonder how a segment with records spanning different buckets is handled. The short answer is that
// it will be cut into multiple segments, each for a separate bucket. This is achieved by setting bucket time
// period as PARTITION_BUCKET_TIME_PERIOD when generating PinotTaskConfigs
// 2. There's no bucket with unmerged segments, skip scheduling
for (SegmentZKMetadata preSelectedSegment : preSelectedSegments) {
long startTimeMs = preSelectedSegment.getStartTimeMs();
if (startTimeMs < bucketEndMs) {
long endTimeMs = preSelectedSegment.getEndTimeMs();
if (endTimeMs >= bucketStartMs) {
// For segments overlapping with current bucket, add to the result list
if (!isMergedSegment(preSelectedSegment, mergeLevel, sortedMergeLevels)) {
hasUnmergedSegments = true;
}
if (!isMergedSegment(preSelectedSegment, lowerMergeLevel, sortedMergeLevels)) {
areAllSegmentsReadyToMerge = false;
}
if (hasSpilledOverData(preSelectedSegment, bucketMs)) {
hasSpilledOverData = true;
}
selectedSegmentsForBucket.add(preSelectedSegment);
}
          // endTimeMs < bucketStartMs
          // Haven't found the first overlapping segment yet, continue to the next segment
} else {
// Has gone through all overlapping segments for current bucket
if (hasUnmergedSegments && areAllSegmentsReadyToMerge) {
// Add the bucket if there are unmerged segments
selectedSegmentsForAllBuckets.add(selectedSegmentsForBucket);
}
if (selectedSegmentsForAllBuckets.size() == maxNumParallelBuckets || hasSpilledOverData) {
// If there are enough buckets or found spilled over data, schedule merge tasks
break;
} else {
// Start with a new bucket
// TODO: If there are many small merged segments, we should merge them again
selectedSegmentsForBucket = new ArrayList<>();
hasUnmergedSegments = false;
areAllSegmentsReadyToMerge = true;
bucketStartMs = (startTimeMs / bucketMs) * bucketMs;
bucketEndMs = bucketStartMs + bucketMs;
if (!isValidBucketEndTime(bucketEndMs, bufferMs, lowerMergeLevel, mergeRollupTaskMetadata, processAll)) {
break;
}
if (!isMergedSegment(preSelectedSegment, mergeLevel, sortedMergeLevels)) {
hasUnmergedSegments = true;
}
if (!isMergedSegment(preSelectedSegment, lowerMergeLevel, sortedMergeLevels)) {
areAllSegmentsReadyToMerge = false;
}
if (hasSpilledOverData(preSelectedSegment, bucketMs)) {
hasSpilledOverData = true;
}
selectedSegmentsForBucket.add(preSelectedSegment);
}
}
}
      // Add the last bucket if it contains unmerged segments and has not been added yet
if (hasUnmergedSegments && areAllSegmentsReadyToMerge && (selectedSegmentsForAllBuckets.isEmpty() || (
selectedSegmentsForAllBuckets.get(selectedSegmentsForAllBuckets.size() - 1)
!= selectedSegmentsForBucket))) {
selectedSegmentsForAllBuckets.add(selectedSegmentsForBucket);
}
if (selectedSegmentsForAllBuckets.isEmpty()) {
LOGGER.info("No unmerged segment found for table: {}, mergeLevel: {}", tableNameWithType, mergeLevel);
continue;
}
// Bump up watermark to the earliest start time of selected segments truncated to the closest bucket boundary
long newWatermarkMs = selectedSegmentsForAllBuckets.get(0).get(0).getStartTimeMs() / bucketMs * bucketMs;
mergeRollupTaskMetadata.getWatermarkMap().put(mergeLevel, newWatermarkMs);
LOGGER.info("Update watermark for table: {}, mergeLevel: {} from: {} to: {}", tableNameWithType, mergeLevel,
watermarkMs, newWatermarkMs);
// Update the delay metrics
if (!processAll) {
createOrUpdateDelayMetrics(tableNameWithType, mergeLevel, lowerMergeLevel, newWatermarkMs, bufferMs,
bucketMs);
}
// Create task configs
int maxNumRecordsPerTask =
mergeConfigs.get(MergeRollupTask.MAX_NUM_RECORDS_PER_TASK_KEY) != null ? Integer.parseInt(
mergeConfigs.get(MergeRollupTask.MAX_NUM_RECORDS_PER_TASK_KEY)) : DEFAULT_MAX_NUM_RECORDS_PER_TASK;
SegmentPartitionConfig segmentPartitionConfig = tableConfig.getIndexingConfig().getSegmentPartitionConfig();
if (segmentPartitionConfig == null) {
for (List<SegmentZKMetadata> selectedSegmentsPerBucket : selectedSegmentsForAllBuckets) {
pinotTaskConfigsForTable.addAll(
createPinotTaskConfigs(selectedSegmentsPerBucket, tableConfig, maxNumRecordsPerTask, mergeLevel,
null, mergeConfigs, taskConfigs));
}
} else {
        // For a partitioned table, schedule separate tasks for each partitionId (the partitionId is constructed
        // from the partitions of all partition columns. There must be an exact match between the partition columns
        // of the segment and those of the table configuration, and only one partition per column in the segment
        // metadata). Segments that do not meet these conditions are considered outlier segments, and additional
        // tasks are generated for them.
Map<String, ColumnPartitionConfig> columnPartitionMap = segmentPartitionConfig.getColumnPartitionMap();
List<String> partitionColumns = new ArrayList<>(columnPartitionMap.keySet());
for (List<SegmentZKMetadata> selectedSegmentsPerBucket : selectedSegmentsForAllBuckets) {
Map<List<Integer>, List<SegmentZKMetadata>> partitionToSegments = new HashMap<>();
List<SegmentZKMetadata> outlierSegments = new ArrayList<>();
for (SegmentZKMetadata selectedSegment : selectedSegmentsPerBucket) {
SegmentPartitionMetadata segmentPartitionMetadata = selectedSegment.getPartitionMetadata();
List<Integer> partitions = new ArrayList<>();
if (segmentPartitionMetadata != null && columnPartitionMap.keySet()
.equals(segmentPartitionMetadata.getColumnPartitionMap().keySet())) {
for (String partitionColumn : partitionColumns) {
if (segmentPartitionMetadata.getPartitions(partitionColumn).size() == 1) {
partitions.add(segmentPartitionMetadata.getPartitions(partitionColumn).iterator().next());
} else {
partitions.clear();
break;
}
}
}
if (partitions.isEmpty()) {
outlierSegments.add(selectedSegment);
} else {
partitionToSegments.computeIfAbsent(partitions, k -> new ArrayList<>()).add(selectedSegment);
}
}
for (Map.Entry<List<Integer>, List<SegmentZKMetadata>> entry : partitionToSegments.entrySet()) {
List<Integer> partition = entry.getKey();
List<SegmentZKMetadata> partitionedSegments = entry.getValue();
pinotTaskConfigsForTable.addAll(
createPinotTaskConfigs(partitionedSegments, tableConfig, maxNumRecordsPerTask, mergeLevel,
partition, mergeConfigs, taskConfigs));
}
if (!outlierSegments.isEmpty()) {
pinotTaskConfigsForTable.addAll(
createPinotTaskConfigs(outlierSegments, tableConfig, maxNumRecordsPerTask, mergeLevel,
null, mergeConfigs, taskConfigs));
}
}
}
}
// Write updated watermark map to zookeeper
if (!processAll) {
try {
_clusterInfoAccessor
.setMinionTaskMetadata(mergeRollupTaskMetadata, MinionConstants.MergeRollupTask.TASK_TYPE,
expectedVersion);
} catch (ZkException e) {
LOGGER.error(
"Version changed while updating merge/rollup task metadata for table: {}, skip scheduling. There are "
+ "multiple task schedulers for the same table, need to investigate!", tableNameWithType);
continue;
}
}
pinotTaskConfigs.addAll(pinotTaskConfigsForTable);
LOGGER.info("Finished generating task configs for table: {} for task: {}, numTasks: {}", tableNameWithType,
taskType, pinotTaskConfigsForTable.size());
}
// Clean up metrics
cleanUpDelayMetrics(tableConfigs);
return pinotTaskConfigs;
}
|
@Test
public void testNumParallelBuckets() {
Map<String, Map<String, String>> taskConfigsMap = new HashMap<>();
Map<String, String> tableTaskConfigs = new HashMap<>();
tableTaskConfigs.put("daily.mergeType", "concat");
tableTaskConfigs.put("daily.bufferTimePeriod", "2d");
tableTaskConfigs.put("daily.bucketTimePeriod", "1d");
tableTaskConfigs.put("daily.maxNumRecordsPerSegment", "1000000");
tableTaskConfigs.put("daily.maxNumParallelBuckets", "3");
taskConfigsMap.put(MinionConstants.MergeRollupTask.TASK_TYPE, tableTaskConfigs);
TableConfig offlineTableConfig = getTableConfig(TableType.OFFLINE, taskConfigsMap);
ClusterInfoAccessor mockClusterInfoProvide = mock(ClusterInfoAccessor.class);
String segmentName1 = "testTable__1";
String segmentName2 = "testTable__2";
String segmentName3 = "testTable__3";
String segmentName4 = "testTable__4";
String segmentName5 = "testTable__5";
SegmentZKMetadata metadata1 =
getSegmentZKMetadata(segmentName1, 86_400_000L, 90_000_000L, TimeUnit.MILLISECONDS, "download1");
SegmentZKMetadata metadata2 =
getSegmentZKMetadata(segmentName2, 86_400_000L, 100_000_000L, TimeUnit.MILLISECONDS, "download2");
SegmentZKMetadata metadata3 =
getSegmentZKMetadata(segmentName3, 172_800_000L, 173_000_000L, TimeUnit.MILLISECONDS, "download3");
SegmentZKMetadata metadata4 =
getSegmentZKMetadata(segmentName4, 259_200_000L, 260_000_000L, TimeUnit.MILLISECONDS, "download4");
SegmentZKMetadata metadata5 =
getSegmentZKMetadata(segmentName5, 345_600_000L, 346_000_000L, TimeUnit.MILLISECONDS, "download5");
// No spilled over data
when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(
Lists.newArrayList(metadata1, metadata2, metadata3, metadata4, metadata5));
when(mockClusterInfoProvide.getIdealState(OFFLINE_TABLE_NAME)).thenReturn(getIdealState(OFFLINE_TABLE_NAME,
Lists.newArrayList(segmentName1, segmentName2, segmentName3, segmentName4, segmentName5)));
MergeRollupTaskGenerator generator = new MergeRollupTaskGenerator();
generator.init(mockClusterInfoProvide);
List<PinotTaskConfig> pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
assertEquals(pinotTaskConfigs.size(), 3);
checkPinotTaskConfig(pinotTaskConfigs.get(0).getConfigs(), segmentName1 + "," + segmentName2, DAILY, "concat", "1d",
null, "1000000");
checkPinotTaskConfig(pinotTaskConfigs.get(1).getConfigs(), segmentName3, DAILY, "concat", "1d", null, "1000000");
checkPinotTaskConfig(pinotTaskConfigs.get(2).getConfigs(), segmentName4, DAILY, "concat", "1d", null, "1000000");
// Has spilled over data
String segmentName6 = "testTable__6";
SegmentZKMetadata metadata6 =
getSegmentZKMetadata(segmentName6, 172_800_000L, 260_000_000L, TimeUnit.MILLISECONDS, null);
when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(
Lists.newArrayList(metadata1, metadata2, metadata3, metadata4, metadata5, metadata6));
when(mockClusterInfoProvide.getIdealState(OFFLINE_TABLE_NAME)).thenReturn(getIdealState(OFFLINE_TABLE_NAME,
Lists.newArrayList(segmentName1, segmentName2, segmentName3, segmentName4, segmentName5, segmentName6)));
pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
assertEquals(pinotTaskConfigs.size(), 2);
checkPinotTaskConfig(pinotTaskConfigs.get(0).getConfigs(), segmentName1 + "," + segmentName2, DAILY, "concat", "1d",
null, "1000000");
checkPinotTaskConfig(pinotTaskConfigs.get(1).getConfigs(), segmentName3 + "," + segmentName6, DAILY, "concat", "1d",
null, "1000000");
// Has time bucket without overlapping segments
when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(
Lists.newArrayList(metadata1, metadata2, metadata4, metadata5));
when(mockClusterInfoProvide.getIdealState(OFFLINE_TABLE_NAME)).thenReturn(getIdealState(OFFLINE_TABLE_NAME,
Lists.newArrayList(segmentName1, segmentName2, segmentName3, segmentName4, segmentName5)));
pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
assertEquals(pinotTaskConfigs.size(), 3);
checkPinotTaskConfig(pinotTaskConfigs.get(0).getConfigs(), segmentName1 + "," + segmentName2, DAILY, "concat", "1d",
null, "1000000");
checkPinotTaskConfig(pinotTaskConfigs.get(1).getConfigs(), segmentName4, DAILY, "concat", "1d", null, "1000000");
checkPinotTaskConfig(pinotTaskConfigs.get(2).getConfigs(), segmentName5, DAILY, "concat", "1d", null, "1000000");
// Has un-merged buckets
metadata6 = getSegmentZKMetadata(segmentName6, 432_000_000L, 432_100_000L, TimeUnit.MILLISECONDS, null);
metadata1.setCustomMap(ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
metadata2.setCustomMap(ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
metadata4.setCustomMap(ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(
Lists.newArrayList(metadata1, metadata2, metadata3, metadata4, metadata5, metadata6));
when(mockClusterInfoProvide.getIdealState(OFFLINE_TABLE_NAME)).thenReturn(getIdealState(OFFLINE_TABLE_NAME,
Lists.newArrayList(segmentName1, segmentName2, segmentName3, segmentName4, segmentName5, segmentName6)));
pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
assertEquals(pinotTaskConfigs.size(), 3);
checkPinotTaskConfig(pinotTaskConfigs.get(0).getConfigs(), segmentName3, DAILY, "concat", "1d", null, "1000000");
checkPinotTaskConfig(pinotTaskConfigs.get(1).getConfigs(), segmentName5, DAILY, "concat", "1d", null, "1000000");
checkPinotTaskConfig(pinotTaskConfigs.get(2).getConfigs(), segmentName6, DAILY, "concat", "1d", null, "1000000");
// Test number of scheduled buckets < numParallelBuckets
tableTaskConfigs.put("monthly.mergeType", "concat");
tableTaskConfigs.put("monthly.bufferTimePeriod", "30d");
tableTaskConfigs.put("monthly.bucketTimePeriod", "30d");
tableTaskConfigs.put("monthly.maxNumRecordsPerSegment", "1000000");
tableTaskConfigs.put("monthly.maxNumParallelBuckets", "3");
TreeMap<String, Long> waterMarkMap = new TreeMap<>();
// Watermark for daily is at 30 days since epoch
waterMarkMap.put(DAILY, 2_592_000_000L);
when(mockClusterInfoProvide.getMinionTaskMetadataZNRecord(MinionConstants.MergeRollupTask.TASK_TYPE,
OFFLINE_TABLE_NAME)).thenReturn(new MergeRollupTaskMetadata(OFFLINE_TABLE_NAME, waterMarkMap).toZNRecord());
String segmentName7 = "testTable__7";
String segmentName8 = "testTable__8";
SegmentZKMetadata metadata7 =
getSegmentZKMetadata(segmentName7, 86_400_000L, 90_000_000L, TimeUnit.MILLISECONDS, "download7");
SegmentZKMetadata metadata8 =
getSegmentZKMetadata(segmentName8, 2_592_000_000L, 2_600_000_000L, TimeUnit.MILLISECONDS, "download8");
metadata7.setCustomMap(ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
metadata8.setCustomMap(ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(
Lists.newArrayList(metadata7, metadata8));
when(mockClusterInfoProvide.getIdealState(OFFLINE_TABLE_NAME)).thenReturn(
getIdealState(OFFLINE_TABLE_NAME, Lists.newArrayList(segmentName7, segmentName8)));
pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
assertEquals(pinotTaskConfigs.size(), 1);
checkPinotTaskConfig(pinotTaskConfigs.get(0).getConfigs(), segmentName7, MONTHLY, "concat", "30d", null, "1000000");
}
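
The selection logic above repeatedly truncates epoch timestamps to bucket boundaries (bucketStartMs = startTimeMs / bucketMs * bucketMs). A minimal standalone sketch of that arithmetic, with illustrative names not taken from the generator:

public class BucketBoundaries {
    static final long DAY_MS = 86_400_000L;

    // Truncate a timestamp down to the start of its containing bucket.
    static long bucketStart(long timeMs, long bucketMs) {
        return timeMs / bucketMs * bucketMs;
    }

    public static void main(String[] args) {
        long startTimeMs = 90_000_000L; // segment start time used in the test above
        long bucketStartMs = bucketStart(startTimeMs, DAY_MS);
        long bucketEndMs = bucketStartMs + DAY_MS;
        // Prints: bucket [86400000, 172800000) contains 90000000
        System.out.printf("bucket [%d, %d) contains %d%n", bucketStartMs, bucketEndMs, startTimeMs);
    }
}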
|
public Stream<Hit> stream() {
if (nPostingLists == 0) {
return Stream.empty();
}
return StreamSupport.stream(new PredicateSpliterator(), false);
}
|
@Test
void requireThatAHighKCanYieldResults() {
PredicateSearch search = createPredicateSearch(
new byte[]{2},
postingList(SubqueryBitmap.ALL_SUBQUERIES,
entry(0, 0x00010001)),
postingList(SubqueryBitmap.ALL_SUBQUERIES,
entry(0, 0x000200ff)));
assertEquals(List.of(new Hit(0)).toString(), search.stream().toList().toString());
}
|
@PostMapping(value = "v1/stub/sendPip", consumes = "application/json", produces = "application/json")
public void sendPipRequest(@RequestBody StubRequest request) {
final Optional<EidSession> result = sessionRepo.findById(request.getSessionId());
if (!result.isPresent()) {
throw new ClientException("Could not find session");
}
final EidSession session = result.get();
final String status = request.getStatus();
if (!"success".equals(status)) {
confirmService.sendError(session.getReturnUrl(), session.getConfirmId(), session.getConfirmSecret(), status);
} else {
final Confirmation confirm = new Confirmation(
request.getPolymorph(), DocumentType.fromValue(request.getDocumentType()), request.getSequenceNo()
);
confirmService.sendAssertion(session.getReturnUrl(), session.getConfirmId(), session.getConfirmSecret(),
PolymorphType.PIP, confirm);
}
}
|
@Test
public void testStub() {
EidSession session = new EidSession();
session.setReturnUrl("http://localhost");
session.setId("id");
session.setConfirmId("confirmId");
session.setConfirmSecret("secret");
Mockito.when(sessionRepo.findById("id")).thenReturn(Optional.of(session));
StubRequest request = new StubRequest();
request.setDocumentType("NL-Rijbewijs");
request.setPolymorph(Base64.decode("eHl6"));
request.setSequenceNo("SSSSSSSSSSSSS");
request.setSessionId("id");
request.setStatus("success");
controller.sendPipRequest(request);
Mockito.verify(confirmService).sendAssertion(
Mockito.eq("http://localhost"), Mockito.eq("confirmId"), Mockito.eq("secret"),
Mockito.eq(PolymorphType.PIP), Mockito.eq(
new Confirmation(Base64.decode("eHl6"), DocumentType.DL, "SSSSSSSSSSSSS")
)
);
}
|
@Override
public UserDetails loadUserByUsername(String userId)
throws UsernameNotFoundException {
User user = null;
try {
user = this.identityService.createUserQuery()
.userId(userId)
.singleResult();
} catch (FlowableException ex) {
// don't care
}
if (null == user) {
throw new UsernameNotFoundException(
String.format("user (%s) could not be found", userId));
}
return createFlowableUser(user);
}
|
@Test
public void testLoadingUserShouldBeCaseSensitive() {
assertThatThrownBy(() -> userDetailsService.loadUserByUsername("kErMiT"))
.isInstanceOf(UsernameNotFoundException.class)
.hasMessage("user (kErMiT) could not be found");
}
|
@Override
public void getConfig(FederationConfig.Builder builder) {
for (Target target : resolvedTargets.values())
builder.target(target.getTargetConfig());
targetSelector.ifPresent(selector -> builder.targetSelector(selector.getGlobalComponentId().stringValue()));
}
|
@Test
void leaders_must_be_the_first_search_chain_in_a_target() throws Exception {
FederationFixture f = new ProvidersWithSourceFixture();
FederationConfig federationConfig = getConfig(f.federationSearchWithDefaultSources);
List<FederationConfig.Target.SearchChain> searchChain = federationConfig.target(0).searchChain();
assertEquals("provider1", searchChain.get(0).providerId());
assertEquals("provider2", searchChain.get(1).providerId());
}
|
@VisibleForTesting
static void validateDefaultTopicFormats(final KsqlConfig config) {
validateTopicFormat(config, KsqlConfig.KSQL_DEFAULT_KEY_FORMAT_CONFIG, "key");
validateTopicFormat(config, KsqlConfig.KSQL_DEFAULT_VALUE_FORMAT_CONFIG, "value");
}
|
@Test
public void shouldValidateDefaultFormatsWithCaseInsensitivity() {
// Given:
final KsqlConfig config = configWith(ImmutableMap.of(
KsqlConfig.KSQL_DEFAULT_VALUE_FORMAT_CONFIG, "avro"
));
// When:
KsqlServerMain.validateDefaultTopicFormats(config);
// Then: No exception
}
|
@Override
public ChannelFuture writeHeaders(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding,
boolean endStream, ChannelPromise promise) {
return writeHeaders0(ctx, streamId, headers, false, 0, (short) 0, false, padding, endStream, promise);
}
|
@Test
public void canWriteHeaderFrameAfterGoAwaySent() throws Exception {
writeAllFlowControlledFrames();
createStream(STREAM_ID, false);
goAwaySent(0);
ChannelPromise promise = newPromise();
encoder.writeHeaders(ctx, STREAM_ID, EmptyHttp2Headers.INSTANCE, 0, false, promise);
verify(writer).writeHeaders(eq(ctx), eq(STREAM_ID), eq(EmptyHttp2Headers.INSTANCE),
eq(0), eq(false), eq(promise));
}
|
public static Projection of(final Collection<? extends SelectItem> selectItems) {
return new Projection(selectItems);
}
|
@Test
  public void shouldImplementEqualsAndHashCode() {
new EqualsTester()
.addEqualityGroup(
Projection.of(ImmutableList.of(ALL_COLUMNS)),
Projection.of(ImmutableList.of(ALL_COLUMNS))
)
.addEqualityGroup(
Projection.of(ImmutableList.of(ALL_A_COLUMNS))
)
.testEquals();
}
|
public static Range<Integer> integerRange(String range) {
return ofString(range, Integer::parseInt, Integer.class);
}
|
@Test
  public void testUnboundedRangeStringIsSupported() {
PostgreSQLGuavaRangeType instance = PostgreSQLGuavaRangeType.INSTANCE;
assertEquals(Range.all(), instance.integerRange("(,)"));
}
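
The "(,)" literal above parses to Guava's unbounded range. A hedged sketch of how such PostgreSQL-style literals map onto com.google.common.collect.Range; the real ofString(...) supports more cases, and this parser is illustrative only:

import com.google.common.collect.BoundType;
import com.google.common.collect.Range;

public class RangeLiterals {
    static Range<Integer> parse(String s) {
        BoundType lower = s.charAt(0) == '[' ? BoundType.CLOSED : BoundType.OPEN;
        BoundType upper = s.charAt(s.length() - 1) == ']' ? BoundType.CLOSED : BoundType.OPEN;
        String[] parts = s.substring(1, s.length() - 1).split(",", -1);
        boolean noLower = parts[0].isEmpty();
        boolean noUpper = parts[1].isEmpty();
        if (noLower && noUpper) {
            return Range.all(); // "(,)" is the unbounded range
        }
        if (noLower) {
            int hi = Integer.parseInt(parts[1]);
            return upper == BoundType.CLOSED ? Range.atMost(hi) : Range.lessThan(hi);
        }
        if (noUpper) {
            int lo = Integer.parseInt(parts[0]);
            return lower == BoundType.CLOSED ? Range.atLeast(lo) : Range.greaterThan(lo);
        }
        return Range.range(Integer.parseInt(parts[0]), lower, Integer.parseInt(parts[1]), upper);
    }

    public static void main(String[] args) {
        System.out.println(parse("(,)"));   // equal to Range.all()
        System.out.println(parse("[1,5)")); // closed-open range containing 1, 2, 3, 4
    }
}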
|
@Override
public String toString() {
return ace.getRecipient().toString();
}
|
@Test
public void testToString() {
UIRepositoryObjectAcl uiAcl = new UIRepositoryObjectAcl( createObjectAce() );
String s = uiAcl.toString();
assertNotNull( s );
assertTrue( s.contains( RECIPIENT1 ) );
}
|
public boolean removeIfInt(final IntPredicate filter)
{
requireNonNull(filter);
final int[] elements = this.elements;
@DoNotSub final int size = this.size;
if (size > 0)
{
int[] filteredElements = null;
@DoNotSub int j = -1;
for (@DoNotSub int i = 0; i < size; i++)
{
final int value = elements[i];
if (filter.test(value))
{
if (null == filteredElements)
{
filteredElements = Arrays.copyOf(elements, size);
j = i - 1;
}
}
else if (null != filteredElements)
{
filteredElements[++j] = value;
}
}
if (null != filteredElements)
{
this.elements = filteredElements;
this.size = j + 1;
return true;
}
}
return false;
}
|
@Test
    void removeIfIntThrowsNullPointerExceptionIfFilterIsNull()
{
assertThrowsExactly(NullPointerException.class, () -> list.removeIfInt(null));
}
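
The method above defers all allocation until the first element actually matches the filter; before that point it only scans. A standalone sketch of the same copy-on-first-removal strategy on a plain int[] (names are illustrative):

import java.util.Arrays;
import java.util.function.IntPredicate;

public class RemoveIfSketch {
    static int[] removeIf(int[] elements, IntPredicate filter) {
        int[] filtered = null;
        int j = -1;
        for (int i = 0; i < elements.length; i++) {
            int value = elements[i];
            if (filter.test(value)) {
                if (filtered == null) {
                    filtered = Arrays.copyOf(elements, elements.length); // first removal: snapshot the prefix
                    j = i - 1;                                           // elements [0, i) are already in place
                }
            } else if (filtered != null) {
                filtered[++j] = value; // compact survivors leftwards
            }
        }
        return filtered == null ? elements : Arrays.copyOf(filtered, j + 1);
    }

    public static void main(String[] args) {
        // Prints [1, 3]: the even values are removed.
        System.out.println(Arrays.toString(removeIf(new int[]{1, 2, 3, 4}, v -> v % 2 == 0)));
    }
}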
|
static String getRelativeFileInternal(File canonicalBaseFile, File canonicalFileToRelativize) {
List<String> basePath = getPathComponents(canonicalBaseFile);
List<String> pathToRelativize = getPathComponents(canonicalFileToRelativize);
        //if the roots aren't the same (i.e. different drives on a Windows machine), we can't construct a relative
        //path from one to the other, so just return the canonical file
if (!basePath.get(0).equals(pathToRelativize.get(0))) {
return canonicalFileToRelativize.getPath();
}
int commonDirs;
StringBuilder sb = new StringBuilder();
        for (commonDirs = 1; commonDirs < basePath.size() && commonDirs < pathToRelativize.size(); commonDirs++) {
if (!basePath.get(commonDirs).equals(pathToRelativize.get(commonDirs))) {
break;
}
}
boolean first = true;
        for (int i = commonDirs; i < basePath.size(); i++) {
if (!first) {
sb.append(File.separatorChar);
} else {
first = false;
}
sb.append("..");
}
first = true;
        for (int i = commonDirs; i < pathToRelativize.size(); i++) {
if (first) {
if (sb.length() != 0) {
sb.append(File.separatorChar);
}
first = false;
} else {
sb.append(File.separatorChar);
}
sb.append(pathToRelativize.get(i));
}
if (sb.length() == 0) {
return ".";
}
return sb.toString();
}
|
@Test
public void pathUtilTest11() {
File[] roots = File.listRoots();
File basePath = new File(roots[0] + "some");
File relativePath = new File(roots[0] + "some" + File.separatorChar + "dir" + File.separatorChar + "dir2");
String path = PathUtil.getRelativeFileInternal(basePath, relativePath);
Assert.assertEquals(path, "dir" + File.separatorChar + "dir2");
}
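
For paths that share a root, java.nio produces the same ".."-style result and makes a quick cross-check of the hand-rolled relativization above (paths here are illustrative):

import java.nio.file.Path;
import java.nio.file.Paths;

public class RelativizeSketch {
    public static void main(String[] args) {
        Path base = Paths.get("/some");
        Path target = Paths.get("/some/dir/dir2");
        System.out.println(base.relativize(target)); // dir/dir2
        System.out.println(target.relativize(base)); // ../..
    }
}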
|
public boolean isAncestorOf(ResourceGroupId descendant)
{
List<String> descendantSegments = descendant.getSegments();
if (segments.size() >= descendantSegments.size()) {
return false;
}
return descendantSegments.subList(0, segments.size()).equals(segments);
}
|
@Test
public void testIsAncestor()
{
ResourceGroupId root = new ResourceGroupId("root");
ResourceGroupId rootA = new ResourceGroupId(root, "a");
ResourceGroupId rootAFoo = new ResourceGroupId(rootA, "foo");
ResourceGroupId rootBar = new ResourceGroupId(root, "bar");
assertTrue(root.isAncestorOf(rootA));
assertTrue(root.isAncestorOf(rootAFoo));
assertTrue(root.isAncestorOf(rootBar));
assertTrue(rootA.isAncestorOf(rootAFoo));
assertFalse(rootA.isAncestorOf(rootBar));
assertFalse(rootAFoo.isAncestorOf(rootBar));
assertFalse(rootBar.isAncestorOf(rootAFoo));
assertFalse(rootAFoo.isAncestorOf(root));
assertFalse(root.isAncestorOf(root));
assertFalse(rootAFoo.isAncestorOf(rootAFoo));
}
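
The ancestor relation above is a strict list-prefix check over path segments; the size comparison is what makes a group not its own ancestor. The same comparison on plain lists:

import java.util.List;

public class PrefixSketch {
    static boolean isStrictPrefix(List<String> prefix, List<String> path) {
        return prefix.size() < path.size()
                && path.subList(0, prefix.size()).equals(prefix);
    }

    public static void main(String[] args) {
        System.out.println(isStrictPrefix(List.of("root"), List.of("root", "a", "foo"))); // true
        System.out.println(isStrictPrefix(List.of("root", "a"), List.of("root", "a")));   // false: not strict
    }
}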
|
@Description("inverse of Binomial cdf given numberOfTrials, successProbability parameters and p")
@ScalarFunction
@SqlType(StandardTypes.INTEGER)
public static long inverseBinomialCdf(
@SqlType(StandardTypes.INTEGER) long numberOfTrials,
@SqlType(StandardTypes.DOUBLE) double successProbability,
@SqlType(StandardTypes.DOUBLE) double p)
{
checkCondition(p >= 0 && p <= 1, INVALID_FUNCTION_ARGUMENT, "p must be in the interval [0, 1]");
checkCondition(successProbability >= 0 && successProbability <= 1, INVALID_FUNCTION_ARGUMENT, "successProbability must be in the interval [0, 1]");
checkCondition(numberOfTrials > 0, INVALID_FUNCTION_ARGUMENT, "numberOfTrials must be greater than 0");
BinomialDistribution distribution = new BinomialDistribution(null, (int) numberOfTrials, successProbability);
return distribution.inverseCumulativeProbability(p);
}
|
@Test
public void testInverseBinomialCdf()
{
assertFunction("inverse_binomial_cdf(20, 0.5, 0.5)", INTEGER, 10);
assertFunction("inverse_binomial_cdf(20, 0.5, 0.0)", INTEGER, 0);
assertFunction("inverse_binomial_cdf(20, 0.5, 1.0)", INTEGER, 20);
assertInvalidFunction("inverse_binomial_cdf(5, -0.5, 0.3)", "successProbability must be in the interval [0, 1]");
assertInvalidFunction("inverse_binomial_cdf(5, 1.5, 0.3)", "successProbability must be in the interval [0, 1]");
assertInvalidFunction("inverse_binomial_cdf(5, 0.5, -3.0)", "p must be in the interval [0, 1]");
assertInvalidFunction("inverse_binomial_cdf(5, 0.5, 3.0)", "p must be in the interval [0, 1]");
assertInvalidFunction("inverse_binomial_cdf(-5, 0.5, 0.3)", "numberOfTrials must be greater than 0");
}
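
The function above delegates to commons-math after validating its arguments, so the test's expected values can be reproduced with the library directly (the focal code's null first constructor argument is just an omitted RandomGenerator):

import org.apache.commons.math3.distribution.BinomialDistribution;

public class InverseBinomialSketch {
    public static void main(String[] args) {
        BinomialDistribution d = new BinomialDistribution(20, 0.5);
        System.out.println(d.inverseCumulativeProbability(0.5)); // 10, the median of Binomial(20, 0.5)
        System.out.println(d.inverseCumulativeProbability(0.0)); // 0, the lower bound of the support
        System.out.println(d.inverseCumulativeProbability(1.0)); // 20, the upper bound of the support
    }
}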
|
@Override
public void doInject(RequestResource resource, RamContext context, LoginIdentityContext result) {
String accessKey = context.getAccessKey();
String secretKey = context.getSecretKey();
        // STS temporary credential authentication takes precedence over AK/SK authentication
if (StsConfig.getInstance().isStsOn()) {
StsCredential stsCredential = StsCredentialHolder.getInstance().getStsCredential();
accessKey = stsCredential.getAccessKeyId();
secretKey = stsCredential.getAccessKeySecret();
result.setParameter(IdentifyConstants.SECURITY_TOKEN_HEADER, stsCredential.getSecurityToken());
}
if (StringUtils.isNotEmpty(accessKey) && StringUtils.isNotBlank(secretKey)) {
result.setParameter(ACCESS_KEY_HEADER, accessKey);
}
String signatureKey = secretKey;
if (StringUtils.isNotEmpty(context.getRegionId())) {
signatureKey = CalculateV4SigningKeyUtil
.finalSigningKeyStringWithDefaultInfo(secretKey, context.getRegionId());
result.setParameter(RamConstants.SIGNATURE_VERSION, RamConstants.V4);
}
Map<String, String> signHeaders = SpasAdapter
.getSignHeaders(getResource(resource.getNamespace(), resource.getGroup()), signatureKey);
result.setParameters(signHeaders);
}
|
@Test
void testDoInjectWithGroup() throws Exception {
resource.setNamespace("");
LoginIdentityContext actual = new LoginIdentityContext();
configResourceInjector.doInject(resource, ramContext, actual);
assertEquals(3, actual.getAllKey().size());
assertEquals(PropertyKeyConst.ACCESS_KEY, actual.getParameter("Spas-AccessKey"));
assertTrue(actual.getAllKey().contains("Timestamp"));
assertTrue(actual.getAllKey().contains("Spas-Signature"));
}
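
The injection logic above encodes one precedence rule: an STS temporary credential, when available, overrides the statically configured AK/SK pair and additionally carries a security token. A hedged sketch of just that rule; the Credential type below is an illustrative stand-in, not a Nacos class:

import java.util.Optional;

public class CredentialPrecedence {
    static final class Credential {
        final String accessKey;
        final String secretKey;
        final String securityToken; // null unless this is an STS credential

        Credential(String accessKey, String secretKey, String securityToken) {
            this.accessKey = accessKey;
            this.secretKey = secretKey;
            this.securityToken = securityToken;
        }

        @Override
        public String toString() {
            return accessKey + "/" + secretKey + (securityToken == null ? "" : " (token " + securityToken + ")");
        }
    }

    // STS wins whenever a temporary credential was fetched; otherwise fall back to AK/SK.
    static Credential resolve(Optional<Credential> sts, String accessKey, String secretKey) {
        return sts.orElseGet(() -> new Credential(accessKey, secretKey, null));
    }

    public static void main(String[] args) {
        Credential fromSts = new Credential("sts-ak", "sts-sk", "token-123");
        System.out.println(resolve(Optional.of(fromSts), "static-ak", "static-sk")); // sts-ak/sts-sk (token token-123)
        System.out.println(resolve(Optional.empty(), "static-ak", "static-sk"));     // static-ak/static-sk
    }
}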
|
@Override
public Num calculate(Num price, Num amount) {
return price.zero();
}
|
@Test
public void calculateBuyPosition() {
// Holding a bought asset should not incur borrowing costs
int holdingPeriod = 2;
Trade entry = Trade.buyAt(0, DoubleNum.valueOf(100), DoubleNum.valueOf(1));
Trade exit = Trade.sellAt(holdingPeriod, DoubleNum.valueOf(110), DoubleNum.valueOf(1));
Position position = new Position(entry, exit, new ZeroCostModel(), borrowingModel);
Num costsFromPosition = position.getHoldingCost();
Num costsFromModel = borrowingModel.calculate(position, holdingPeriod);
assertNumEquals(costsFromModel, costsFromPosition);
assertNumEquals(costsFromModel, DoubleNum.valueOf(0));
}
|
@SqlNullable
@Description("Returns an array of interior rings of a polygon")
@ScalarFunction("ST_InteriorRings")
@SqlType("array(" + GEOMETRY_TYPE_NAME + ")")
public static Block stInteriorRings(@SqlType(GEOMETRY_TYPE_NAME) Slice input)
{
Geometry geometry = deserialize(input);
validateType("ST_InteriorRings", geometry, EnumSet.of(POLYGON));
if (geometry.isEmpty()) {
return null;
}
org.locationtech.jts.geom.Polygon polygon = (org.locationtech.jts.geom.Polygon) geometry;
BlockBuilder blockBuilder = GEOMETRY.createBlockBuilder(null, polygon.getNumInteriorRing());
for (int i = 0; i < polygon.getNumInteriorRing(); i++) {
GEOMETRY.writeSlice(blockBuilder, serialize((LineString) polygon.getInteriorRingN(i)));
}
return blockBuilder.build();
}
|
@Test
public void testSTInteriorRings()
{
assertInvalidInteriorRings("POINT (2 3)", "POINT");
assertInvalidInteriorRings("LINESTRING EMPTY", "LINE_STRING");
assertInvalidInteriorRings("MULTIPOINT (30 20, 60 70)", "MULTI_POINT");
assertInvalidInteriorRings("MULTILINESTRING ((1 10, 100 1000), (2 2, 1 0, 5 6))", "MULTI_LINE_STRING");
assertInvalidInteriorRings("MULTIPOLYGON (((1 1, 1 3, 3 3, 3 1, 1 1)), ((0 0, 0 2, 2 2, 2 0, 0 0)))", "MULTI_POLYGON");
assertInvalidInteriorRings("GEOMETRYCOLLECTION (POINT (1 1), POINT (2 3), LINESTRING (5 8, 13 21))", "GEOMETRY_COLLECTION");
assertFunction("ST_InteriorRings(ST_GeometryFromText('POLYGON EMPTY'))", new ArrayType(GEOMETRY), null);
assertInteriorRings("POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))");
assertInteriorRings("POLYGON ((0 0, 0 3, 3 3, 3 0, 0 0), (1 1, 2 1, 2 2, 1 2, 1 1))", "LINESTRING (1 1, 2 1, 2 2, 1 2, 1 1)");
assertInteriorRings("POLYGON ((0 0, 0 5, 5 5, 5 0, 0 0), (1 1, 2 1, 2 2, 1 2, 1 1), (3 3, 4 3, 4 4, 3 4, 3 3))",
"LINESTRING (1 1, 2 1, 2 2, 1 2, 1 1)", "LINESTRING (3 3, 4 3, 4 4, 3 4, 3 3)");
}
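
Stripped of the Presto serialization, ST_InteriorRings is a walk over every ring of a polygon except its exterior shell. The same walk in plain JTS:

import org.locationtech.jts.geom.LineString;
import org.locationtech.jts.geom.Polygon;
import org.locationtech.jts.io.WKTReader;

public class InteriorRingsSketch {
    public static void main(String[] args) throws Exception {
        Polygon polygon = (Polygon) new WKTReader().read(
                "POLYGON ((0 0, 0 3, 3 3, 3 0, 0 0), (1 1, 2 1, 2 2, 1 2, 1 1))");
        for (int i = 0; i < polygon.getNumInteriorRing(); i++) {
            LineString ring = polygon.getInteriorRingN(i);
            System.out.println(ring); // the single hole: LINEARRING (1 1, 2 1, 2 2, 1 2, 1 1)
        }
    }
}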
|
public List<CompactionTask> produce() {
// get all CF files sorted by key range start (L1+)
List<SstFileMetaData> sstSortedByCfAndStartingKeys =
metadataSupplier.get().stream()
.filter(l -> l.level() > 0) // let RocksDB deal with L0
.sorted(SST_COMPARATOR)
.collect(Collectors.toList());
LOG.trace("Input files: {}", sstSortedByCfAndStartingKeys.size());
List<CompactionTask> tasks = groupIntoTasks(sstSortedByCfAndStartingKeys);
tasks.sort(Comparator.<CompactionTask>comparingInt(t -> t.files.size()).reversed());
return tasks.subList(0, Math.min(tasks.size(), settings.maxManualCompactions));
}
|
@Test
void testNotGroupingDifferentColumnFamilies() {
ColumnFamilyHandle cf1 = rocksDBExtension.createNewColumnFamily("cf1");
defaultCfLookup.add(cf1);
ColumnFamilyHandle cf2 = rocksDBExtension.createNewColumnFamily("cf2");
defaultCfLookup.add(cf2);
assertThat(
produce(
configBuilder().build(),
sstBuilder().setColumnFamily(cf1).build(),
sstBuilder().setColumnFamily(cf2).build()))
.hasSize(2);
}
|
@Override
public NSImage folderIcon(final Integer size) {
NSImage folder = this.iconNamed("NSFolder", size);
if(null == folder) {
return this.iconNamed("NSFolder", size);
}
return folder;
}
|
@Test
public void testFolderIcon64() {
final NSImage icon = new NSImageIconCache().folderIcon(64);
assertNotNull(icon);
assertTrue(icon.isValid());
assertFalse(icon.isTemplate());
assertEquals(64, icon.size().width.intValue());
assertEquals(64, icon.size().height.intValue());
assertTrue(icon.representations().count().intValue() >= 1);
}
|
@Override
public KTable<Windowed<K>, V> aggregate(final Initializer<V> initializer,
final Merger<? super K, V> sessionMerger) {
return aggregate(initializer, sessionMerger, Materialized.with(null, null));
}
|
@Test
public void shouldNotHaveNullMaterialized2OnAggregate() {
assertThrows(NullPointerException.class, () -> windowedCogroupedStream.aggregate(MockInitializer.STRING_INIT,
sessionMerger, Named.as("name"), null));
}
|
public TaskRunScheduler getTaskRunScheduler() {
return taskRunScheduler;
}
|
@Test
public void testTaskRunMergeTimeFirst2() {
TaskRunManager taskRunManager = new TaskRunManager();
Task task = new Task("test");
task.setDefinition("select 1");
long taskId = 1;
TaskRun taskRun1 = TaskRunBuilder
.newBuilder(task)
.setExecuteOption(DEFAULT_MERGE_OPTION)
.build();
long now = System.currentTimeMillis();
taskRun1.setTaskId(taskId);
taskRun1.initStatus("1", now + 10);
taskRun1.getStatus().setPriority(0);
TaskRun taskRun2 = TaskRunBuilder
.newBuilder(task)
.setExecuteOption(DEFAULT_MERGE_OPTION)
.build();
taskRun2.setTaskId(taskId);
taskRun2.initStatus("2", now);
taskRun2.getStatus().setPriority(0);
taskRunManager.arrangeTaskRun(taskRun2, false);
taskRunManager.arrangeTaskRun(taskRun1, false);
TaskRunScheduler taskRunScheduler = taskRunManager.getTaskRunScheduler();
List<TaskRun> taskRuns = Lists.newArrayList(taskRunScheduler.getPendingTaskRunsByTaskId(taskId));
        Assert.assertNotNull(taskRuns);
Assert.assertEquals(1, taskRuns.size());
TaskRun taskRun = taskRuns.get(0);
Assert.assertEquals(now, taskRun.getStatus().getCreateTime());
}
|
static Serde<List<?>> createSerde(final PersistenceSchema schema) {
final List<SimpleColumn> columns = schema.columns();
if (columns.isEmpty()) {
// No columns:
return new KsqlVoidSerde<>();
}
if (columns.size() != 1) {
throw new KsqlException("The '" + FormatFactory.KAFKA.name()
+ "' format only supports a single field. Got: " + columns);
}
final SimpleColumn singleColumn = columns.get(0);
final Class<?> javaType = SchemaConverters.sqlToJavaConverter()
.toJavaType(singleColumn.type());
return createSerde(singleColumn, javaType);
}
|
@Test
public void shouldThrowIfStruct() {
// Given:
final PersistenceSchema schema = schemaWithFieldOfType(SqlTypes.struct()
.field("f0", SqlTypes.STRING)
.build());
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> KafkaSerdeFactory.createSerde(schema)
);
// Then:
assertThat(e.getMessage(), containsString("The 'KAFKA' format does not support type 'STRUCT'"));
}
|
public static Field p(String fieldName) {
return SELECT_ALL_FROM_SOURCES_ALL.where(fieldName);
}
|
@Test
void test_programmability_2() {
Map<String, String> map = stringStringMap("a", "1", "b", "2", "c", "3");
Query q = Q.p();
map.forEach((k, v) -> q.and(Q.p(k).contains(v)));
assertEquals(q.build(), "yql=select * from sources * where a contains \"1\" and b contains \"2\" and c contains \"3\"");
}
|
public static <K, InputT, AccumT> ParDoFn create(
PipelineOptions options,
KvCoder<K, ?> inputElementCoder,
@Nullable CloudObject cloudUserFn,
@Nullable List<SideInputInfo> sideInputInfos,
List<Receiver> receivers,
DataflowExecutionContext<?> executionContext,
DataflowOperationContext operationContext)
throws Exception {
AppliedCombineFn<K, InputT, AccumT, ?> combineFn;
SideInputReader sideInputReader;
StepContext stepContext;
if (cloudUserFn == null) {
combineFn = null;
sideInputReader = NullSideInputReader.empty();
stepContext = null;
} else {
Object deserializedFn =
SerializableUtils.deserializeFromByteArray(
getBytes(cloudUserFn, PropertyNames.SERIALIZED_FN), "serialized combine fn");
@SuppressWarnings("unchecked")
AppliedCombineFn<K, InputT, AccumT, ?> combineFnUnchecked =
((AppliedCombineFn<K, InputT, AccumT, ?>) deserializedFn);
combineFn = combineFnUnchecked;
sideInputReader =
executionContext.getSideInputReader(
sideInputInfos, combineFn.getSideInputViews(), operationContext);
stepContext = executionContext.getStepContext(operationContext);
}
return create(
options, inputElementCoder, combineFn, sideInputReader, receivers.get(0), stepContext);
}
|
@Test
public void testCreateWithCombinerAndStreaming() throws Exception {
StreamingOptions options = PipelineOptionsFactory.as(StreamingOptions.class);
options.setStreaming(true);
Coder keyCoder = StringUtf8Coder.of();
Coder valueCoder = BigEndianIntegerCoder.of();
KvCoder<String, Integer> kvCoder = KvCoder.of(keyCoder, valueCoder);
TestOutputReceiver receiver =
new TestOutputReceiver(
new ElementByteSizeObservableCoder(WindowedValue.getValueOnlyCoder(kvCoder)),
counterSet,
NameContextsForTests.nameContextForTest());
ParDoFn pgbk =
PartialGroupByKeyParDoFns.create(
options,
kvCoder,
AppliedCombineFn.withInputCoder(
Sum.ofIntegers(), CoderRegistry.createDefault(), kvCoder),
NullSideInputReader.empty(),
receiver,
null);
assertTrue(pgbk instanceof SimplePartialGroupByKeyParDoFn);
}
|
protected FileStatus[] listStatus(JobConf job) throws IOException {
Path[] dirs = getInputPaths(job);
if (dirs.length == 0) {
throw new IOException("No input paths specified in job");
}
// get tokens for all the required FileSystems..
TokenCache.obtainTokensForNamenodes(job.getCredentials(), dirs, job);
// Whether we need to recursive look into the directory structure
boolean recursive = job.getBoolean(INPUT_DIR_RECURSIVE, false);
// creates a MultiPathFilter with the hiddenFileFilter and the
// user provided one (if any).
List<PathFilter> filters = new ArrayList<PathFilter>();
filters.add(hiddenFileFilter);
PathFilter jobFilter = getInputPathFilter(job);
if (jobFilter != null) {
filters.add(jobFilter);
}
PathFilter inputFilter = new MultiPathFilter(filters);
FileStatus[] result;
int numThreads = job
.getInt(
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.LIST_STATUS_NUM_THREADS,
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.DEFAULT_LIST_STATUS_NUM_THREADS);
StopWatch sw = new StopWatch().start();
if (numThreads == 1) {
List<FileStatus> locatedFiles = singleThreadedListStatus(job, dirs, inputFilter, recursive);
result = locatedFiles.toArray(new FileStatus[locatedFiles.size()]);
} else {
Iterable<FileStatus> locatedFiles = null;
try {
LocatedFileStatusFetcher locatedFileStatusFetcher = new LocatedFileStatusFetcher(
job, dirs, recursive, inputFilter, false);
locatedFiles = locatedFileStatusFetcher.getFileStatuses();
} catch (InterruptedException e) {
throw (IOException)
new InterruptedIOException("Interrupted while getting file statuses")
.initCause(e);
}
result = Iterables.toArray(locatedFiles, FileStatus.class);
}
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Time taken to get FileStatuses: "
+ sw.now(TimeUnit.MILLISECONDS));
}
LOG.info("Total input files to process : " + result.length);
return result;
}
|
@Test
public void testListStatusSimple() throws IOException {
Configuration conf = new Configuration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
List<Path> expectedPaths = org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat
.configureTestSimple(conf, localFs);
JobConf jobConf = new JobConf(conf);
TextInputFormat fif = new TextInputFormat();
fif.configure(jobConf);
FileStatus[] statuses = fif.listStatus(jobConf);
org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat
.verifyFileStatuses(expectedPaths, Lists.newArrayList(statuses),
localFs);
}
|
public static PulsarClient createClient(ServiceConfiguration brokerConfig) throws PulsarClientException {
ClientBuilder clientBuilder = PulsarClient.builder()
.memoryLimit(0, SizeUnit.BYTES);
// Apply all arbitrary configuration. This must be called before setting any fields annotated as
// @Secret on the ClientConfigurationData object because of the way they are serialized.
// See https://github.com/apache/pulsar/issues/8509 for more information.
clientBuilder.loadConf(PropertiesUtils.filterAndMapProperties(brokerConfig.getProperties(), "brokerClient_"));
if (isNotBlank(brokerConfig.getBrokerClientAuthenticationPlugin())) {
clientBuilder.authentication(brokerConfig.getBrokerClientAuthenticationPlugin(),
brokerConfig.getBrokerClientAuthenticationParameters());
}
AdvertisedListener internalListener = ServiceConfigurationUtils.getInternalListener(brokerConfig, "pulsar+ssl");
if (internalListener.getBrokerServiceUrlTls() != null && brokerConfig.isBrokerClientTlsEnabled()) {
clientBuilder.serviceUrl(internalListener.getBrokerServiceUrlTls().toString())
.allowTlsInsecureConnection(brokerConfig.isTlsAllowInsecureConnection())
.enableTlsHostnameVerification(brokerConfig.isTlsHostnameVerificationEnabled())
.sslFactoryPlugin(brokerConfig.getBrokerClientSslFactoryPlugin())
.sslFactoryPluginParams(brokerConfig.getBrokerClientSslFactoryPluginParams());
if (brokerConfig.isBrokerClientTlsEnabledWithKeyStore()) {
clientBuilder.useKeyStoreTls(true)
.tlsKeyStoreType(brokerConfig.getBrokerClientTlsKeyStoreType())
.tlsKeyStorePath(brokerConfig.getBrokerClientTlsKeyStore())
.tlsKeyStorePassword(brokerConfig.getBrokerClientTlsKeyStorePassword())
.tlsTrustStoreType(brokerConfig.getBrokerClientTlsTrustStoreType())
.tlsTrustStorePath(brokerConfig.getBrokerClientTlsTrustStore())
.tlsTrustStorePassword(brokerConfig.getBrokerClientTlsTrustStorePassword());
} else {
clientBuilder.tlsTrustCertsFilePath(brokerConfig.getBrokerClientTrustCertsFilePath())
.tlsKeyFilePath(brokerConfig.getBrokerClientKeyFilePath())
.tlsCertificateFilePath(brokerConfig.getBrokerClientCertificateFilePath());
}
} else {
internalListener = ServiceConfigurationUtils.getInternalListener(brokerConfig, "pulsar");
clientBuilder.serviceUrl(internalListener.getBrokerServiceUrl().toString());
}
return clientBuilder.build();
}
|
@Test
public void testUseTlsUrlWithPEM() throws PulsarClientException {
ServiceConfiguration serviceConfiguration = spy(ServiceConfiguration.class);
serviceConfiguration.setBrokerServicePortTls(Optional.of(6651));
serviceConfiguration.setBrokerClientTlsEnabled(true);
serviceConfiguration.setProperties(new Properties());
@Cleanup
PulsarClient ignored = CompactorTool.createClient(serviceConfiguration);
verify(serviceConfiguration, times(1)).isBrokerClientTlsEnabled();
verify(serviceConfiguration, times(1)).isTlsAllowInsecureConnection();
verify(serviceConfiguration, times(1)).getBrokerClientKeyFilePath();
verify(serviceConfiguration, times(1)).getBrokerClientTrustCertsFilePath();
verify(serviceConfiguration, times(1)).getBrokerClientCertificateFilePath();
serviceConfiguration.setBrokerClientTlsTrustStorePassword(MockedPulsarServiceBaseTest.BROKER_KEYSTORE_PW);
}
|
public OptionalInt leaderEpochFor(TopicPartition tp) {
PartitionMetadata partitionMetadata = metadataByPartition.get(tp);
if (partitionMetadata == null || !partitionMetadata.leaderEpoch.isPresent()) {
return OptionalInt.empty();
} else {
return OptionalInt.of(partitionMetadata.leaderEpoch.get());
}
}
|
@Test
public void testLeaderEpochFor() {
// Setup partition 0 with a leader-epoch of 10.
TopicPartition topicPartition1 = new TopicPartition("topic", 0);
MetadataResponse.PartitionMetadata partitionMetadata1 = new MetadataResponse.PartitionMetadata(
Errors.NONE,
topicPartition1,
Optional.of(5),
Optional.of(10),
Arrays.asList(5, 6, 7),
Arrays.asList(5, 6, 7),
Collections.emptyList());
// Setup partition 1 with an unknown leader epoch.
TopicPartition topicPartition2 = new TopicPartition("topic", 1);
MetadataResponse.PartitionMetadata partitionMetadata2 = new MetadataResponse.PartitionMetadata(
Errors.NONE,
topicPartition2,
Optional.of(5),
Optional.empty(),
Arrays.asList(5, 6, 7),
Arrays.asList(5, 6, 7),
Collections.emptyList());
Map<Integer, Node> nodesById = new HashMap<>();
nodesById.put(5, new Node(5, "localhost", 2077));
nodesById.put(6, new Node(6, "localhost", 2078));
nodesById.put(7, new Node(7, "localhost", 2079));
MetadataSnapshot cache = new MetadataSnapshot("clusterId",
nodesById,
Arrays.asList(partitionMetadata1, partitionMetadata2),
Collections.emptySet(),
Collections.emptySet(),
Collections.emptySet(),
null,
Collections.emptyMap());
assertEquals(OptionalInt.of(10), cache.leaderEpochFor(topicPartition1));
assertEquals(OptionalInt.empty(), cache.leaderEpochFor(topicPartition2));
assertEquals(OptionalInt.empty(), cache.leaderEpochFor(new TopicPartition("topic_missing", 0)));
}
|
public IsJson(Matcher<? super ReadContext> jsonMatcher) {
this.jsonMatcher = jsonMatcher;
}
|
@Test
public void shouldMatchValidJson() {
assertThat(VALID_JSON, isJson());
assertThat(BOOKS_JSON_STRING, isJson());
}
|
@VisibleForTesting
void setIsPartialBufferCleanupRequired() {
isPartialBufferCleanupRequired = true;
}
|
@TestTemplate
void testSkipPartialDataLongRecordOccupyEntireBuffer() throws Exception {
final BufferWritingResultPartition writer = createResultPartition();
final PipelinedApproximateSubpartition subpartition =
getPipelinedApproximateSubpartition(writer);
writer.emitRecord(toByteBuffer(0, 1, 2, 3, 4, 5, 6, 7, 8, 42), 0);
assertContent(requireNonNull(subpartition.pollBuffer()).buffer(), null, 0, 1, 2, 3);
subpartition.setIsPartialBufferCleanupRequired();
assertThat(subpartition.pollBuffer()).isNull();
}
|
public static int nextCapacity(int current) {
assert current > 0 && Long.bitCount(current) == 1 : "Capacity must be a power of two.";
if (current < MIN_CAPACITY / 2) {
current = MIN_CAPACITY / 2;
}
current <<= 1;
if (current < 0) {
throw new RuntimeException("Maximum capacity exceeded.");
}
return current;
}
|
@Test(expected = AssertionError.class)
@RequireAssertEnabled
public void testNextCapacity_withInt_shouldThrowIfCapacityNoPowerOfTwo() {
int capacity = 23;
nextCapacity(capacity);
}
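
A standalone sketch of the doubling rule in nextCapacity: capacities stay powers of two, never drop below half of the minimum before doubling, and an int overflow (doubling 2^30) is rejected. MIN_CAPACITY is illustrative here; the real constant may differ:

public class CapacitySketch {
    static final int MIN_CAPACITY = 8; // illustrative value

    static int nextCapacity(int current) {
        assert current > 0 && Integer.bitCount(current) == 1 : "Capacity must be a power of two.";
        if (current < MIN_CAPACITY / 2) {
            current = MIN_CAPACITY / 2;
        }
        current <<= 1;
        if (current < 0) { // 2^30 << 1 wraps to Integer.MIN_VALUE
            throw new RuntimeException("Maximum capacity exceeded.");
        }
        return current;
    }

    public static void main(String[] args) {
        System.out.println(nextCapacity(2));       // 8: bumped to MIN_CAPACITY / 2 = 4, then doubled
        System.out.println(nextCapacity(1 << 20)); // 2097152
        // nextCapacity(1 << 30) would throw: doubling overflows int
    }
}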
|
@Override
public void validate() throws TelegramApiValidationException {
if (inlineQueryId.isEmpty()) {
throw new TelegramApiValidationException("InlineQueryId can't be empty", this);
}
for (InlineQueryResult result : results) {
result.validate();
}
if (button != null) {
button.validate();
}
}
|
@Test
void testSwitchPmTextCanNotBeEmpty() {
answerInlineQuery.setInlineQueryId("RANDOMEID");
answerInlineQuery.setResults(new ArrayList<>());
answerInlineQuery.setButton(InlineQueryResultsButton
.builder()
.text("")
.build());
try {
answerInlineQuery.validate();
} catch (TelegramApiValidationException e) {
assertEquals("Text can't be empty", e.getMessage());
}
}
|
@Override
public void run() {
try {
backgroundJobServer.getJobSteward().notifyThreadOccupied();
MDCMapper.loadMDCContextFromJob(job);
performJob();
} catch (Exception e) {
if (isJobDeletedWhileProcessing(e)) {
// nothing to do anymore as Job is deleted
return;
} else if (isJobServerStopped(e)) {
updateJobStateToFailedAndRunJobFilters("Job processing was stopped as background job server has stopped", e);
Thread.currentThread().interrupt();
} else if (isJobNotFoundException(e)) {
updateJobStateToFailedAndRunJobFilters("Job method not found", e);
} else {
updateJobStateToFailedAndRunJobFilters("An exception occurred during the performance of the job", e);
}
} finally {
backgroundJobServer.getJobSteward().notifyThreadIdle();
MDC.clear();
}
}
|
@Test
void mdcIsAlsoAvailableDuringLoggingOfJobFailure() throws Exception {
// GIVEN
Job job = anEnqueuedJob().build();
MDC.put("testKey", "testValue");
MDCMapper.saveMDCContextToJob(job);
BackgroundJobRunner runner = mock(BackgroundJobRunner.class);
doThrow(new InvocationTargetException(new RuntimeException("test error"))).when(runner).run(job);
when(backgroundJobServer.getBackgroundJobRunner(job)).thenReturn(runner);
BackgroundJobPerformer backgroundJobPerformer = new BackgroundJobPerformer(backgroundJobServer, job);
ListAppender logger = LoggerAssert.initFor(backgroundJobPerformer);
// WHEN
backgroundJobPerformer.run();
// THEN
assertThat(logger)
.hasWarningMessageContaining(
"Job(id=" + job.getId() + ", jobName='" + job.getJobName() + "') processing failed",
Map.of(
"jobrunr.jobId", job.getId().toString(),
"jobrunr.jobName", job.getJobName(),
"testKey", "testValue"
));
assertThat(MDC.getCopyOfContextMap()).isNullOrEmpty(); // backgroundJobPerformer clears MDC Context
}
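
The test verifies the MDC handoff the performer relies on: context captured when the job is created is restored on the worker thread and always cleared afterwards. A minimal sketch with plain SLF4J; MDCMapper is JobRunr-specific, so a Map stands in for the job's stored context:

import java.util.HashMap;
import java.util.Map;
import org.slf4j.MDC;

public class MdcHandoffSketch {
    public static void main(String[] args) {
        MDC.put("testKey", "testValue");
        Map<String, String> savedWithJob = new HashMap<>(MDC.getCopyOfContextMap()); // "save to job"
        MDC.clear();

        // ... later, on the worker thread:
        try {
            MDC.setContextMap(savedWithJob);        // "load from job"
            System.out.println(MDC.get("testKey")); // testValue, available while logging the failure
        } finally {
            MDC.clear(); // the performer clears the MDC in a finally block
        }
    }
}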
|
@Override
public void handle(HttpExchange httpExchange) {
try {
String requestUri = httpExchange.getRequestURI().toString();
requestUri = sanitizeRequestUri(requestUri);
final String toServe = requestUri.substring((contextPath + "/").length());
final URL resource = this.getClass().getClassLoader().getResource(rootDir + toServe);
if (resource != null) {
httpExchange.getResponseHeaders().add(ContentType._HEADER_NAME, ContentType.from(toServe));
httpExchange.getResponseHeaders().add("Access-Control-Allow-Origin", "*");
httpExchange.sendResponseHeaders(200, 0);
copyResourceToResponseBody(resource, httpExchange);
} else {
httpExchange.sendResponseHeaders(404, -1);
}
} catch (Exception shouldNotHappen) {
LOGGER.error("Error serving static files", shouldNotHappen);
}
}
|
@Test
void returns404IfFileNotFound() throws IOException {
when(httpExchange.getRequestURI()).thenReturn(URI.create("/dashboard/404.html"));
staticFileHttpHandler.handle(httpExchange);
verify(httpExchange).sendResponseHeaders(404, -1L);
}
|
@Override
public boolean dropTable(TableIdentifier identifier, boolean purge) {
if (!tableExists(identifier)) {
return false;
}
EcsURI tableObjectURI = tableURI(identifier);
if (purge) {
      // if we re-use the same instance, current() will throw an exception.
TableOperations ops = newTableOps(identifier);
TableMetadata current = ops.current();
if (current == null) {
return false;
}
CatalogUtil.dropTableData(ops.io(), current);
}
client.deleteObject(tableObjectURI.bucket(), tableObjectURI.name());
return true;
}
|
@Test
public void testRegisterTable() {
TableIdentifier identifier = TableIdentifier.of("a", "t1");
ecsCatalog.createTable(identifier, SCHEMA);
Table registeringTable = ecsCatalog.loadTable(identifier);
ecsCatalog.dropTable(identifier, false);
TableOperations ops = ((HasTableOperations) registeringTable).operations();
String metadataLocation = ((EcsTableOperations) ops).currentMetadataLocation();
Table registeredTable = ecsCatalog.registerTable(identifier, metadataLocation);
assertThat(registeredTable).isNotNull();
String expectedMetadataLocation =
((HasTableOperations) registeredTable).operations().current().metadataFileLocation();
assertThat(metadataLocation).isEqualTo(expectedMetadataLocation);
assertThat(ecsCatalog.loadTable(identifier)).isNotNull();
assertThat(ecsCatalog.dropTable(identifier, true)).isTrue();
}
|
public void createTask(CreateTaskRequest request) throws Throwable {
taskManager.createTask(request.id(), request.spec());
}
|
@Test
public void testWorkersExitingAtDifferentTimes() throws Exception {
MockTime time = new MockTime(0, 0, 0);
Scheduler scheduler = new MockScheduler(time);
try (MiniTrogdorCluster cluster = new MiniTrogdorCluster.Builder().
addCoordinator("node01").
addAgent("node02").
addAgent("node03").
scheduler(scheduler).
build()) {
CoordinatorClient coordinatorClient = cluster.coordinatorClient();
new ExpectedTasks().waitFor(coordinatorClient);
HashMap<String, Long> nodeToExitMs = new HashMap<>();
nodeToExitMs.put("node02", 10L);
nodeToExitMs.put("node03", 20L);
SampleTaskSpec fooSpec =
new SampleTaskSpec(2, 100, nodeToExitMs, "");
coordinatorClient.createTask(new CreateTaskRequest("foo", fooSpec));
new ExpectedTasks().
addTask(new ExpectedTaskBuilder("foo").
taskState(new TaskPending(fooSpec)).
build()).
waitFor(coordinatorClient);
time.sleep(2);
ObjectNode status1 = new ObjectNode(JsonNodeFactory.instance);
status1.set("node02", new TextNode("active"));
status1.set("node03", new TextNode("active"));
new ExpectedTasks().
addTask(new ExpectedTaskBuilder("foo").
taskState(new TaskRunning(fooSpec, 2, status1)).
workerState(new WorkerRunning("foo", fooSpec, 2, new TextNode("active"))).
build()).
waitFor(coordinatorClient).
waitFor(cluster.agentClient("node02")).
waitFor(cluster.agentClient("node03"));
time.sleep(10);
ObjectNode status2 = new ObjectNode(JsonNodeFactory.instance);
status2.set("node02", new TextNode("halted"));
status2.set("node03", new TextNode("active"));
new ExpectedTasks().
addTask(new ExpectedTaskBuilder("foo").
taskState(new TaskRunning(fooSpec, 2, status2)).
workerState(new WorkerRunning("foo", fooSpec, 2, new TextNode("active"))).
build()).
waitFor(coordinatorClient).
waitFor(cluster.agentClient("node03"));
new ExpectedTasks().
addTask(new ExpectedTaskBuilder("foo").
taskState(new TaskRunning(fooSpec, 2, status2)).
workerState(new WorkerDone("foo", fooSpec, 2, 12, new TextNode("halted"), "")).
build()).
waitFor(cluster.agentClient("node02"));
time.sleep(10);
ObjectNode status3 = new ObjectNode(JsonNodeFactory.instance);
status3.set("node02", new TextNode("halted"));
status3.set("node03", new TextNode("halted"));
new ExpectedTasks().
addTask(new ExpectedTaskBuilder("foo").
taskState(new TaskDone(fooSpec, 2, 22, "",
false, status3)).
build()).
waitFor(coordinatorClient);
}
}
|
@Override public String getResourceOutputNodeType() {
return null;
}
|
@Test
public void testGetResourceOutputNodeType() throws Exception {
assertNull( analyzer.getResourceOutputNodeType() );
}
|
@Override
public void run() {
if (processedEvents.get() > 0) {
LOG.debug("checkpointing offset after reaching timeout, with a batch of {}", processedEvents.get());
eventContext.updateCheckpointAsync()
.subscribe(unused -> LOG.debug("Processed one event..."),
error -> LOG.debug("Error when updating Checkpoint: {}", error.getMessage()),
() -> {
LOG.debug("Checkpoint updated.");
processedEvents.set(0);
});
} else {
LOG.debug("skip checkpointing offset even if timeout is reached. No events processed");
}
}
|
@Test
void testProcessedEventsResetWhenCheckpointUpdated() {
var processedEvents = new AtomicInteger(1);
var eventContext = Mockito.mock(EventContext.class);
Mockito.when(eventContext.updateCheckpointAsync())
.thenReturn(Mono.just("").then());
var timerTask = new EventHubsCheckpointUpdaterTimerTask(eventContext, processedEvents);
timerTask.run();
assertEquals(0, processedEvents.get());
}
|
public void removeMembership(String groupMembershipUuid) {
try (DbSession dbSession = dbClient.openSession(false)) {
UserGroupDto userGroupDto = findMembershipOrThrow(groupMembershipUuid, dbSession);
removeMembership(userGroupDto.getGroupUuid(), userGroupDto.getUserUuid());
}
}
|
@Test
public void removeMembership_ifGroupAndUserFound_shouldRemoveMemberFromGroup() {
mockAdminInGroup(GROUP_A, USER_1);
GroupDto groupDto = mockGroupDto();
UserDto userDto = mockUserDto();
groupMembershipService.removeMembership(GROUP_A, USER_1);
verify(userGroupDao).delete(dbSession, groupDto, userDto);
verify(dbSession).commit();
}
|
public synchronized void purgeApplication(String appName) {
File appDir = new File(appsDir, appName);
if (!FilePathValidator.validateFile(appDir, appsDir)) {
throw new ApplicationException("Application attempting to create files outside the apps directory");
}
try {
Tools.removeDirectory(appDir);
} catch (IOException e) {
throw new ApplicationException("Unable to purge application " + appName, e);
}
if (appDir.exists()) {
throw new ApplicationException("Unable to purge application " + appName);
}
}
|
@Test // (expected = ApplicationException.class)
public void purgeBadApp() throws IOException {
aar.purgeApplication("org.foo.BAD");
}
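
FilePathValidator is not shown here; a plausible containment check resolves both paths canonically and requires the child to stay under the parent, which is what defeats the "../" traversal the purge guards against. A hedged sketch of that check:

import java.io.File;
import java.io.IOException;

public class PathGuardSketch {
    static boolean isContainedIn(File child, File parent) throws IOException {
        String childPath = child.getCanonicalPath();   // resolves "..", ".", and symlinks
        String parentPath = parent.getCanonicalPath();
        return childPath.equals(parentPath)
                || childPath.startsWith(parentPath + File.separator);
    }

    public static void main(String[] args) throws IOException {
        File appsDir = new File("/tmp/apps");
        System.out.println(isContainedIn(new File(appsDir, "org.foo.app"), appsDir));   // true
        System.out.println(isContainedIn(new File(appsDir, "../etc/passwd"), appsDir)); // false
    }
}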
|
public static SslContextFactory.Client createClientSideSslContextFactory(AbstractConfig config) {
Map<String, Object> sslConfigValues = config.valuesWithPrefixAllOrNothing("listeners.https.");
final SslContextFactory.Client ssl = new SslContextFactory.Client();
configureSslContextFactoryKeyStore(ssl, sslConfigValues);
configureSslContextFactoryTrustStore(ssl, sslConfigValues);
configureSslContextFactoryAlgorithms(ssl, sslConfigValues);
configureSslContextFactoryEndpointIdentification(ssl, sslConfigValues);
return ssl;
}
|
@Test
public void testCreateClientSideSslContextFactory() {
Map<String, String> configMap = new HashMap<>();
configMap.put("ssl.keystore.location", "/path/to/keystore");
configMap.put("ssl.keystore.password", "123456");
configMap.put("ssl.key.password", "123456");
configMap.put("ssl.truststore.location", "/path/to/truststore");
configMap.put("ssl.truststore.password", "123456");
configMap.put("ssl.provider", "SunJSSE");
configMap.put("ssl.cipher.suites", "SSL_RSA_WITH_RC4_128_SHA,SSL_RSA_WITH_RC4_128_MD5");
configMap.put("ssl.secure.random.implementation", "SHA1PRNG");
configMap.put("ssl.client.auth", "required");
configMap.put("ssl.endpoint.identification.algorithm", "HTTPS");
configMap.put("ssl.keystore.type", "JKS");
configMap.put("ssl.protocol", "TLS");
configMap.put("ssl.truststore.type", "JKS");
configMap.put("ssl.enabled.protocols", "TLSv1.2,TLSv1.1,TLSv1");
configMap.put("ssl.keymanager.algorithm", "SunX509");
configMap.put("ssl.trustmanager.algorithm", "PKIX");
RestServerConfig config = RestServerConfig.forPublic(null, configMap);
SslContextFactory.Client ssl = SSLUtils.createClientSideSslContextFactory(config);
assertEquals("file:///path/to/keystore", ssl.getKeyStorePath());
assertEquals("file:///path/to/truststore", ssl.getTrustStorePath());
assertEquals("SunJSSE", ssl.getProvider());
assertArrayEquals(new String[] {"SSL_RSA_WITH_RC4_128_SHA", "SSL_RSA_WITH_RC4_128_MD5"}, ssl.getIncludeCipherSuites());
assertEquals("SHA1PRNG", ssl.getSecureRandomAlgorithm());
assertEquals("JKS", ssl.getKeyStoreType());
assertEquals("JKS", ssl.getTrustStoreType());
assertEquals("TLS", ssl.getProtocol());
assertArrayEquals(new String[] {"TLSv1.2", "TLSv1.1", "TLSv1"}, ssl.getIncludeProtocols());
assertEquals("SunX509", ssl.getKeyManagerFactoryAlgorithm());
assertEquals("PKIX", ssl.getTrustManagerFactoryAlgorithm());
}
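
The factory above reads its SSL settings through valuesWithPrefixAllOrNothing("listeners.https."): if any key carries that prefix, only the prefixed keys (with the prefix stripped) are used; otherwise the bare keys apply. An illustrative re-implementation of that resolution on a plain Map:

import java.util.HashMap;
import java.util.Map;

public class PrefixConfigSketch {
    static Map<String, String> withPrefixAllOrNothing(Map<String, String> config, String prefix) {
        Map<String, String> prefixed = new HashMap<>();
        for (Map.Entry<String, String> e : config.entrySet()) {
            if (e.getKey().startsWith(prefix)) {
                prefixed.put(e.getKey().substring(prefix.length()), e.getValue());
            }
        }
        return prefixed.isEmpty() ? new HashMap<>(config) : prefixed;
    }

    public static void main(String[] args) {
        Map<String, String> config = new HashMap<>();
        config.put("ssl.protocol", "TLS");
        config.put("listeners.https.ssl.protocol", "TLSv1.2");
        // Prints {ssl.protocol=TLSv1.2}: the prefixed key wins and the bare key is ignored entirely.
        System.out.println(withPrefixAllOrNothing(config, "listeners.https."));
    }
}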
|
static void verifyDeterministic(ProtoCoder<?> coder) throws NonDeterministicException {
Class<? extends Message> message = coder.getMessageType();
ExtensionRegistry registry = coder.getExtensionRegistry();
Set<Descriptor> descriptors = getRecursiveDescriptorsForClass(message, registry);
for (Descriptor d : descriptors) {
for (FieldDescriptor fd : d.getFields()) {
// If there is a transitively reachable Protocol Buffers map field, then this object cannot
// be encoded deterministically.
if (fd.isMapField()) {
String reason =
String.format(
"Protocol Buffers message %s transitively includes Map field %s (from file %s)."
+ " Maps cannot be deterministically encoded.",
message.getName(), fd.getFullName(), fd.getFile().getFullName());
throw new NonDeterministicException(coder, reason);
}
}
}
}
|
@Test
public void testMessageWithTransitiveMapIsNotDeterministic() throws NonDeterministicException {
String mapFieldName = MessageWithMap.getDescriptor().findFieldByNumber(1).getFullName();
thrown.expect(NonDeterministicException.class);
thrown.expectMessage(ReferencesMessageWithMap.class.getName());
thrown.expectMessage("transitively includes Map field " + mapFieldName);
thrown.expectMessage("file " + MessageWithMap.getDescriptor().getFile().getName());
verifyDeterministic(ProtoCoder.of(ReferencesMessageWithMap.class));
}
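
The verification above is a transitive walk over message descriptors that fails on the first map field. A sketch of the same walk with the protobuf Descriptors API; the extension registry is omitted, and com.google.protobuf.Any serves only as a convenient built-in message for the demo:

import java.util.HashSet;
import java.util.Set;
import com.google.protobuf.Any;
import com.google.protobuf.Descriptors.Descriptor;
import com.google.protobuf.Descriptors.FieldDescriptor;

public class MapFieldScanSketch {
    static boolean hasTransitiveMapField(Descriptor descriptor, Set<Descriptor> visited) {
        if (!visited.add(descriptor)) {
            return false; // already scanned; guards against recursive message types
        }
        for (FieldDescriptor field : descriptor.getFields()) {
            if (field.isMapField()) {
                return true;
            }
            if (field.getJavaType() == FieldDescriptor.JavaType.MESSAGE
                    && hasTransitiveMapField(field.getMessageType(), visited)) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        // Any has only a string field and a bytes field, so no map is reachable.
        System.out.println(hasTransitiveMapField(Any.getDescriptor(), new HashSet<>())); // false
    }
}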
|