| focal_method | test_case |
|---|---|
@Override
public Future<RestResponse> restRequest(RestRequest request)
{
return restRequest(request, new RequestContext());
}
|
@Test
public void testRestRetry() throws Exception
{
SimpleLoadBalancer balancer = prepareLoadBalancer(Arrays.asList("http://test.linkedin.com/retry1", "http://test.linkedin.com/good"),
HttpClientFactory.UNLIMITED_CLIENT_REQUEST_RETRY_RATIO);
DynamicClient dynamicClient = new DynamicClient(balancer, null);
RetryClient client = new RetryClient(
dynamicClient,
balancer,
D2ClientConfig.DEFAULT_RETRY_LIMIT,
RetryClient.DEFAULT_UPDATE_INTERVAL_MS,
RetryClient.DEFAULT_AGGREGATED_INTERVAL_NUM,
SystemClock.instance(),
true,
false);
URI uri = URI.create("d2://retryService?arg1arg2");
RestRequest restRequest = new RestRequestBuilder(uri).setEntity(CONTENT).build();
DegraderTrackerClientTest.TestCallback<RestResponse> restCallback = new DegraderTrackerClientTest.TestCallback<>();
client.restRequest(restRequest, restCallback);
assertNull(restCallback.e);
assertNotNull(restCallback.t);
}
|
@Override
public ExecuteContext doAfter(ExecuteContext context) {
final Object result = context.getResult();
if (result != null) {
// The original registry is restored
RegisterContext.INSTANCE.compareAndSet(false, true);
}
return context;
}
|
@Test
public void doAfter() throws NoSuchMethodException {
final ExecuteContext context = buildContext();
context.changeResult(new Object());
interceptor.doAfter(context);
Assert.assertTrue(RegisterContext.INSTANCE.isAvailable());
RegisterContext.INSTANCE.compareAndSet(true, false);
}
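RegisterContext isn't shown in this pair; a minimal sketch of how such a flag holder might look, assuming it is an enum singleton wrapping an AtomicBoolean (both assumptions, inferred from the calls above):

import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical stand-in for RegisterContext, inferred from its usage above.
public enum RegisterContext {
    INSTANCE;

    private final AtomicBoolean available = new AtomicBoolean(false);

    // Atomically flips the flag from expect to update; returns true on success.
    public boolean compareAndSet(boolean expect, boolean update) {
        return available.compareAndSet(expect, update);
    }

    public boolean isAvailable() {
        return available.get();
    }
}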
|
@Override
public Result responseMessageForCheckConnectionToPackage(String responseBody) {
return jsonResultMessageHandler.toResult(responseBody);
}
|
@Test
public void shouldBuildFailureResultFromCheckPackageConnectionResponse() throws Exception {
String responseBody = "{\"status\":\"failure\",\"messages\":[\"message-one\",\"message-two\"]}";
Result result = messageHandler.responseMessageForCheckConnectionToPackage(responseBody);
assertFailureResult(result, List.of("message-one", "message-two"));
}
|
public static StructType groupingKeyType(Schema schema, Collection<PartitionSpec> specs) {
return buildPartitionProjectionType("grouping key", specs, commonActiveFieldIds(schema, specs));
}
|
@Test
public void testGroupingKeyTypeWithSpecEvolutionInV2Tables() {
TestTables.TestTable table =
TestTables.create(tableDir, "test", SCHEMA, BY_DATA_SPEC, V2_FORMAT_VERSION);
table.updateSpec().addField(Expressions.bucket("category", 8)).commit();
assertThat(table.specs()).hasSize(2);
StructType expectedType =
StructType.of(NestedField.optional(1000, "data", Types.StringType.get()));
StructType actualType = Partitioning.groupingKeyType(table.schema(), table.specs().values());
assertThat(actualType).isEqualTo(expectedType);
}
|
public static Read read() {
return new AutoValue_RedisIO_Read.Builder()
.setConnectionConfiguration(RedisConnectionConfiguration.create())
.setKeyPattern("*")
.setBatchSize(1000)
.setOutputParallelization(true)
.build();
}
|
@Test
public void testRead() {
List<KV<String, String>> data = buildIncrementalData("bulkread", 10);
data.forEach(kv -> client.set(kv.getKey(), kv.getValue()));
PCollection<KV<String, String>> read =
p.apply(
"Read",
RedisIO.read()
.withEndpoint(REDIS_HOST, port)
.withKeyPattern("bulkread*")
.withBatchSize(10));
PAssert.that(read).containsInAnyOrder(data);
p.run();
}
|
public static List<Validation> computeFlagsFromCSVString(String csvString,
Log log) {
List<Validation> flags = new ArrayList<>();
boolean resetFlag = false;
for (String p : csvString.split(",")) {
try {
flags.add(Validation.valueOf(p));
} catch (IllegalArgumentException e) {
log.info("validateDMN configured with flag: '" + p + "' determines this Mojo will not be executed (reset all flags).");
resetFlag = true;
}
}
if (resetFlag) {
flags.clear();
}
return flags;
}
|
@Test
public void testFlagsDisable() {
List<DMNValidator.Validation> result = DMNValidationHelper.computeFlagsFromCSVString("disabled", log);
assertThat(result).isNotNull()
.hasSize(0);
}
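The reset semantics above mean a single unrecognized token (such as the "disabled" used in the test) discards every flag that did parse. A self-contained sketch, with a hypothetical two-value enum standing in for DMNValidator.Validation and logging omitted:

import java.util.ArrayList;
import java.util.List;

public class FlagsDemo {
    // Hypothetical subset of the real Validation enum.
    enum Validation { VALIDATE_SCHEMA, VALIDATE_MODEL }

    static List<Validation> computeFlags(String csv) {
        List<Validation> flags = new ArrayList<>();
        boolean resetFlag = false;
        for (String p : csv.split(",")) {
            try {
                flags.add(Validation.valueOf(p));
            } catch (IllegalArgumentException e) {
                resetFlag = true; // unknown token: clear all flags below
            }
        }
        if (resetFlag) {
            flags.clear();
        }
        return flags;
    }

    public static void main(String[] args) {
        System.out.println(computeFlags("VALIDATE_SCHEMA,VALIDATE_MODEL")); // [VALIDATE_SCHEMA, VALIDATE_MODEL]
        System.out.println(computeFlags("VALIDATE_SCHEMA,disabled"));       // [] -- the reset wins
    }
}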
|
@Operation(summary = "createProcessDefinition", description = "CREATE_PROCESS_DEFINITION_NOTES")
@Parameters({
@Parameter(name = "name", description = "PROCESS_DEFINITION_NAME", required = true, schema = @Schema(implementation = String.class)),
@Parameter(name = "locations", description = "PROCESS_DEFINITION_LOCATIONS", required = true, schema = @Schema(implementation = String.class)),
@Parameter(name = "description", description = "PROCESS_DEFINITION_DESC", required = false, schema = @Schema(implementation = String.class)),
@Parameter(name = "otherParamsJson", description = "OTHER_PARAMS_JSON", required = false, schema = @Schema(implementation = String.class))
})
@PostMapping()
@ResponseStatus(HttpStatus.CREATED)
@ApiException(CREATE_PROCESS_DEFINITION_ERROR)
public Result createProcessDefinition(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@Parameter(name = "projectCode", description = "PROJECT_CODE", required = true) @PathVariable long projectCode,
@RequestParam(value = "name", required = true) String name,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "globalParams", required = false, defaultValue = "[]") String globalParams,
@RequestParam(value = "locations", required = false) String locations,
@RequestParam(value = "timeout", required = false, defaultValue = "0") int timeout,
@RequestParam(value = "taskRelationJson", required = true) String taskRelationJson,
@RequestParam(value = "taskDefinitionJson", required = true) String taskDefinitionJson,
@RequestParam(value = "otherParamsJson", required = false) String otherParamsJson,
@RequestParam(value = "executionType", defaultValue = "PARALLEL") ProcessExecutionTypeEnum executionType) {
Map<String, Object> result = processDefinitionService.createProcessDefinition(loginUser, projectCode, name,
description, globalParams,
locations, timeout, taskRelationJson, taskDefinitionJson, otherParamsJson, executionType);
return returnDataList(result);
}
|
@Test
public void testCreateProcessDefinition() {
String relationJson =
"[{\"name\":\"\",\"pre_task_code\":0,\"pre_task_version\":0,\"post_task_code\":123456789,\"post_task_version\":1,"
+ "\"condition_type\":0,\"condition_params\":\"{}\"},{\"name\":\"\",\"pre_task_code\":123456789,\"pre_task_version\":1,"
+ "\"post_task_code\":123451234,\"post_task_version\":1,\"condition_type\":0,\"condition_params\":\"{}\"}]";
String taskDefinitionJson =
"[{\"name\":\"detail_up\",\"description\":\"\",\"taskType\":\"SHELL\",\"taskParams\":"
+ "\"{\\\"resourceList\\\":[],\\\"localParams\\\":[{\\\"prop\\\":\\\"datetime\\\",\\\"direct\\\":\\\"IN\\\","
+ "\\\"type\\\":\\\"VARCHAR\\\",\\\"value\\\":\\\"${system.datetime}\\\"}],\\\"rawScript\\\":"
+ "\\\"echo ${datetime}\\\",\\\"conditionResult\\\":\\\"{\\\\\\\"successNode\\\\\\\":[\\\\\\\"\\\\\\\"],"
+ "\\\\\\\"failedNode\\\\\\\":[\\\\\\\"\\\\\\\"]}\\\",\\\"dependence\\\":{}}\",\"flag\":0,\"taskPriority\":0,"
+ "\"workerGroup\":\"default\",\"failRetryTimes\":0,\"failRetryInterval\":0,\"timeoutFlag\":0,"
+ "\"timeoutNotifyStrategy\":0,\"timeout\":0,\"delayTime\":0,\"resourceIds\":\"\"}]";
long projectCode = 1L;
String name = "dag_test";
String description = "desc test";
String globalParams = "[]";
String locations = "[]";
int timeout = 0;
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.SUCCESS);
result.put(Constants.DATA_LIST, 1);
Mockito.when(
processDefinitionService.createProcessDefinition(user, projectCode, name, description, globalParams,
locations, timeout, relationJson, taskDefinitionJson, "",
ProcessExecutionTypeEnum.PARALLEL))
.thenReturn(result);
Result response =
processDefinitionController.createProcessDefinition(user, projectCode, name, description, globalParams,
locations, timeout, relationJson, taskDefinitionJson, "",
ProcessExecutionTypeEnum.PARALLEL);
Assertions.assertEquals(Status.SUCCESS.getCode(), response.getCode().intValue());
}
|
@Override
public List<HouseTable> findAllByDatabaseId(String databaseId) {
Map<String, String> params = new HashMap<>();
if (Strings.isNotEmpty(databaseId)) {
params.put("databaseId", databaseId);
}
return getHtsRetryTemplate(
Arrays.asList(
HouseTableRepositoryStateUnkownException.class, IllegalStateException.class))
.execute(
context ->
apiInstance
.getUserTables(params)
.map(GetAllEntityResponseBodyUserTable::getResults)
.flatMapMany(Flux::fromIterable)
.map(houseTableMapper::toHouseTable)
.collectList()
.block(Duration.ofSeconds(REQUEST_TIMEOUT_SECONDS)));
}
|
@Test
public void testListWithEmptyResult() {
// Should not fail; should gracefully return an empty list.
List<UserTable> tables = new ArrayList<>();
GetAllEntityResponseBodyUserTable listResponse = new GetAllEntityResponseBodyUserTable();
Field resultField =
ReflectionUtils.findField(GetAllEntityResponseBodyUserTable.class, "results");
Assertions.assertNotNull(resultField);
ReflectionUtils.makeAccessible(resultField);
ReflectionUtils.setField(resultField, listResponse, tables);
mockHtsServer.enqueue(
new MockResponse()
.setResponseCode(200)
.setBody((new Gson()).toJson(listResponse))
.addHeader("Content-Type", "application/json"));
List<HouseTable> returnList = htsRepo.findAllByDatabaseId(HOUSE_TABLE.getDatabaseId());
assertThat(returnList).hasSize(0);
}
|
@Override
public void write(OutputStream os) throws IOException {
IOUtils.skipFully(is, offset);
long bytes = 0L;
if (len == -1) {
// Use the configured buffer size instead of hardcoding to 4k
bytes = FSOperations.copyBytes(is, os);
} else {
bytes = FSOperations.copyBytes(is, os, len);
}
// Update metrics.
HttpFSServerMetrics metrics = HttpFSServerWebApp.get().getMetrics();
if (metrics != null) {
metrics.incrBytesRead(bytes);
}
}
|
@Test
public void test() throws Exception {
InputStream is = new ByteArrayInputStream("abc".getBytes());
ByteArrayOutputStream baos = new ByteArrayOutputStream();
InputStreamEntity i = new InputStreamEntity(is);
i.write(baos);
baos.close();
assertEquals(new String(baos.toByteArray()), "abc");
is = new ByteArrayInputStream("abc".getBytes());
baos = new ByteArrayOutputStream();
i = new InputStreamEntity(is, 1, 1);
i.write(baos);
baos.close();
assertEquals(baos.toByteArray()[0], 'b');
}
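The skip-then-copy contract above (skip offset bytes, then copy len bytes, or everything when len == -1) can be shown with plain java.io; this is a simplified stand-in, not the HttpFS implementation:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public class OffsetCopyDemo {
    // Skips 'offset' bytes, then copies 'len' bytes (all remaining if len == -1).
    static void copy(InputStream is, OutputStream os, long offset, long len) throws IOException {
        if (is.skip(offset) != offset) {
            throw new IOException("Unexpected EOF while skipping");
        }
        byte[] buf = new byte[4096];
        long remaining = (len == -1) ? Long.MAX_VALUE : len;
        int n;
        while (remaining > 0 && (n = is.read(buf, 0, (int) Math.min(buf.length, remaining))) != -1) {
            os.write(buf, 0, n);
            remaining -= n;
        }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        copy(new ByteArrayInputStream("abc".getBytes()), out, 1, 1);
        System.out.println(out); // prints "b", matching the second half of the test
    }
}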
|
public static LinkedHashSet<Class<?>> listBeansRecursiveInclusive(Class<?> beanClass) {
return listBeansRecursiveInclusive(beanClass, new LinkedHashSet<>());
}
|
@Test(expectedExceptions = UnsupportedOperationException.class)
public void listBeansCyclic() {
LinkedHashSet<Class<?>> classes = TypeUtils.listBeansRecursiveInclusive(Cyclic.class);
// System.out.println(classes);
assertEquals(classes.size(), 2);
}
|
@Override
public ExecuteContext doBefore(ExecuteContext context) {
DatabaseInfo databaseInfo = getDataBaseInfo(context);
String database = databaseInfo.getDatabaseName();
Query query = (Query) context.getArguments()[0];
String sql = query.toString((ParameterList) context.getArguments()[1]);
handleWriteOperationIfWriteDisabled(sql, database,
DatabaseWriteProhibitionManager.getOpenGaussProhibitionDatabases(), context);
return context;
}
|
@Test
public void testDoBefore() throws Exception {
// Database write prohibition switch turned off
GLOBAL_CONFIG.setEnableOpenGaussWriteProhibition(false);
ExecuteContext context = ExecuteContext.forMemberMethod(queryExecutor, methodMock, argument, null, null);
queryExecutorImplInterceptor.before(context);
Assert.assertNull(context.getThrowableOut());
// The database write prohibition switch is turned off, and the write prohibition database set contains the
// intercepted database
Set<String> databases = new HashSet<>();
databases.add("database-test");
GLOBAL_CONFIG.setOpenGaussDatabases(databases);
queryExecutorImplInterceptor.before(context);
Assert.assertNull(context.getThrowableOut());
// The database write prohibition switch is turned on, and the write prohibition database collection contains
// the intercepted databases
GLOBAL_CONFIG.setEnableOpenGaussWriteProhibition(true);
context = ExecuteContext.forMemberMethod(queryExecutor, methodMock, argument, null, null);
queryExecutorImplInterceptor.before(context);
Assert.assertEquals("Database prohibit to write, database: database-test",
context.getThrowableOut().getMessage());
// The database write prohibition switch is turned on, and the write prohibition database collection contains
// the intercepted database. SQL does not perform write operations
Query readQuery = new BatchedQuery(new NativeQuery(READ_SQL, null),
null, 0, 0, false);
context = ExecuteContext.forMemberMethod(queryExecutor, methodMock,
new Object[]{readQuery, null, null, null, null, null, null}, null, null);
queryExecutorImplInterceptor.before(context);
Assert.assertNull(context.getThrowableOut());
// The database write prohibition switch is turned on, and the write prohibition database collection does not
// contain the intercepted database
GLOBAL_CONFIG.setOpenGaussDatabases(new HashSet<>());
context = ExecuteContext.forMemberMethod(queryExecutor, methodMock, argument, null, null);
queryExecutorImplInterceptor.before(context);
Assert.assertNull(context.getThrowableOut());
}
|
public List<Long> getAggregatedSubpartitionBytes() {
checkState(aggregatedSubpartitionBytes != null, "Not all partition infos are ready");
return Collections.unmodifiableList(aggregatedSubpartitionBytes);
}
|
@Test
void testGetAggregatedSubpartitionBytes() {
AllToAllBlockingResultInfo resultInfo =
new AllToAllBlockingResultInfo(new IntermediateDataSetID(), 2, 2, false);
resultInfo.recordPartitionInfo(0, new ResultPartitionBytes(new long[] {32L, 64L}));
resultInfo.recordPartitionInfo(1, new ResultPartitionBytes(new long[] {128L, 256L}));
assertThat(resultInfo.getAggregatedSubpartitionBytes()).containsExactly(160L, 320L);
}
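The expected values are element-wise sums of the recorded per-partition arrays: [32, 64] + [128, 256] = [160, 320]. A minimal sketch of that aggregation:

import java.util.Arrays;

public class AggregateDemo {
    // Element-wise sum of per-partition subpartition byte counts.
    static long[] aggregate(long[][] partitionBytes) {
        long[] totals = new long[partitionBytes[0].length];
        for (long[] partition : partitionBytes) {
            for (int i = 0; i < totals.length; i++) {
                totals[i] += partition[i];
            }
        }
        return totals;
    }

    public static void main(String[] args) {
        long[][] recorded = {{32L, 64L}, {128L, 256L}};
        System.out.println(Arrays.toString(aggregate(recorded))); // [160, 320]
    }
}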
|
PMML4Result getPMML4Result(final KiePMMLMiningModel toEvaluate,
final LinkedHashMap<String, KiePMMLNameValueProbabilityMapTuple> inputData,
final PMMLRuntimeContext pmmlContext) {
final MULTIPLE_MODEL_METHOD multipleModelMethod = toEvaluate.getSegmentation().getMultipleModelMethod();
Object result = null;
LinkedHashMap<String, Double> probabilityResultMap = null;
ResultCode resultCode = OK;
final LinkedHashMap<String, KiePMMLNameValue> toUseForPrediction = new LinkedHashMap<>();
final LinkedHashMap<String, List<KiePMMLNameValue>> toUseForProbability = new LinkedHashMap<>();
inputData.forEach((key, value) -> {
toUseForPrediction.put(key, value.predictionValue);
toUseForProbability.put(key, value.probabilityValues);
});
try {
if (MINING_FUNCTION.CLASSIFICATION.equals(toEvaluate.getMiningFunction())) {
result = multipleModelMethod.applyClassification(toUseForPrediction);
probabilityResultMap = multipleModelMethod.applyProbability(toUseForProbability);
} else {
result = multipleModelMethod.applyPrediction(toUseForPrediction);
}
} catch (KieEnumException e) {
logger.warn(e.getMessage());
resultCode = FAIL;
}
pmmlContext.setProbabilityResultMap(probabilityResultMap);
PMML4Result toReturn = new PMML4Result();
toReturn.addResultVariable(toEvaluate.getTargetField(), result);
toReturn.setResultObjectName(toEvaluate.getTargetField());
toReturn.setResultCode(resultCode.getName());
return toReturn;
}
|
@Test
void getPMML4ResultOK() {
String fileName = "FILENAME";
String name = "NAME";
String targetField = "TARGET";
String prediction = "FIRST_VALUE";
KiePMMLSegmentation kiePMMLSegmentation = KiePMMLSegmentation.builder("SEGM_1", Collections.emptyList(), SELECT_FIRST).build();
KiePMMLMiningModel kiePMMLMiningModel = KiePMMLMiningModel.builder(fileName, name, Collections.emptyList(),
MINING_FUNCTION.ASSOCIATION_RULES)
.withTargetField(targetField)
.withSegmentation(kiePMMLSegmentation)
.build();
final LinkedHashMap<String, PMMLMiningModelEvaluator.KiePMMLNameValueProbabilityMapTuple> inputData = new LinkedHashMap<>();
inputData.put("FIRST_KEY", new PMMLMiningModelEvaluator.KiePMMLNameValueProbabilityMapTuple(new KiePMMLNameValue("FIRST_NAME", prediction), new ArrayList<>()));
inputData.put("SECOND_KEY", new PMMLMiningModelEvaluator.KiePMMLNameValueProbabilityMapTuple(new KiePMMLNameValue("SECOND_NAME", "SECOND_VALUE"), new ArrayList<>()));
PMML4Result retrieved = evaluator.getPMML4Result(kiePMMLMiningModel, inputData, new PMMLRuntimeContextTest());
assertThat(retrieved).isNotNull();
assertThat(retrieved.getResultCode()).isEqualTo(OK.getName());
assertThat(retrieved.getResultObjectName()).isEqualTo(targetField);
final Map<String, Object> resultVariables = retrieved.getResultVariables();
assertThat(resultVariables).containsKey(targetField);
assertThat(resultVariables.get(targetField)).isEqualTo(prediction);
}
|
public static void toast(Context context, @StringRes int message) {
// this is a static method so it is easier to call,
// as the context checking and casting is done for you
if (context == null) return;
if (!(context instanceof Application)) {
context = context.getApplicationContext();
}
if (context instanceof Application) {
final Context c = context;
final @StringRes int m = message;
getInstance().runInApplicationThread(() -> Toast.makeText(c, m, Toast.LENGTH_LONG).show());
}
}
|
@Test
public void testToastWithStringRes() {
AppConfig.toast(ApplicationProvider.getApplicationContext(), R.string.ok);
shadowOf(getMainLooper()).idle();
await().atMost(5, TimeUnit.SECONDS).until(() -> ShadowToast.getLatestToast() != null);
assertEquals(
ApplicationProvider.getApplicationContext().getString(R.string.ok),
ShadowToast.getTextOfLatestToast());
}
|
@Get(uri = "inputs")
@ExecuteOn(TaskExecutors.IO)
@Operation(
tags = {"Plugins"},
summary = "Get all types for an inputs"
)
public List<InputType> inputs() throws ClassNotFoundException {
return Stream.of(Type.values())
.map(throwFunction(type -> new InputType(type.name(), type.cls().getName())))
.toList();
}
|
@Test
void inputs() throws URISyntaxException {
Helpers.runApplicationContext((applicationContext, embeddedServer) -> {
ReactorHttpClient client = ReactorHttpClient.create(embeddedServer.getURL());
List<InputType> doc = client.toBlocking().retrieve(
HttpRequest.GET("/api/v1/plugins/inputs"),
Argument.listOf(InputType.class)
);
assertThat(doc.size(), is(16));
});
}
|
public static <T> T fillBean(String className, Map<List<String>, Object> params, ClassLoader classLoader) {
return fillBean(errorEmptyMessage(), className, params, classLoader);
}
|
@Test(expected = ScenarioException.class)
public void fillBeanLoadClassTest() {
ScenarioBeanUtil.fillBean(errorEmptyMessage(), "FakeCanonicalName", new HashMap<>(), classLoader);
}
|
@VisibleForTesting
static Instant getCreationTime(String configuredCreationTime, ProjectProperties projectProperties)
throws DateTimeParseException, InvalidCreationTimeException {
try {
switch (configuredCreationTime) {
case "EPOCH":
return Instant.EPOCH;
case "USE_CURRENT_TIMESTAMP":
projectProperties.log(
LogEvent.debug(
"Setting image creation time to current time; your image may not be reproducible."));
return Instant.now();
default:
DateTimeFormatter formatter =
new DateTimeFormatterBuilder()
.append(DateTimeFormatter.ISO_DATE_TIME) // parses isoStrict
// add ability to parse with no ":" in tz
.optionalStart()
.appendOffset("+HHmm", "+0000")
.optionalEnd()
.toFormatter();
return formatter.parse(configuredCreationTime, Instant::from);
}
} catch (DateTimeParseException ex) {
throw new InvalidCreationTimeException(configuredCreationTime, configuredCreationTime, ex);
}
}
|
@Test
public void testGetCreationTime_invalidValue() {
InvalidCreationTimeException exception =
assertThrows(
InvalidCreationTimeException.class,
() ->
PluginConfigurationProcessor.getCreationTime("invalid format", projectProperties));
assertThat(exception).hasMessageThat().isEqualTo("invalid format");
assertThat(exception.getInvalidCreationTime()).isEqualTo("invalid format");
}
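The formatter in the default branch accepts strict ISO-8601 and, through the optional section, offsets written without a colon. A runnable sketch of the same builder chain:

import java.time.Instant;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder;

public class CreationTimeDemo {
    public static void main(String[] args) {
        DateTimeFormatter formatter = new DateTimeFormatterBuilder()
                .append(DateTimeFormatter.ISO_DATE_TIME)
                .optionalStart()
                .appendOffset("+HHmm", "+0000") // also accept offsets without ":"
                .optionalEnd()
                .toFormatter();
        // Both spellings of the offset parse to the same instant.
        Instant a = formatter.parse("2020-06-08T14:54:36+00:00", Instant::from);
        Instant b = formatter.parse("2020-06-08T14:54:36+0000", Instant::from);
        System.out.println(a.equals(b)); // true
    }
}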
|
@Nullable static String channelName(@Nullable Destination destination) {
if (destination == null) return null;
boolean isQueue = isQueue(destination);
try {
if (isQueue) {
return ((Queue) destination).getQueueName();
} else {
return ((Topic) destination).getTopicName();
}
} catch (Throwable t) {
propagateIfFatal(t);
log(t, "error getting destination name from {0}", destination, null);
}
return null;
}
|
@Test void channelName_queueAndTopic_queueOnQueueName() throws JMSException {
QueueAndTopic destination = mock(QueueAndTopic.class);
when(destination.getQueueName()).thenReturn("queue-foo");
assertThat(MessageParser.channelName(destination))
.isEqualTo("queue-foo");
}
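The test relies on queue resolution winning for a destination that implements both Queue and Topic. A hedged sketch with isQueue reduced to a plain instanceof check (an assumption; the real helper and its error handling are more involved):

import javax.jms.Destination;
import javax.jms.JMSException;
import javax.jms.Queue;
import javax.jms.Topic;

public class ChannelNameDemo {
    // Assumed check: anything implementing Queue resolves as a queue first,
    // which matches the queueOnQueueName expectation above.
    static boolean isQueue(Destination destination) {
        return destination instanceof Queue;
    }

    // Simplified variant of channelName; the real method also logs and
    // propagates fatal errors instead of throwing.
    static String channelName(Destination destination) throws JMSException {
        if (destination == null) return null;
        return isQueue(destination)
                ? ((Queue) destination).getQueueName()
                : ((Topic) destination).getTopicName();
    }
}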
|
@VisibleForTesting
void validateCaptcha(AuthLoginReqVO reqVO) {
// If the captcha feature is disabled, skip validation
if (!captchaEnable) {
return;
}
// Validate the captcha
ValidationUtils.validate(validator, reqVO, AuthLoginReqVO.CodeEnableGroup.class);
CaptchaVO captchaVO = new CaptchaVO();
captchaVO.setCaptchaVerification(reqVO.getCaptchaVerification());
ResponseModel response = captchaService.verification(captchaVO);
// Verification failed
if (!response.isSuccess()) {
// Record a failed login log (incorrect captcha)
createLoginLog(null, reqVO.getUsername(), LoginLogTypeEnum.LOGIN_USERNAME, LoginResultEnum.CAPTCHA_CODE_ERROR);
throw exception(AUTH_LOGIN_CAPTCHA_CODE_ERROR, response.getRepMsg());
}
}
|
@Test
public void testValidateCaptcha_successWithDisable() {
// Prepare parameters
AuthLoginReqVO reqVO = randomPojo(AuthLoginReqVO.class);
// Mock: captcha disabled
ReflectUtil.setFieldValue(authService, "captchaEnable", false);
// Invoke; no assertion needed
authService.validateCaptcha(reqVO);
}
|
@Override
public URL getURL(final int columnIndex) throws SQLException {
return (URL) mergeResultSet.getValue(columnIndex, URL.class);
}
|
@Test
void assertGetURLWithColumnLabel() throws SQLException, MalformedURLException {
when(mergeResultSet.getValue(1, URL.class)).thenReturn(new URL("http://xxx.xxx"));
assertThat(shardingSphereResultSet.getURL("label"), is(new URL("http://xxx.xxx")));
}
|
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter,
MetricsRecorder metricsRecorder,
BufferSupplier bufferSupplier) {
if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) {
// check the magic value
if (!records.hasMatchingMagic(toMagic))
return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder);
else
// Do in-place validation, offset assignment and maybe set timestamp
return assignOffsetsNonCompressed(offsetCounter, metricsRecorder);
} else
return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier);
}
|
@Test
public void testInvalidCreateTimeCompressedV2() {
long now = System.currentTimeMillis();
Compression compression = Compression.gzip().build();
MemoryRecords records = createRecords(
RecordBatch.MAGIC_VALUE_V2,
now - 1001L,
compression
);
assertThrows(RecordValidationException.class, () ->
new LogValidator(
records,
new TopicPartition("topic", 0),
time,
CompressionType.GZIP,
compression,
false,
RecordBatch.MAGIC_VALUE_V2,
TimestampType.CREATE_TIME,
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(0),
metricsRecorder,
RequestLocal.withThreadConfinedCaching().bufferSupplier()
)
);
}
|
public boolean isValid() {
return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum();
}
|
@Test
public void testInvalidRecordSize() {
MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L,
Compression.NONE, TimestampType.CREATE_TIME,
new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
new SimpleRecord(2L, "b".getBytes(), "2".getBytes()),
new SimpleRecord(3L, "c".getBytes(), "3".getBytes()));
ByteBuffer buffer = records.buffer();
buffer.putInt(DefaultRecordBatch.LENGTH_OFFSET, 10);
DefaultRecordBatch batch = new DefaultRecordBatch(buffer);
assertFalse(batch.isValid());
assertThrows(CorruptRecordException.class, batch::ensureValid);
}
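Validity is a size floor plus a stored-versus-recomputed checksum comparison. A simplified sketch of the checksum half using java.util.zip.CRC32C (the v2 record format also uses CRC32C, computed over the bytes that follow the CRC field):

import java.nio.ByteBuffer;
import java.util.zip.CRC32C;

public class ChecksumDemo {
    // Recomputes CRC32C over the buffer from 'offset' and compares it to 'stored'.
    static boolean checksumMatches(ByteBuffer buffer, int offset, long stored) {
        CRC32C crc = new CRC32C();
        ByteBuffer slice = buffer.duplicate();
        slice.position(offset);
        crc.update(slice);
        return crc.getValue() == stored;
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.wrap("record payload".getBytes());
        CRC32C crc = new CRC32C();
        crc.update(buf.duplicate());
        System.out.println(checksumMatches(buf, 0, crc.getValue())); // true
    }
}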
|
@Override
public <VOut> KStream<K, VOut> processValues(
final FixedKeyProcessorSupplier<? super K, ? super V, VOut> processorSupplier,
final String... stateStoreNames
) {
return processValues(
processorSupplier,
Named.as(builder.newProcessorName(PROCESSVALUES_NAME)),
stateStoreNames
);
}
|
@Test
public void shouldNotAllowNullNamedOnProcessValuesWithStores() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.processValues(fixedKeyProcessorSupplier, (Named) null, "storeName"));
assertThat(exception.getMessage(), equalTo("named can't be null"));
}
|
@Override
public StorageObject upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener,
final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
final ThreadPool pool = ThreadPoolFactory.get("multipart", concurrency);
try {
MultipartUpload multipart = null;
try {
if(status.isAppend()) {
final List<MultipartUpload> list = multipartService.find(file);
if(!list.isEmpty()) {
multipart = list.iterator().next();
}
}
}
catch(AccessDeniedException | InteroperabilityException e) {
log.warn(String.format("Ignore failure listing incomplete multipart uploads. %s", e));
}
final Path bucket = containerService.getContainer(file);
final List<MultipartPart> completed = new ArrayList<>();
// Not found or new upload
if(null == multipart) {
if(log.isInfoEnabled()) {
log.info("No pending multipart upload found");
}
final S3Object object = new S3WriteFeature(session, acl).getDetails(file, status);
// ID for the initiated multipart upload.
multipart = session.getClient().multipartStartUpload(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), object);
if(log.isDebugEnabled()) {
log.debug(String.format("Multipart upload started for %s with ID %s", multipart.getObjectKey(), multipart.getUploadId()));
}
}
else {
if(status.isAppend()) {
// Add already completed parts
completed.addAll(multipartService.list(multipart));
}
}
// Full size of file
final long size = status.getOffset() + status.getLength();
final List<Future<MultipartPart>> parts = new ArrayList<>();
long remaining = status.getLength();
long offset = 0;
for(int partNumber = 1; remaining > 0; partNumber++) {
boolean skip = false;
if(status.isAppend()) {
if(log.isInfoEnabled()) {
log.info(String.format("Determine if part number %d can be skipped", partNumber));
}
for(MultipartPart c : completed) {
if(c.getPartNumber().equals(partNumber)) {
if(log.isInfoEnabled()) {
log.info(String.format("Skip completed part number %d", partNumber));
}
skip = true;
offset += c.getSize();
break;
}
}
}
if(!skip) {
// Last part can be less than 5 MB. Adjust part size.
final long length = Math.min(Math.max((size / (S3DefaultMultipartService.MAXIMUM_UPLOAD_PARTS - 1)), partsize), remaining);
// Submit to queue
parts.add(this.submit(pool, file, local, throttle, listener, status, multipart, partNumber, offset, length, callback));
remaining -= length;
offset += length;
}
}
completed.addAll(Interruptibles.awaitAll(parts));
// Combining all the given parts into the final object. Processing of a Complete Multipart Upload request
// could take several minutes to complete. Because a request could fail after the initial 200 OK response
// has been sent, it is important that you check the response body to determine whether the request succeeded.
multipart.setBucketName(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName());
final MultipartCompleted complete = session.getClient().multipartCompleteUpload(multipart, completed);
if(log.isInfoEnabled()) {
log.info(String.format("Completed multipart upload for %s with %d parts and checksum %s",
complete.getObjectKey(), completed.size(), complete.getEtag()));
}
if(file.getType().contains(Path.Type.encrypted)) {
log.warn(String.format("Skip checksum verification for %s with client side encryption enabled", file));
}
else {
if(S3Session.isAwsHostname(session.getHost().getHostname())) {
completed.sort(new MultipartPart.PartNumberComparator());
final StringBuilder concat = new StringBuilder();
for(MultipartPart part : completed) {
concat.append(part.getEtag());
}
final String expected = String.format("%s-%d",
ChecksumComputeFactory.get(HashAlgorithm.md5).compute(concat.toString()), completed.size());
final String reference = StringUtils.remove(complete.getEtag(), "\"");
if(!StringUtils.equalsIgnoreCase(expected, reference)) {
throw new ChecksumException(MessageFormat.format(LocaleFactory.localizedString("Upload {0} failed", "Error"), file.getName()),
MessageFormat.format("Mismatch between MD5 hash {0} of uploaded data and ETag {1} returned by the server",
expected, reference));
}
}
}
final StorageObject object = new StorageObject(containerService.getKey(file));
object.setETag(complete.getEtag());
object.setContentLength(size);
object.setStorageClass(multipart.getStorageClass());
if(multipart.getMetadata() != null) {
object.addAllMetadata(multipart.getMetadata());
}
// Mark parent status as complete
status.withResponse(new S3AttributesAdapter(session.getHost()).toAttributes(object)).setComplete();
return object;
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Upload {0} failed", e, file);
}
finally {
// Cancel future tasks
pool.shutdown(false);
}
}
|
@Test
public void testUploadBucketInHostname() throws Exception {
final S3AccessControlListFeature acl = new S3AccessControlListFeature(virtualhost);
final S3MultipartUploadService service = new S3MultipartUploadService(virtualhost, new S3WriteFeature(virtualhost, acl), acl, 5 * 1024L * 1024L, 2);
final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final String name = String.format(" %s.txt", UUID.randomUUID());
final Path test = new Path(container, name, EnumSet.of(Path.Type.file));
final Local local = new Local(System.getProperty("java.io.tmpdir"), name);
final byte[] random = RandomUtils.nextBytes(1021);
IOUtils.write(random, local.getOutputStream(false));
final TransferStatus status = new TransferStatus();
status.setLength(random.length);
status.setStorageClass(S3Object.STORAGE_CLASS_REDUCED_REDUNDANCY);
final BytecountStreamListener count = new BytecountStreamListener();
service.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED),
count, status, new DisabledLoginCallback());
assertEquals(random.length, count.getSent());
assertSame(Checksum.NONE, status.getResponse().getChecksum());
assertTrue(status.isComplete());
assertNotSame(PathAttributes.EMPTY, status.getResponse());
assertEquals(random.length, status.getResponse().getSize());
assertTrue(new S3FindFeature(virtualhost, acl).find(test));
final PathAttributes attr = new S3AttributesFinderFeature(virtualhost, acl).find(test);
assertEquals(status.getResponse().getETag(), attr.getETag());
assertEquals(status.getResponse().getChecksum(), attr.getChecksum());
assertEquals(random.length, attr.getSize());
assertEquals(Checksum.NONE, attr.getChecksum());
assertNotNull(attr.getETag());
// d2b77e21aa68ebdcbfb589124b9f9192-1
assertEquals(Checksum.NONE, Checksum.parse(attr.getETag()));
assertEquals(S3Object.STORAGE_CLASS_REDUCED_REDUNDANCY, new S3StorageClassFeature(virtualhost, acl).getClass(test));
new S3DefaultDeleteFeature(virtualhost).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
local.delete();
}
|
public CompletableFuture<RemotingCommand> forwardMessageToDeadLetterQueue(ProxyContext ctx, ReceiptHandle handle,
String messageId, String groupName, String topicName, long timeoutMillis) {
CompletableFuture<RemotingCommand> future = new CompletableFuture<>();
try {
if (handle.getCommitLogOffset() < 0) {
throw new ProxyException(ProxyExceptionCode.INVALID_RECEIPT_HANDLE, "commit log offset is empty");
}
ConsumerSendMsgBackRequestHeader consumerSendMsgBackRequestHeader = new ConsumerSendMsgBackRequestHeader();
consumerSendMsgBackRequestHeader.setOffset(handle.getCommitLogOffset());
consumerSendMsgBackRequestHeader.setGroup(groupName);
consumerSendMsgBackRequestHeader.setDelayLevel(-1);
consumerSendMsgBackRequestHeader.setOriginMsgId(messageId);
consumerSendMsgBackRequestHeader.setOriginTopic(handle.getRealTopic(topicName, groupName));
consumerSendMsgBackRequestHeader.setMaxReconsumeTimes(0);
future = this.serviceManager.getMessageService().sendMessageBack(
ctx,
handle,
messageId,
consumerSendMsgBackRequestHeader,
timeoutMillis
).whenCompleteAsync((remotingCommand, t) -> {
if (t == null && remotingCommand.getCode() == ResponseCode.SUCCESS) {
this.messagingProcessor.ackMessage(ctx, handle, messageId,
groupName, topicName, timeoutMillis);
}
}, this.executor);
} catch (Throwable t) {
future.completeExceptionally(t);
}
return FutureUtils.addExecutor(future, this.executor);
}
|
@Test
public void testForwardMessageToDeadLetterQueue() throws Throwable {
ArgumentCaptor<ConsumerSendMsgBackRequestHeader> requestHeaderArgumentCaptor = ArgumentCaptor.forClass(ConsumerSendMsgBackRequestHeader.class);
when(this.messageService.sendMessageBack(any(), any(), anyString(), requestHeaderArgumentCaptor.capture(), anyLong()))
.thenReturn(CompletableFuture.completedFuture(mock(RemotingCommand.class)));
MessageExt messageExt = createMessageExt(KeyBuilder.buildPopRetryTopic(TOPIC, CONSUMER_GROUP, new BrokerConfig().isEnableRetryTopicV2()), "", 16, 3000);
RemotingCommand remotingCommand = this.producerProcessor.forwardMessageToDeadLetterQueue(
createContext(),
create(messageExt),
messageExt.getMsgId(),
CONSUMER_GROUP,
TOPIC,
3000
).get();
assertNotNull(remotingCommand);
ConsumerSendMsgBackRequestHeader requestHeader = requestHeaderArgumentCaptor.getValue();
assertEquals(messageExt.getTopic(), requestHeader.getOriginTopic());
assertEquals(messageExt.getMsgId(), requestHeader.getOriginMsgId());
assertEquals(CONSUMER_GROUP, requestHeader.getGroup());
}
|
public static StatementExecutorResponse execute(
final ConfiguredStatement<TerminateQuery> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final TerminateQuery terminateQuery = statement.getStatement();
// do default behaviour for TERMINATE ALL
if (!terminateQuery.getQueryId().isPresent()) {
return StatementExecutorResponse.notHandled();
}
final QueryId queryId = terminateQuery.getQueryId().get();
final RemoteHostExecutor remoteHostExecutor = RemoteHostExecutor.create(
statement,
sessionProperties,
executionContext,
serviceContext.getKsqlClient()
);
if (executionContext.getPersistentQuery(queryId).isPresent()
|| statement.getUnMaskedStatementText().equals(
TerminateCluster.TERMINATE_CLUSTER_STATEMENT_TEXT)) {
// do default behaviour for terminating persistent queries
return StatementExecutorResponse.notHandled();
} else {
// Check are we running this push query locally, if yes then terminate, otherwise
// propagate terminate query to other nodes
if (executionContext.getQuery(queryId).isPresent()) {
executionContext.getQuery(queryId).get().close();
} else {
final boolean wasTerminatedRemotely = remoteHostExecutor.fetchAllRemoteResults().getLeft()
.values()
.stream()
.map(TerminateQueryEntity.class::cast)
.map(TerminateQueryEntity::getWasTerminated)
.anyMatch(b -> b.equals(true));
if (!wasTerminatedRemotely) {
throw new KsqlException(String.format(
"Failed to terminate query with query ID: '%s'",
queryId));
}
}
return StatementExecutorResponse.handled(Optional.of(
new TerminateQueryEntity(statement.getMaskedStatementText(), queryId.toString(), true)
));
}
}
|
@Test
public void shouldFailToTerminateTransientQuery() {
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> CustomExecutors.TERMINATE_QUERY.execute(
engine.configure("TERMINATE TRANSIENT_QUERY;"),
mock(SessionProperties.class),
engine.getEngine(),
this.engine.getServiceContext()
));
// Then:
assertThat(e.getMessage(), containsString(
"Failed to terminate query with query ID: 'TRANSIENT_QUERY'"));
}
|
public IssueQuery create(SearchRequest request) {
try (DbSession dbSession = dbClient.openSession(false)) {
final ZoneId timeZone = parseTimeZone(request.getTimeZone()).orElse(clock.getZone());
Collection<RuleDto> ruleDtos = ruleKeysToRuleId(dbSession, request.getRules());
Collection<String> ruleUuids = ruleDtos.stream().map(RuleDto::getUuid).collect(Collectors.toSet());
Collection<String> issueKeys = collectIssueKeys(dbSession, request);
if (request.getRules() != null && request.getRules().stream().collect(Collectors.toSet()).size() != ruleDtos.size()) {
ruleUuids.add("non-existing-uuid");
}
IssueQuery.Builder builder = IssueQuery.builder()
.issueKeys(issueKeys)
.severities(request.getSeverities())
.cleanCodeAttributesCategories(request.getCleanCodeAttributesCategories())
.impactSoftwareQualities(request.getImpactSoftwareQualities())
.impactSeverities(request.getImpactSeverities())
.statuses(request.getStatuses())
.resolutions(request.getResolutions())
.issueStatuses(request.getIssueStatuses())
.resolved(request.getResolved())
.prioritizedRule(request.getPrioritizedRule())
.rules(ruleDtos)
.ruleUuids(ruleUuids)
.assigneeUuids(request.getAssigneeUuids())
.authors(request.getAuthors())
.scopes(request.getScopes())
.languages(request.getLanguages())
.tags(request.getTags())
.types(request.getTypes())
.pciDss32(request.getPciDss32())
.pciDss40(request.getPciDss40())
.owaspAsvs40(request.getOwaspAsvs40())
.owaspAsvsLevel(request.getOwaspAsvsLevel())
.owaspTop10(request.getOwaspTop10())
.owaspTop10For2021(request.getOwaspTop10For2021())
.stigAsdR5V3(request.getStigAsdV5R3())
.casa(request.getCasa())
.sansTop25(request.getSansTop25())
.cwe(request.getCwe())
.sonarsourceSecurity(request.getSonarsourceSecurity())
.assigned(request.getAssigned())
.createdAt(parseStartingDateOrDateTime(request.getCreatedAt(), timeZone))
.createdBefore(parseEndingDateOrDateTime(request.getCreatedBefore(), timeZone))
.facetMode(request.getFacetMode())
.timeZone(timeZone)
.codeVariants(request.getCodeVariants());
List<ComponentDto> allComponents = new ArrayList<>();
boolean effectiveOnComponentOnly = mergeDeprecatedComponentParameters(dbSession, request, allComponents);
addComponentParameters(builder, dbSession, effectiveOnComponentOnly, allComponents, request);
setCreatedAfterFromRequest(dbSession, builder, request, allComponents, timeZone);
String sort = request.getSort();
if (!isNullOrEmpty(sort)) {
builder.sort(sort);
builder.asc(request.getAsc());
}
return builder.build();
}
}
|
@Test
public void fail_if_no_component_provided_with_since_leak_period() {
assertThatThrownBy(() -> underTest.create(new SearchRequest().setInNewCodePeriod(true)))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("One and only one component must be provided when searching in new code period");
}
|
@ScalarFunction(value = "construct_tdigest", visibility = EXPERIMENTAL)
@Description("Create a TDigest by passing in its internal state.")
@SqlType("tdigest(double)")
public static Slice constructTDigest(
@SqlType("array(double)") Block centroidMeansBlock,
@SqlType("array(double)") Block centroidWeightsBlock,
@SqlType(StandardTypes.DOUBLE) double compression,
@SqlType(StandardTypes.DOUBLE) double min,
@SqlType(StandardTypes.DOUBLE) double max,
@SqlType(StandardTypes.DOUBLE) double sum,
@SqlType(StandardTypes.BIGINT) long count)
{
double[] centroidMeans = new double[centroidMeansBlock.getPositionCount()];
for (int i = 0; i < centroidMeansBlock.getPositionCount(); i++) {
centroidMeans[i] = DOUBLE.getDouble(centroidMeansBlock, i);
}
double[] centroidWeights = new double[centroidWeightsBlock.getPositionCount()];
for (int i = 0; i < centroidWeightsBlock.getPositionCount(); i++) {
centroidWeights[i] = DOUBLE.getDouble(centroidWeightsBlock, i);
}
TDigest tDigest = createTDigest(
centroidMeans,
centroidWeights,
compression,
min,
max,
sum,
toIntExact(count));
return tDigest.serialize();
}
|
@Test
public void testConstructTDigest()
{
TDigest tDigest = createTDigest(STANDARD_COMPRESSION_FACTOR);
ImmutableList<Double> values = ImmutableList.of(0.0d, 1.0d, 2.0d, 3.0d, 4.0d, 5.0d, 6.0d, 7.0d, 8.0d, 9.0d);
values.stream().forEach(tDigest::add);
List<Double> weights = Collections.nCopies(values.size(), 1.0);
double compression = Double.valueOf(STANDARD_COMPRESSION_FACTOR);
double min = values.stream().reduce(Double.POSITIVE_INFINITY, Double::min);
double max = values.stream().reduce(Double.NEGATIVE_INFINITY, Double::max);
double sum = values.stream().reduce(0.0d, Double::sum);
int count = values.size();
String sql = format("construct_tdigest(ARRAY%s, ARRAY%s, %s, %s, %s, %s, %s)",
values,
weights,
compression,
min,
max,
sum,
count);
functionAssertions.selectSingleValue(
sql,
TDIGEST_DOUBLE,
SqlVarbinary.class);
}
|
public void align() throws IOException {
int zeros = (-getPosition()) & 3;
if (zeros > 0) {
write(zeroBuf, 0, zeros);
}
}
|
@Test
public void testAlign() throws IOException {
// create a new writer so we can start at file position 0
startPosition = 0;
writer = new DexDataWriter(output, startPosition, 256);
writer.align();
writer.write(1);
writer.align();
writer.align();
writer.write(1);
writer.write(2);
writer.align();
writer.write(1);
writer.write(2);
writer.write(3);
writer.align();
writer.align();
writer.write(1);
writer.write(2);
writer.write(3);
writer.write(4);
writer.align();
writer.align();
writer.align();
writer.align();
writer.write(1);
writer.align();
expectData(0x01, 0x00, 0x00, 0x00,
0x01, 0x02, 0x00, 0x00,
0x01, 0x02, 0x03, 0x00,
0x01, 0x02, 0x03, 0x04,
0x01, 0x00, 0x00, 0x00);
}
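The expression (-getPosition()) & 3 gives the number of zero bytes needed to reach the next 4-byte boundary: 0 at a boundary, never 4. A one-off loop to see the pattern:

public class AlignDemo {
    public static void main(String[] args) {
        for (int position = 0; position <= 8; position++) {
            int zeros = (-position) & 3;
            System.out.println("position=" + position + " -> pad " + zeros + " zero byte(s)");
        }
        // position=0 -> 0, 1 -> 3, 2 -> 2, 3 -> 1, 4 -> 0, ...
    }
}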
|
public String tables(Namespace ns) {
return SLASH.join("v1", prefix, "namespaces", RESTUtil.encodeNamespace(ns), "tables");
}
|
@Test
public void testTables() {
Namespace ns = Namespace.of("ns");
assertThat(withPrefix.tables(ns)).isEqualTo("v1/ws/catalog/namespaces/ns/tables");
assertThat(withoutPrefix.tables(ns)).isEqualTo("v1/namespaces/ns/tables");
}
|
@VisibleForTesting
@Override
List<String> cancelNonTerminalTasks(Workflow workflow) {
List<String> erroredTasks = new ArrayList<>();
// Update non-terminal tasks' status to CANCELED
for (Task task : workflow.getTasks()) {
if (!task.getStatus().isTerminal()) {
// Cancel the ones which are not completed yet....
task.setStatus(CANCELED);
// all of our tasks are system tasks.
Checks.checkTrue(
SystemTaskType.is(task.getTaskType()),
"Invalid task type [%s], all tasks should have a known maestro task type.",
task.getTaskType());
WorkflowSystemTask workflowSystemTask = WorkflowSystemTask.get(task.getTaskType());
try {
workflowSystemTask.cancel(workflow, task, this);
executionDAOFacade.updateTask(task); // only update if cancelled
} catch (Exception e) {
erroredTasks.add(task.getReferenceTaskName());
LOG.error(
"Error canceling system task:{}/{} in workflow: {}",
workflowSystemTask.getName(),
task.getTaskId(),
workflow.getWorkflowId(),
e);
}
}
}
if (erroredTasks.isEmpty()) {
try {
workflowStatusListener.onWorkflowFinalizedIfEnabled(workflow);
queueDAO.remove(DECIDER_QUEUE, workflow.getWorkflowId());
} catch (Exception e) {
LOG.error("Error removing workflow: {} from decider queue", workflow.getWorkflowId(), e);
throw e; // we need to throw it to get at least once guarantee.
}
} else {
// also throw to retry errored tasks later.
throw new MaestroRetryableError(
"Error canceling tasks [%s] in workflow: [%s]", erroredTasks, workflow.getWorkflowId());
}
return erroredTasks;
}
|
@Test
public void testFinalizedFailure() {
Task startTask = new Task();
startTask.setTaskId(UUID.randomUUID().toString());
startTask.setTaskType(Constants.DEFAULT_START_STEP_NAME);
startTask.setStatus(Task.Status.IN_PROGRESS);
Task maestroTask = new Task();
maestroTask.setTaskId(UUID.randomUUID().toString());
maestroTask.setTaskType(Constants.MAESTRO_TASK_NAME);
maestroTask.setStatus(Task.Status.SCHEDULED);
workflow.getTasks().addAll(Arrays.asList(startTask, maestroTask));
doThrow(new MaestroRetryableError("test-finalize"))
.when(workflowStatusListener)
.onWorkflowFinalizedIfEnabled(any(Workflow.class));
AssertHelper.assertThrows(
"Cancel throws an exception will fail cancelNonTerminalTasks call.",
MaestroRetryableError.class,
"test-finalize",
() -> maestroWorkflowExecutor.cancelNonTerminalTasks(workflow));
ArgumentCaptor<Task> argumentCaptor2 = ArgumentCaptor.forClass(Task.class);
verify(executionDAOFacade, times(2)).updateTask(argumentCaptor2.capture());
assertEquals(2, argumentCaptor2.getAllValues().size());
assertEquals(
Constants.DEFAULT_START_STEP_NAME, argumentCaptor2.getAllValues().get(0).getTaskType());
assertEquals(Task.Status.CANCELED, argumentCaptor2.getAllValues().get(0).getStatus());
assertEquals(Constants.MAESTRO_TASK_NAME, argumentCaptor2.getAllValues().get(1).getTaskType());
assertEquals(Task.Status.CANCELED, argumentCaptor2.getAllValues().get(1).getStatus());
verify(workflowStatusListener, times(1)).onWorkflowFinalizedIfEnabled(any(Workflow.class));
verify(queueDAO, times(0)).remove(any(), any());
}
|
@DeleteMapping("/nodes")
@Secured(action = ActionTypes.WRITE, resource = "nacos/admin", signType = SignType.CONSOLE)
public RestResult<Void> deleteNodes(@RequestParam("addresses") List<String> addresses) throws Exception {
return RestResultUtils.failed(405, null, "DELETE /v2/core/cluster/nodes API is temporarily not allowed.");
}
|
@Test
void testLeave() throws Exception {
RestResult<Void> result = nacosClusterControllerV2.deleteNodes(Collections.singletonList("1.1.1.1"));
assertFalse(result.ok());
assertEquals(405, result.getCode());
}
|
public static boolean exists(String name) {
name = getWellFormName(name);
return STRING_ENV_MAP.containsKey(name);
}
|
@Test
public void exist() {
assertFalse(Env.exists("xxxyyy234"));
assertTrue(Env.exists("local"));
assertTrue(Env.exists("dev"));
}
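getWellFormName isn't shown in this pair; assuming it trims and upper-cases the key, a minimal sketch of the lookup (map contents and normalization are assumptions):

import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

public class EnvDemo {
    private static final Map<String, String> STRING_ENV_MAP = new HashMap<>();
    static {
        STRING_ENV_MAP.put("LOCAL", "local");
        STRING_ENV_MAP.put("DEV", "dev");
    }

    // Assumed normalization: trim and upper-case the lookup key.
    static String getWellFormName(String name) {
        return name == null ? "" : name.trim().toUpperCase(Locale.ROOT);
    }

    static boolean exists(String name) {
        return STRING_ENV_MAP.containsKey(getWellFormName(name));
    }

    public static void main(String[] args) {
        System.out.println(exists(" dev "));     // true
        System.out.println(exists("xxxyyy234")); // false
    }
}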
|
@Override
@Transactional
public boolean checkForPreApproval(Long userId, Integer userType, String clientId, Collection<String> requestedScopes) {
// Step 1: check the client's auto-approved scopes; if every requested scope is auto-approved, return true
OAuth2ClientDO clientDO = oauth2ClientService.validOAuthClientFromCache(clientId);
Assert.notNull(clientDO, "Client must not be null"); // defensive programming
if (CollUtil.containsAll(clientDO.getAutoApproveScopes(), requestedScopes)) {
// gh-877 - if all scopes are auto approved, approvals still need to be added to the approval store.
LocalDateTime expireTime = LocalDateTime.now().plusSeconds(TIMEOUT);
for (String scope : requestedScopes) {
saveApprove(userId, userType, clientId, scope, true, expireTime);
}
return true;
}
// Step 2: include the approvals the user has already granted; if all requested scopes are covered, return true
List<OAuth2ApproveDO> approveDOs = getApproveList(userId, userType, clientId);
Set<String> scopes = convertSet(approveDOs, OAuth2ApproveDO::getScope,
OAuth2ApproveDO::getApproved); // keep only unexpired, approved entries
return CollUtil.containsAll(scopes, requestedScopes);
}
|
@Test
public void checkForPreApproval_approve() {
// Prepare parameters
Long userId = randomLongId();
Integer userType = randomEle(UserTypeEnum.values()).getValue();
String clientId = randomString();
List<String> requestedScopes = Lists.newArrayList("read");
// Mock methods
when(oauth2ClientService.validOAuthClientFromCache(eq(clientId)))
.thenReturn(randomPojo(OAuth2ClientDO.class).setAutoApproveScopes(null));
// Mock data
OAuth2ApproveDO approve = randomPojo(OAuth2ApproveDO.class).setUserId(userId)
.setUserType(userType).setClientId(clientId).setScope("read")
.setExpiresTime(LocalDateTimeUtil.offset(LocalDateTime.now(), 1L, ChronoUnit.DAYS)).setApproved(true); // approved
oauth2ApproveMapper.insert(approve);
// Invoke
boolean success = oauth2ApproveService.checkForPreApproval(userId, userType,
clientId, requestedScopes);
// Assert
assertTrue(success);
}
|
public Response downloadDumpFile(String topologyId, String hostPort, String fileName, String user) throws IOException {
String[] hostPortSplit = hostPort.split(":");
String host = hostPortSplit[0];
String portStr = hostPortSplit[1];
Path rawFile = logRoot.resolve(topologyId).resolve(portStr).resolve(fileName);
Path absFile = rawFile.toAbsolutePath().normalize();
if (!absFile.startsWith(logRoot) || !rawFile.normalize().toString().equals(rawFile.toString())) {
//Ensure filename doesn't contain ../ parts
return LogviewerResponseBuilder.buildResponsePageNotFound();
}
if (absFile.toFile().exists()) {
String workerFileRelativePath = String.join(File.separator, topologyId, portStr, WORKER_LOG_FILENAME);
if (resourceAuthorizer.isUserAllowedToAccessFile(user, workerFileRelativePath)) {
String downloadedFileName = host + "-" + topologyId + "-" + portStr + "-" + absFile.getFileName();
return LogviewerResponseBuilder.buildDownloadFile(downloadedFileName, absFile.toFile(), numFileDownloadExceptions);
} else {
return LogviewerResponseBuilder.buildResponseUnauthorizedUser(user);
}
} else {
return LogviewerResponseBuilder.buildResponsePageNotFound();
}
}
|
@Test
public void testDownloadDumpFileTraversalInPort() throws IOException {
try (TmpPath rootPath = new TmpPath()) {
LogviewerProfileHandler handler = createHandlerTraversalTests(rootPath.getFile().toPath());
Response topoAResponse = handler.downloadDumpFile("../", "localhost:../logs", "daemon-dump.bin", "user");
Utils.forceDelete(rootPath.toString());
assertThat(topoAResponse.getStatus(), is(Response.Status.NOT_FOUND.getStatusCode()));
}
}
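The guard is two checks: the normalized absolute path must stay under logRoot, and the raw path must equal its own normalized form, which rejects any "../" segments before the file is ever touched. A standalone sketch of the same predicate:

import java.nio.file.Path;
import java.nio.file.Paths;

public class TraversalGuardDemo {
    // True only when the resolved file stays inside root and contains no ".." segments.
    static boolean isSafe(Path root, String topologyId, String port, String fileName) {
        Path raw = root.resolve(topologyId).resolve(port).resolve(fileName);
        Path abs = raw.toAbsolutePath().normalize();
        return abs.startsWith(root) && raw.normalize().toString().equals(raw.toString());
    }

    public static void main(String[] args) {
        Path root = Paths.get("/var/logs").toAbsolutePath().normalize();
        System.out.println(isSafe(root, "topo-1", "6700", "daemon-dump.bin")); // true
        System.out.println(isSafe(root, "../", "../logs", "daemon-dump.bin")); // false
    }
}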
|
@Override
public BroadcastRuleConfiguration buildToBeAlteredRuleConfiguration(final DropBroadcastTableRuleStatement sqlStatement) {
BroadcastRuleConfiguration result = new BroadcastRuleConfiguration(new HashSet<>(rule.getConfiguration().getTables()));
Collection<String> toBeDroppedTableNames = new CaseInsensitiveSet<>(sqlStatement.getTables());
result.getTables().removeIf(toBeDroppedTableNames::contains);
return result;
}
|
@Test
void assertUpdateCurrentRuleConfiguration() {
BroadcastRuleConfiguration config = new BroadcastRuleConfiguration(new LinkedList<>());
config.getTables().add("t_address");
BroadcastRule rule = mock(BroadcastRule.class);
when(rule.getConfiguration()).thenReturn(config);
executor.setRule(rule);
DropBroadcastTableRuleStatement sqlStatement = new DropBroadcastTableRuleStatement(false, Collections.singleton("t_address"));
BroadcastRuleConfiguration toBeAlteredConfig = executor.buildToBeAlteredRuleConfiguration(sqlStatement);
assertTrue(toBeAlteredConfig.getTables().isEmpty());
}
|
@VisibleForTesting
protected RESTRequestInterceptor createRequestInterceptorChain() {
return RouterServerUtil.createRequestInterceptorChain(conf,
YarnConfiguration.ROUTER_WEBAPP_INTERCEPTOR_CLASS_PIPELINE,
YarnConfiguration.DEFAULT_ROUTER_WEBAPP_INTERCEPTOR_CLASS,
RESTRequestInterceptor.class);
}
|
@Test
public void testRequestInterceptorChainCreation() throws Exception {
RESTRequestInterceptor root =
super.getRouterWebServices().createRequestInterceptorChain();
int index = 0;
while (root != null) {
// The current pipeline is:
// PassThroughRESTRequestInterceptor - index = 0
// PassThroughRESTRequestInterceptor - index = 1
// PassThroughRESTRequestInterceptor - index = 2
// MockRESTRequestInterceptor - index = 3
switch (index) {
case 0: // Fall to the next case
case 1: // Fall to the next case
case 2:
// If index is equal to 0,1 or 2 we fall in this check
Assert.assertEquals(PassThroughRESTRequestInterceptor.class.getName(),
root.getClass().getName());
break;
case 3:
Assert.assertEquals(MockRESTRequestInterceptor.class.getName(),
root.getClass().getName());
break;
default:
Assert.fail();
}
root = root.getNextInterceptor();
index++;
}
Assert.assertEquals("The number of interceptors in chain does not match", 4,
index);
}
|
public static RSAPublicKey parseRSAPublicKey(String pem) throws ServletException {
String fullPem = PEM_HEADER + pem + PEM_FOOTER;
PublicKey key = null;
try {
CertificateFactory fact = CertificateFactory.getInstance("X.509");
ByteArrayInputStream is = new ByteArrayInputStream(
fullPem.getBytes(StandardCharsets.UTF_8));
X509Certificate cer = (X509Certificate) fact.generateCertificate(is);
key = cer.getPublicKey();
} catch (CertificateException ce) {
String message = null;
if (pem.startsWith(PEM_HEADER)) {
message = "CertificateException - be sure not to include PEM header "
+ "and footer in the PEM configuration element.";
} else {
message = "CertificateException - PEM may be corrupt";
}
throw new ServletException(message, ce);
}
return (RSAPublicKey) key;
}
|
@Test
public void testCorruptPEM() throws Exception {
String pem = "MIICOjCCAaOgAwIBAgIJANXi/oWxvJNzMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNVBAYTAlVTMQ0w"
+ "CwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRvb3AxDTALBgNVBAsTBFRl"
+ "c3QxEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xNTAxMDIyMTE5MjRaFw0xNjAxMDIyMTE5MjRaMF8x"
+ "CzAJBgNVBAYTAlVTMQ0wCwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRv"
+ "b3AxDTALBgNVBAsTBFRlc3QxEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOB"
+ "jQAwgYkCgYEAwpfpLdi7dWTHNzETt+L7618/dWUQFb/C7o1jIxFgbKOVIB6d5YmvUbJck5PYxFkz"
+ "C25fmU5H71WGOI1Kle5TFDmIo+hqh5xqu1YNRZz9i6D94g+2AyYr9BpvH4ZfdHs7r9AU7c3kq68V"
+ "7OPuuaHb25J8isiOyA3RiWuJGQlXTdkCAwEAATANBgkqhkiG9w0BAQUFAAOBgQAdRUyCUqE9sdim"
+ "Fbll9BuZDKV16WXeWGq+kTd7ETe7l0fqXjq5EnrifOai0L/pXwVvS2jrFkKQRlRxRGUNaeEBZ2Wy"
+ "9aTyR+HGHCfvwoCegc9rAVw/DLaRriSO/jnEXzYK6XLVKH+hx5UXrJ7Oyc7JjZUc3g9kCWORThCX"
+ "Mzc1xA++";
try {
CertificateUtil.parseRSAPublicKey(pem);
fail("Should not have thrown ServletException");
} catch (ServletException se) {
assertTrue(se.getMessage().contains("corrupt"));
}
}
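PEM_HEADER and PEM_FOOTER aren't shown in this pair; assuming the standard certificate delimiters (an assumption, though it matches the error-message hint about not including them in the configuration element):

public class PemDelimiters {
    // Assumed values: the method wraps the configured base64 body with these,
    // which is why a configuration that already carries them fails to parse.
    static final String PEM_HEADER = "-----BEGIN CERTIFICATE-----\n";
    static final String PEM_FOOTER = "\n-----END CERTIFICATE-----";
}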
|
static ByteSource asByteSource(ZipFile file, ZipEntry entry) {
return new ZipEntryByteSource(file, entry);
}
|
@Test
public void testAsByteSource() throws Exception {
File zipDir = new File(tmpDir, "zip");
assertTrue(zipDir.mkdirs());
createFileWithContents(zipDir, "myTextFile.txt", "Simple Text");
ZipFiles.zipDirectory(tmpDir, zipFile);
try (ZipFile zip = new ZipFile(zipFile)) {
ZipEntry entry = zip.getEntry("zip/myTextFile.txt");
ByteSource byteSource = ZipFiles.asByteSource(zip, entry);
if (entry.getSize() != -1) {
assertEquals(entry.getSize(), byteSource.size());
}
assertArrayEquals("Simple Text".getBytes(StandardCharsets.UTF_8), byteSource.read());
}
}
|
public static void getSemanticPropsSingleFromString(
SingleInputSemanticProperties result,
String[] forwarded,
String[] nonForwarded,
String[] readSet,
TypeInformation<?> inType,
TypeInformation<?> outType) {
getSemanticPropsSingleFromString(
result, forwarded, nonForwarded, readSet, inType, outType, false);
}
|
@Test
void testForwardedRead() {
String[] forwardedFields = {"f0->f0;f1->f2"};
String[] readFields = {"f0; f2"};
SingleInputSemanticProperties sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, forwardedFields, null, readFields, threeIntTupleType, fiveIntTupleType);
assertThat(sp.getForwardingTargetFields(0, 0)).contains(0);
assertThat(sp.getForwardingTargetFields(0, 1)).contains(2);
assertThat(sp.getReadFields(0)).containsExactly(0, 2);
}
|
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
for(Path f : files.keySet()) {
if(f.isPlaceholder()) {
log.warn(String.format("Ignore placeholder %s", f));
continue;
}
callback.delete(f);
try {
if(new SimplePathPredicate(DriveHomeFinderService.SHARED_DRIVES_NAME).test(f.getParent())) {
session.getClient().teamdrives().delete(fileid.getFileId(f)).execute();
}
else {
if(f.attributes().isDuplicate()) {
if(log.isWarnEnabled()) {
log.warn(String.format("Delete file %s already in trash", f));
}
// Permanently deletes a file version
session.getClient().revisions().delete(fileid.getFileId(f), f.attributes().getVersionId()).execute();
}
else {
session.getClient().files().delete(fileid.getFileId(f))
.setSupportsAllDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable")).execute();
}
}
fileid.cache(f, null);
}
catch(IOException e) {
throw new DriveExceptionMappingService(fileid).map("Cannot delete {0}", e, f);
}
}
}
|
@Test
public void testDeleteMultipleFiles() throws Exception {
final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
final Path folder = new DriveDirectoryFeature(session, fileid).mkdir(new Path(DriveHomeFinderService.MYDRIVE_FOLDER, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final Path file1 = new DriveTouchFeature(session, fileid).touch(new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
final Path file2 = new DriveTouchFeature(session, fileid).touch(new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
assertTrue(new DriveFindFeature(session, fileid).find(file1));
assertTrue(new DriveFindFeature(session, fileid).find(file2));
new DriveDeleteFeature(session, fileid).delete(Arrays.asList(file1, file2), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse((new DriveFindFeature(session, fileid).find(file1, new DisabledListProgressListener())));
assertFalse((new DriveFindFeature(session, fileid).find(file2, new DisabledListProgressListener())));
assertTrue(new DriveFindFeature(session, fileid).find(folder, new DisabledListProgressListener()));
new DriveDeleteFeature(session, fileid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse((new DriveFindFeature(session, fileid).find(folder, new DisabledListProgressListener())));
}
|
public static URI parse(String featureIdentifier) {
requireNonNull(featureIdentifier, "featureIdentifier may not be null");
if (featureIdentifier.isEmpty()) {
throw new IllegalArgumentException("featureIdentifier may not be empty");
}
// Legacy from the Cucumber Eclipse plugin
// Older versions of Cucumber allowed it.
if (CLASSPATH_SCHEME_PREFIX.equals(featureIdentifier)) {
return rootPackageUri();
}
if (nonStandardPathSeparatorInUse(featureIdentifier)) {
String standardized = replaceNonStandardPathSeparator(featureIdentifier);
return parseAssumeFileScheme(standardized);
}
if (isWindowsOS() && pathContainsWindowsDrivePattern(featureIdentifier)) {
return parseAssumeFileScheme(featureIdentifier);
}
if (probablyURI(featureIdentifier)) {
return parseProbableURI(featureIdentifier);
}
return parseAssumeFileScheme(featureIdentifier);
}
|
@Test
void can_parse_eclipse_plugin_default_glue() {
// The eclipse plugin uses `classpath:` as the default
URI uri = FeaturePath.parse("classpath:");
assertAll(
() -> assertThat(uri.getScheme(), is("classpath")),
() -> assertThat(uri.getSchemeSpecificPart(), is("/")));
}
|
public String getTableCat() {
return tableCat;
}
|
@Test
public void testGetTableCat() {
ColumnMeta columnMeta = new ColumnMeta();
columnMeta.setTableCat("tableCat");
        assertEquals(columnMeta.getTableCat(), "tableCat");
}
|
@Override
public ExecuteContext before(ExecuteContext context) {
Object object = context.getObject();
if (object instanceof BaseLoadBalancer) {
List<Object> serverList = getServerList(context.getMethod().getName(), object);
if (CollectionUtils.isEmpty(serverList)) {
return context;
}
BaseLoadBalancer loadBalancer = (BaseLoadBalancer) object;
String name = loadBalancer.getName();
RequestData requestData = getRequestData().orElse(null);
List<Object> targetInstances = loadBalancerService.getTargetInstances(name, serverList, requestData);
context.skip(Collections.unmodifiableList(targetInstances));
}
return context;
}
|
@Test
public void testBeforeWhenInvalid() {
// configService.setInvalid(true);
interceptor.before(context);
BaseLoadBalancer loadBalancer = (BaseLoadBalancer) context.getObject();
List<Server> servers = loadBalancer.getAllServers();
Assert.assertNotNull(servers);
Assert.assertEquals(2, servers.size());
}
|
@Override
public SchemaKTable<K> select(
final List<ColumnName> keyColumnNames,
final List<SelectExpression> selectExpressions,
final Stacker contextStacker,
final PlanBuildContext buildContext,
final FormatInfo valueFormat
) {
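    // Map the table's values through the select expressions; the key format is carried over unchanged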
final TableSelect<K> step = ExecutionStepFactory.tableMapValues(
contextStacker,
sourceTableStep,
keyColumnNames,
selectExpressions,
InternalFormats.of(keyFormat, valueFormat)
);
return new SchemaKTable<>(
step,
resolveSchema(step),
keyFormat,
ksqlConfig,
functionRegistry
);
}
|
@Test
public void testSelectWithExpression() {
// Given:
final String selectQuery = "SELECT col0, col3*3+5 FROM test2 WHERE col0 > 100 EMIT CHANGES;";
final PlanNode logicalPlan = buildLogicalPlan(selectQuery);
final ProjectNode projectNode = (ProjectNode) logicalPlan.getSources().get(0);
initialSchemaKTable = buildSchemaKTableFromPlan(logicalPlan);
// When:
final SchemaKTable<?> projectedSchemaKStream = initialSchemaKTable.select(ImmutableList.of(),
projectNode.getSelectExpressions(), childContextStacker, buildContext, internalFormats);
// Then:
assertThat(projectedSchemaKStream.getSchema(),
is(LogicalSchema.builder().keyColumn(ColumnName.of("COL0"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of("COL0"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of("KSQL_COL_0"), SqlTypes.DOUBLE).build()));
}
|
@Override
public String getConfigDirectory() {
final String configDir =
flinkConfig
.getOptional(DeploymentOptionsInternal.CONF_DIR)
.orElse(flinkConfig.get(KubernetesConfigOptions.FLINK_CONF_DIR));
checkNotNull(configDir);
return configDir;
}
|
@Test
void getConfigDirectoryFallbackToPodConfDir() {
final String confDirInPod = flinkConfig.get(KubernetesConfigOptions.FLINK_CONF_DIR);
assertThat(testingKubernetesParameters.getConfigDirectory()).isEqualTo(confDirInPod);
}
|
public TaskRunHistory getTaskRunHistory() {
return taskRunManager.getTaskRunHistory();
}
|
@Test
public void testForceGC2() {
TaskRunManager taskRunManager = new TaskRunManager();
for (int i = 0; i < 10; i++) {
TaskRunStatus taskRunStatus = new TaskRunStatus();
taskRunStatus.setQueryId("test" + i);
taskRunStatus.setTaskName("test" + i);
taskRunManager.getTaskRunHistory().addHistory(taskRunStatus);
}
Config.task_runs_max_history_number = 20;
taskRunManager.getTaskRunHistory().forceGC();
Assert.assertEquals(10, taskRunManager.getTaskRunHistory().getInMemoryHistory().size());
Config.task_runs_max_history_number = 10000;
}
|
@Override
public StorageObject upload(final Path file, Local local, final BandwidthThrottle throttle, final StreamListener listener,
final TransferStatus status, final ConnectionCallback prompt) throws BackgroundException {
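        // Transfers above the configured threshold use multipart upload and fall back to the single upload service on failure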
if(this.threshold(status)) {
try {
return new S3MultipartUploadService(session, writer, acl).upload(file, local, throttle, listener, status, prompt);
}
catch(NotfoundException | InteroperabilityException e) {
log.warn(String.format("Failure %s using multipart upload. Fallback to single upload.", e));
status.append(false);
try {
return new S3SingleUploadService(session, writer).upload(file, local, throttle, listener, status, prompt);
}
catch(BackgroundException f) {
log.warn(String.format("Failure %s using single upload. Throw original multipart failure %s", e, e));
throw e;
}
}
}
// Use single upload service
return new S3SingleUploadService(session, writer).upload(file, local, throttle, listener, status, prompt);
}
|
@Test
public void testUploadSinglePartEuCentral() throws Exception {
final S3ThresholdUploadService service = new S3ThresholdUploadService(session, new S3AccessControlListFeature(session), 5 * 1024L);
final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final String name = UUID.randomUUID().toString();
final Path test = new Path(container, name, EnumSet.of(Path.Type.file));
final Local local = new Local(System.getProperty("java.io.tmpdir"), name);
final byte[] random = RandomUtils.nextBytes(1000);
IOUtils.write(random, local.getOutputStream(false));
final TransferStatus status = new TransferStatus();
status.setLength(random.length);
status.setStorageClass(S3Object.STORAGE_CLASS_REDUCED_REDUNDANCY);
final BytecountStreamListener count = new BytecountStreamListener();
service.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED),
count, status, new DisabledLoginCallback());
assertEquals(random.length, count.getSent(), 0L);
assertTrue(status.isComplete());
assertTrue(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(test));
final PathAttributes attributes = new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(test);
assertEquals(random.length, attributes.getSize(), 0L);
assertEquals(S3Object.STORAGE_CLASS_REDUCED_REDUNDANCY, new S3StorageClassFeature(session, new S3AccessControlListFeature(session)).getClass(test));
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
local.delete();
}
|
@Override
@ManagedOperation(description = "Adds the key to the store")
public boolean add(String key) {
if (cache.asMap().containsKey(key)) {
return false;
} else {
cache.put(key, true);
return true;
}
}
|
@Test
void testAdd() {
// add first key
assertTrue(repo.add(key01));
assertTrue(repo.getCache().asMap().containsKey(key01));
// try to add the same key again
assertFalse(repo.add(key01));
// try to add another one
assertTrue(repo.add(key02));
assertTrue(repo.getCache().asMap().containsKey(key02));
}
|
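// Completes the target future with the same outcome (value or exception) as the source future.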
public static <T> void forward(CompletableFuture<T> source, CompletableFuture<T> target) {
source.whenComplete(forwardTo(target));
}
|
@Test
void testForwardExceptionally() {
final CompletableFuture<String> source = new CompletableFuture<>();
final CompletableFuture<String> target = new CompletableFuture<>();
FutureUtils.forward(source, target);
assertThat(source).isNotDone();
assertThat(target).isNotDone();
final FlinkException expectedCause = new FlinkException("Expected exception");
source.completeExceptionally(expectedCause);
assertThat(source).isDone();
assertThat(target).isDone();
assertThatFuture(source)
.eventuallyFailsWith(ExecutionException.class)
.extracting(Throwable::getCause)
.isEqualTo(expectedCause);
assertThatFuture(target)
.eventuallyFailsWith(ExecutionException.class)
.extracting(Throwable::getCause)
.isEqualTo(expectedCause);
}
|
public static CompletableFuture<Map<String, String>> labelFailure(
final Throwable cause,
final Context context,
final Executor mainThreadExecutor,
final Collection<FailureEnricher> failureEnrichers) {
// list of CompletableFutures to enrich failure with labels from each enricher
final Collection<CompletableFuture<Map<String, String>>> enrichFutures = new ArrayList<>();
for (final FailureEnricher enricher : failureEnrichers) {
enrichFutures.add(
enricher.processFailure(cause, context)
.thenApply(
enricherLabels -> {
final Map<String, String> validLabels = new HashMap<>();
enricherLabels.forEach(
(k, v) -> {
if (!enricher.getOutputKeys().contains(k)) {
LOG.warn(
"Ignoring label with key {} from enricher {}"
+ " violating contract, keys allowed {}.",
k,
enricher.getClass(),
enricher.getOutputKeys());
} else {
validLabels.put(k, v);
}
});
return validLabels;
})
.exceptionally(
t -> {
LOG.warn(
"Enricher {} threw an exception.",
enricher.getClass(),
t);
return Collections.emptyMap();
}));
}
// combine all CompletableFutures into a single CompletableFuture containing a Map of labels
return FutureUtils.combineAll(enrichFutures)
.thenApplyAsync(
labelsToMerge -> {
final Map<String, String> mergedLabels = new HashMap<>();
for (Map<String, String> labels : labelsToMerge) {
labels.forEach(
(k, v) ->
// merge label with existing, throwing an exception
// if there is a key conflict
mergedLabels.merge(
k,
v,
(first, second) -> {
throw new FlinkRuntimeException(
String.format(
MERGE_EXCEPTION_MSG,
k));
}));
}
return mergedLabels;
},
mainThreadExecutor);
}
|
@Test
public void testLabelFailureWithInvalidEnricher() {
// validate labelFailure by enricher with wrong outputKeys
final Throwable cause = new RuntimeException("test exception");
final String invalidEnricherKey = "invalidKey";
final Set<FailureEnricher> failureEnrichers = new HashSet<>();
final FailureEnricher invalidEnricher =
new TestEnricher(
Collections.singletonMap(invalidEnricherKey, "enricherValue"),
"enricherKey");
failureEnrichers.add(invalidEnricher);
final CompletableFuture<Map<String, String>> result =
FailureEnricherUtils.labelFailure(
cause,
null,
ComponentMainThreadExecutorServiceAdapter.forMainThread(),
failureEnrichers);
// Ignoring labels
        assertThatFuture(result).eventuallySucceeds().satisfies(labels -> assertThat(labels).isEmpty());
}
|
@Override
public Set<Tuple> zRangeWithScores(byte[] key, long start, long end) {
if (executorService.getServiceManager().isResp3()) {
return read(key, ByteArrayCodec.INSTANCE, ZRANGE_ENTRY_V2, key, start, end, "WITHSCORES");
}
return read(key, ByteArrayCodec.INSTANCE, ZRANGE_ENTRY, key, start, end, "WITHSCORES");
}
|
@Test
public void testZRangeWithScores() {
StringRedisTemplate redisTemplate = new StringRedisTemplate();
redisTemplate.setConnectionFactory(new RedissonConnectionFactory(redisson));
redisTemplate.afterPropertiesSet();
redisTemplate.boundZSetOps("test").add("1", 10);
redisTemplate.boundZSetOps("test").add("2", 20);
redisTemplate.boundZSetOps("test").add("3", 30);
Set<ZSetOperations.TypedTuple<String>> objs = redisTemplate.boundZSetOps("test").rangeWithScores(0, 100);
assertThat(objs).hasSize(3);
assertThat(objs).containsExactlyInAnyOrder(ZSetOperations.TypedTuple.of("1", 10D),
ZSetOperations.TypedTuple.of("2", 20D),
ZSetOperations.TypedTuple.of("3", 30D));
}
|
String getJwtFromBearerAuthorization(HttpServletRequest req) {
String authorizationHeader = req.getHeader(HttpHeader.AUTHORIZATION.asString());
if (authorizationHeader == null || !authorizationHeader.startsWith(BEARER)) {
return null;
} else {
return authorizationHeader.substring(BEARER.length()).trim();
}
}
|
@Test
public void testParseTokenFromAuthHeaderNoBearer() {
JwtAuthenticator authenticator = new JwtAuthenticator(TOKEN_PROVIDER, JWT_TOKEN);
HttpServletRequest request = mock(HttpServletRequest.class);
expect(request.getHeader(HttpHeader.AUTHORIZATION.asString())).andReturn(BASIC_SCHEME + " " + EXPECTED_TOKEN);
replay(request);
String actualToken = authenticator.getJwtFromBearerAuthorization(request);
verify(request);
assertNull(actualToken);
}
|
@Override
public boolean isInStates(Set<String> statesFilter, long committedOffset) {
return statesFilter.contains(state.toLowerCaseString());
}
|
@Test
public void testIsInStates() {
ClassicGroup group = new ClassicGroup(new LogContext(), "groupId", EMPTY, Time.SYSTEM, mock(GroupCoordinatorMetricsShard.class));
assertTrue(group.isInStates(Collections.singleton("empty"), 0));
group.transitionTo(PREPARING_REBALANCE);
assertTrue(group.isInStates(Collections.singleton("preparingrebalance"), 0));
assertFalse(group.isInStates(Collections.singleton("PreparingRebalance"), 0));
group.transitionTo(COMPLETING_REBALANCE);
assertTrue(group.isInStates(new HashSet<>(Collections.singletonList("completingrebalance")), 0));
group.transitionTo(STABLE);
assertTrue(group.isInStates(Collections.singleton("stable"), 0));
assertFalse(group.isInStates(Collections.singleton("empty"), 0));
group.transitionTo(DEAD);
assertTrue(group.isInStates(new HashSet<>(Arrays.asList("dead", " ")), 0));
}
|
@Override
protected Future<KafkaBridgeStatus> createOrUpdate(Reconciliation reconciliation, KafkaBridge assemblyResource) {
KafkaBridgeStatus kafkaBridgeStatus = new KafkaBridgeStatus();
String namespace = reconciliation.namespace();
KafkaBridgeCluster bridge;
try {
bridge = KafkaBridgeCluster.fromCrd(reconciliation, assemblyResource, sharedEnvironmentProvider);
} catch (Exception e) {
LOGGER.warnCr(reconciliation, e);
StatusUtils.setStatusConditionAndObservedGeneration(assemblyResource, kafkaBridgeStatus, e);
return Future.failedFuture(new ReconciliationException(kafkaBridgeStatus, e));
}
KafkaClientAuthentication auth = assemblyResource.getSpec().getAuthentication();
List<CertSecretSource> trustedCertificates = assemblyResource.getSpec().getTls() == null ? Collections.emptyList() : assemblyResource.getSpec().getTls().getTrustedCertificates();
Promise<KafkaBridgeStatus> createOrUpdatePromise = Promise.promise();
boolean bridgeHasZeroReplicas = bridge.getReplicas() == 0;
String initCrbName = KafkaBridgeResources.initContainerClusterRoleBindingName(bridge.getCluster(), namespace);
ClusterRoleBinding initCrb = bridge.generateClusterRoleBinding();
LOGGER.debugCr(reconciliation, "Updating Kafka Bridge cluster");
kafkaBridgeServiceAccount(reconciliation, namespace, bridge)
.compose(i -> bridgeInitClusterRoleBinding(reconciliation, initCrbName, initCrb))
.compose(i -> deploymentOperations.scaleDown(reconciliation, namespace, bridge.getComponentName(), bridge.getReplicas(), operationTimeoutMs))
.compose(scale -> serviceOperations.reconcile(reconciliation, namespace, KafkaBridgeResources.serviceName(bridge.getCluster()), bridge.generateService()))
.compose(i -> MetricsAndLoggingUtils.metricsAndLogging(reconciliation, configMapOperations, bridge.logging(), null))
.compose(metricsAndLogging -> configMapOperations.reconcile(reconciliation, namespace, KafkaBridgeResources.metricsAndLogConfigMapName(reconciliation.name()), bridge.generateMetricsAndLogConfigMap(metricsAndLogging)))
.compose(i -> podDisruptionBudgetOperator.reconcile(reconciliation, namespace, bridge.getComponentName(), bridge.generatePodDisruptionBudget()))
.compose(i -> VertxUtil.authTlsHash(secretOperations, namespace, auth, trustedCertificates))
.compose(hash -> deploymentOperations.reconcile(reconciliation, namespace, bridge.getComponentName(), bridge.generateDeployment(Collections.singletonMap(Annotations.ANNO_STRIMZI_AUTH_HASH, Integer.toString(hash)), pfa.isOpenshift(), imagePullPolicy, imagePullSecrets)))
.compose(i -> deploymentOperations.scaleUp(reconciliation, namespace, bridge.getComponentName(), bridge.getReplicas(), operationTimeoutMs))
.compose(i -> deploymentOperations.waitForObserved(reconciliation, namespace, bridge.getComponentName(), 1_000, operationTimeoutMs))
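                // Readiness is only awaited when the bridge is scaled to at least one replica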
.compose(i -> bridgeHasZeroReplicas ? Future.succeededFuture() : deploymentOperations.readiness(reconciliation, namespace, bridge.getComponentName(), 1_000, operationTimeoutMs))
.onComplete(reconciliationResult -> {
StatusUtils.setStatusConditionAndObservedGeneration(assemblyResource, kafkaBridgeStatus, reconciliationResult.mapEmpty().cause());
if (!bridgeHasZeroReplicas) {
int port = KafkaBridgeCluster.DEFAULT_REST_API_PORT;
if (bridge.getHttp() != null) {
port = bridge.getHttp().getPort();
}
kafkaBridgeStatus.setUrl(KafkaBridgeResources.url(bridge.getCluster(), namespace, port));
}
kafkaBridgeStatus.setReplicas(bridge.getReplicas());
kafkaBridgeStatus.setLabelSelector(bridge.getSelectorLabels().toSelectorString());
if (reconciliationResult.succeeded()) {
createOrUpdatePromise.complete(kafkaBridgeStatus);
} else {
createOrUpdatePromise.fail(new ReconciliationException(kafkaBridgeStatus, reconciliationResult.cause()));
}
});
return createOrUpdatePromise.future();
}
|
@Test
public void testCreateOrUpdateThrowsWhenCreateServiceThrows(VertxTestContext context) {
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
var mockBridgeOps = supplier.kafkaBridgeOperator;
DeploymentOperator mockDcOps = supplier.deploymentOperations;
PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator;
ConfigMapOperator mockCmOps = supplier.configMapOperations;
ServiceOperator mockServiceOps = supplier.serviceOperations;
String kbName = "foo";
String kbNamespace = "test";
KafkaBridge kb = ResourceUtils.createKafkaBridge(kbNamespace, kbName, image, 1,
BOOTSTRAP_SERVERS, KAFKA_BRIDGE_PRODUCER_SPEC, KAFKA_BRIDGE_CONSUMER_SPEC, KAFKA_BRIDGE_HTTP_SPEC, true);
KafkaBridgeCluster bridge = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kb, SHARED_ENV_PROVIDER);
kb.getSpec().setImage("some/different:image"); // Change the image to generate some differences
when(mockBridgeOps.get(kbNamespace, kbName)).thenReturn(kb);
when(mockBridgeOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kb));
when(mockBridgeOps.updateStatusAsync(any(), any(KafkaBridge.class))).thenReturn(Future.succeededFuture());
when(mockServiceOps.get(kbNamespace, bridge.getComponentName())).thenReturn(bridge.generateService());
when(mockDcOps.get(kbNamespace, bridge.getComponentName())).thenReturn(bridge.generateDeployment(Map.of(), true, null, null));
when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
ArgumentCaptor<String> serviceNamespaceCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<String> serviceNameCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<Service> serviceCaptor = ArgumentCaptor.forClass(Service.class);
when(mockServiceOps.reconcile(any(), serviceNamespaceCaptor.capture(), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
ArgumentCaptor<String> dcNamespaceCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<String> dcNameCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<Deployment> dcCaptor = ArgumentCaptor.forClass(Deployment.class);
when(mockDcOps.reconcile(any(), dcNamespaceCaptor.capture(), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.failedFuture("Failed"));
ArgumentCaptor<String> dcScaleUpNamespaceCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<String> dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<Integer> dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class);
when(mockDcOps.scaleUp(any(), dcScaleUpNamespaceCaptor.capture(), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture(), anyLong())).thenReturn(Future.succeededFuture());
ArgumentCaptor<String> dcScaleDownNamespaceCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<String> dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<Integer> dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class);
when(mockDcOps.scaleDown(any(), dcScaleDownNamespaceCaptor.capture(), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture(), anyLong())).thenReturn(Future.succeededFuture());
when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture());
when(mockBridgeOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaBridge())));
when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
KafkaBridgeAssemblyOperator ops = new KafkaBridgeAssemblyOperator(vertx,
new PlatformFeaturesAvailability(true, kubernetesVersion),
new MockCertManager(), new PasswordGenerator(10, "a", "a"),
supplier,
ResourceUtils.dummyClusterOperatorConfig(VERSIONS));
Checkpoint async = context.checkpoint();
ops.createOrUpdate(new Reconciliation("test-trigger", KafkaBridge.RESOURCE_KIND, kbNamespace, kbName), kb)
.onComplete(context.failing(e -> async.flag()));
}
|
public void cleanup(QueryId queryId)
{
queryPartitionFileCounterMap.remove(queryId);
queryHiveMetadataResultMap.remove(queryId);
}
|
@Test
public void testCleanup()
{
HiveFileRenamer hiveFileRenamer = new HiveFileRenamer();
List<ConnectorMetadataUpdateHandle> requests = ImmutableList.of(TEST_HIVE_METADATA_UPDATE_REQUEST);
List<ConnectorMetadataUpdateHandle> results = hiveFileRenamer.getMetadataUpdateResults(requests, TEST_QUERY_ID);
assertEquals(results.size(), 1);
HiveMetadataUpdateHandle result = (HiveMetadataUpdateHandle) results.get(0);
assertEquals(result.getMetadataUpdate(), Optional.of("0"));
hiveFileRenamer.cleanup(TEST_QUERY_ID);
requests = ImmutableList.of(TEST_HIVE_METADATA_UPDATE_REQUEST);
results = hiveFileRenamer.getMetadataUpdateResults(requests, TEST_QUERY_ID);
assertEquals(results.size(), 1);
result = (HiveMetadataUpdateHandle) results.get(0);
assertEquals(result.getMetadataUpdate(), Optional.of("0"));
}
|
@Override
public String pluginNamed() {
return PluginEnum.LOGGING_TENCENT_CLS.getName();
}
|
@Test
public void testPluginNamed() {
Assertions.assertEquals(loggingTencentClsPluginDataHandler.pluginNamed(), PluginEnum.LOGGING_TENCENT_CLS.getName());
}
|
@Override
public boolean implies(Permission permission) {
if (permission instanceof FilePermission requestPermission) {
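            // Permit unless the path falls under the blocked set, with a carve-out for the tmp directory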
return !blockedFilePermission.implies(requestPermission) || tmpFilePermission.implies(requestPermission);
}
return true;
}
|
@Test
public void policy_restricts_modifying_home() {
assertThat(rule.implies(new FilePermission(Paths.get("/path/to/home/file").toAbsolutePath().toString(), "write"))).isFalse();
assertThat(rule.implies(new FilePermission(Paths.get("/path/to/home/file").toAbsolutePath().toString(), "execute"))).isFalse();
assertThat(rule.implies(new FilePermission(Paths.get("/path/to/home/file").toAbsolutePath().toString(), "delete"))).isFalse();
assertThat(rule.implies(new FilePermission(Paths.get("/path/to/home/file").toAbsolutePath().toString(), "read"))).isTrue();
assertThat(rule.implies(new FilePermission(Paths.get("/path/to/home/file").toAbsolutePath().toString(), "readlink"))).isTrue();
assertThat(rule.implies(new FilePermission(Paths.get("/path/to/home/extensions/file").toAbsolutePath().toString(), "write"))).isFalse();
assertThat(rule.implies(new FilePermission(Paths.get("/path/to/").toAbsolutePath().toString(), "write"))).isTrue();
}
|
@Override
public Batch toBatch() {
return new SparkBatch(
sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode());
}
|
@TestTemplate
public void testPartitionedTruncateString() throws Exception {
createPartitionedTable(spark, tableName, "truncate(4, data)");
SparkScanBuilder builder = scanBuilder();
TruncateFunction.TruncateString function = new TruncateFunction.TruncateString();
UserDefinedScalarFunc udf = toUDF(function, expressions(intLit(4), fieldRef("data")));
Predicate predicate = new Predicate("<>", expressions(udf, stringLit("data")));
pushFilters(builder, predicate);
Batch scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(5);
// NOT NotEqual
builder = scanBuilder();
predicate = new Not(predicate);
pushFilters(builder, predicate);
scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(5);
}
|
static long sizeOf(Mutation m) {
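    // Deletes are costed by their key set; other mutations by summing the estimated size of each value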
if (m.getOperation() == Mutation.Op.DELETE) {
return sizeOf(m.getKeySet());
}
long result = 0;
for (Value v : m.getValues()) {
switch (v.getType().getCode()) {
case ARRAY:
result += estimateArrayValue(v);
break;
case STRUCT:
throw new IllegalArgumentException("Structs are not supported in mutation.");
default:
result += estimatePrimitiveValue(v);
}
}
return result;
}
|
@Test
public void protos() throws Exception {
Mutation empty =
Mutation.newInsertOrUpdateBuilder("test")
.set("one")
.to(ByteArray.fromBase64(""), "customer.app.TestMessage")
.build();
Mutation nullValue =
Mutation.newInsertOrUpdateBuilder("test")
.set("one")
.to((ByteArray) null, "customer.app.TestMessage")
.build();
Mutation sample =
Mutation.newInsertOrUpdateBuilder("test")
.set("one")
.to(ByteArray.fromBase64("abcdabcd"), "customer.app.TestMessage")
.build();
Mutation nullArray =
Mutation.newInsertOrUpdateBuilder("test")
.set("one")
.toProtoMessageArray(null, "customer.app.TestMessage")
.build();
Mutation deleteBytes =
Mutation.delete("test", Key.of(ByteArray.copyFrom("some_bytes".getBytes(UTF_8))));
assertThat(MutationSizeEstimator.sizeOf(empty), is(0L));
assertThat(MutationSizeEstimator.sizeOf(nullValue), is(0L));
assertThat(MutationSizeEstimator.sizeOf(sample), is(6L));
assertThat(MutationSizeEstimator.sizeOf(nullArray), is(0L));
assertThat(MutationSizeEstimator.sizeOf(deleteBytes), is(10L));
}
|
void release() {
Arrays.stream(subpartitionCacheManagers).forEach(SubpartitionDiskCacheManager::release);
partitionFileWriter.release();
}
|
@Test
void testRelease() {
AtomicBoolean isReleased = new AtomicBoolean(false);
TestingTieredStorageMemoryManager memoryManager =
new TestingTieredStorageMemoryManager.Builder().build();
TestingPartitionFileWriter partitionFileWriter =
new TestingPartitionFileWriter.Builder()
.setReleaseRunnable(() -> isReleased.set(true))
.build();
DiskCacheManager diskCacheManager =
new DiskCacheManager(
TieredStorageIdMappingUtils.convertId(new ResultPartitionID()),
1,
1024,
memoryManager,
partitionFileWriter);
diskCacheManager.release();
assertThat(isReleased).isTrue();
}
|
public ModelMBeanInfo getMBeanInfo(Object defaultManagedBean, Object customManagedBean, String objectName) throws JMException {
        if ((defaultManagedBean == null && customManagedBean == null) || objectName == null) {
            return null;
        }
// skip proxy classes
if (defaultManagedBean != null && Proxy.isProxyClass(defaultManagedBean.getClass())) {
LOGGER.trace("Skip creating ModelMBeanInfo due proxy class {}", defaultManagedBean.getClass());
return null;
}
// maps and lists to contain information about attributes and operations
Map<String, ManagedAttributeInfo> attributes = new LinkedHashMap<>();
Set<ManagedOperationInfo> operations = new LinkedHashSet<>();
Set<ModelMBeanAttributeInfo> mBeanAttributes = new LinkedHashSet<>();
Set<ModelMBeanOperationInfo> mBeanOperations = new LinkedHashSet<>();
Set<ModelMBeanNotificationInfo> mBeanNotifications = new LinkedHashSet<>();
// extract details from default managed bean
if (defaultManagedBean != null) {
extractAttributesAndOperations(defaultManagedBean.getClass(), attributes, operations);
extractMbeanAttributes(defaultManagedBean, attributes, mBeanAttributes, mBeanOperations);
extractMbeanOperations(defaultManagedBean, operations, mBeanOperations);
extractMbeanNotifications(defaultManagedBean, mBeanNotifications);
}
// extract details from custom managed bean
if (customManagedBean != null) {
extractAttributesAndOperations(customManagedBean.getClass(), attributes, operations);
extractMbeanAttributes(customManagedBean, attributes, mBeanAttributes, mBeanOperations);
extractMbeanOperations(customManagedBean, operations, mBeanOperations);
extractMbeanNotifications(customManagedBean, mBeanNotifications);
}
// create the ModelMBeanInfo
String name = getName(customManagedBean != null ? customManagedBean : defaultManagedBean, objectName);
String description = getDescription(customManagedBean != null ? customManagedBean : defaultManagedBean, objectName);
ModelMBeanAttributeInfo[] arrayAttributes = mBeanAttributes.toArray(new ModelMBeanAttributeInfo[mBeanAttributes.size()]);
ModelMBeanOperationInfo[] arrayOperations = mBeanOperations.toArray(new ModelMBeanOperationInfo[mBeanOperations.size()]);
ModelMBeanNotificationInfo[] arrayNotifications = mBeanNotifications.toArray(new ModelMBeanNotificationInfo[mBeanNotifications.size()]);
ModelMBeanInfo info = new ModelMBeanInfoSupport(name, description, arrayAttributes, null, arrayOperations, arrayNotifications);
LOGGER.trace("Created ModelMBeanInfo {}", info);
return info;
}
|
@Test
public void testNullInputs() throws JMException {
// at least one of the first parameters should be not null
assertThat(mbeanInfoAssembler.getMBeanInfo(null, null, "")).isNull();
// mbean should be not null
assertThat(mbeanInfoAssembler.getMBeanInfo(testMbean, testMbean, null)).isNull();
        // it should return something if at least one of the first parameters is not null
NotManagedMBean notManagedMbean = new NotManagedMBean();
assertThat(mbeanInfoAssembler.getMBeanInfo(null, notManagedMbean, "someName")).isNotNull();
assertThat(mbeanInfoAssembler.getMBeanInfo(notManagedMbean, null, "someName")).isNotNull();
}
|
public String getShare() {
return share;
}
|
@Test
void shareForValidURIShouldBeExtracted4() {
var remoteConf = context.getEndpoint("azure-files://account/share/path", FilesEndpoint.class).getConfiguration();
assertEquals("share", remoteConf.getShare());
}
|
public void readChapterSubFrame(@NonNull FrameHeader frameHeader, @NonNull Chapter chapter)
throws IOException, ID3ReaderException {
Log.d(TAG, "Handling subframe: " + frameHeader.toString());
int frameStartPosition = getPosition();
switch (frameHeader.getId()) {
case FRAME_ID_TITLE:
chapter.setTitle(readEncodingAndString(frameHeader.getSize()));
Log.d(TAG, "Found title: " + chapter.getTitle());
break;
case FRAME_ID_LINK:
readEncodingAndString(frameHeader.getSize()); // skip description
String url = readIsoStringNullTerminated(frameStartPosition + frameHeader.getSize() - getPosition());
try {
String decodedLink = URLDecoder.decode(url, "ISO-8859-1");
chapter.setLink(decodedLink);
Log.d(TAG, "Found link: " + chapter.getLink());
} catch (IllegalArgumentException iae) {
Log.w(TAG, "Bad URL found in ID3 data");
}
break;
case FRAME_ID_PICTURE:
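                // APIC sub-frame: either a link to an external image or embedded picture data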
byte encoding = readByte();
String mime = readIsoStringNullTerminated(frameHeader.getSize());
byte type = readByte();
String description = readEncodedString(encoding, frameHeader.getSize());
Log.d(TAG, "Found apic: " + mime + "," + description);
if (MIME_IMAGE_URL.equals(mime)) {
String link = readIsoStringNullTerminated(frameHeader.getSize());
Log.d(TAG, "Link: " + link);
if (TextUtils.isEmpty(chapter.getImageUrl()) || type == IMAGE_TYPE_COVER) {
chapter.setImageUrl(link);
}
} else {
int alreadyConsumed = getPosition() - frameStartPosition;
int rawImageDataLength = frameHeader.getSize() - alreadyConsumed;
if (TextUtils.isEmpty(chapter.getImageUrl()) || type == IMAGE_TYPE_COVER) {
chapter.setImageUrl(EmbeddedChapterImage.makeUrl(getPosition(), rawImageDataLength));
}
}
break;
default:
Log.d(TAG, "Unknown chapter sub-frame.");
break;
}
// Skip garbage to fill frame completely
// This also asserts that we are not reading too many bytes from this frame.
int alreadyConsumed = getPosition() - frameStartPosition;
skipBytes(frameHeader.getSize() - alreadyConsumed);
}
|
@Test
public void testReadTitleWithGarbage() throws IOException, ID3ReaderException {
byte[] titleSubframeContent = {
ID3Reader.ENCODING_ISO,
'A', // Title
0, // Null-terminated
42, 42, 42, 42 // Garbage, should be ignored
};
FrameHeader header = new FrameHeader(ChapterReader.FRAME_ID_TITLE, titleSubframeContent.length, (short) 0);
CountingInputStream inputStream = new CountingInputStream(new ByteArrayInputStream(titleSubframeContent));
ChapterReader reader = new ChapterReader(inputStream);
Chapter chapter = new Chapter();
reader.readChapterSubFrame(header, chapter);
assertEquals("A", chapter.getTitle());
// Should skip the garbage and point to the next frame
assertEquals(titleSubframeContent.length, reader.getPosition());
}
|
@Override
public Optional<Request.RequestType> getRequestType() { return Optional.of(Request.RequestType.READ); }
|
@Test
void testRequestType() throws Exception {
IOUtils.copyDirectory(new File(testDir, "config_yql"), new File(tempDir), 1);
generateComponentsConfigForActive();
configurer.reloadConfig();
SearchHandler newSearchHandler = fetchSearchHandler(configurer);
try (RequestHandlerTestDriver newDriver = new RequestHandlerTestDriver(newSearchHandler)) {
RequestHandlerTestDriver.MockResponseHandler responseHandler = newDriver.sendRequest(
"http://localhost/search/?query=foo");
responseHandler.readAll();
assertEquals(Request.RequestType.READ, responseHandler.getResponse().getRequestType());
}
}
|
public static Value that(Object actual) {
return new Value(parseIfJsonOrXmlString(actual), true);
}
|
@Test
void testJavaSet() {
        Set<String> set = new HashSet<>();
set.add("foo");
set.add("bar");
Match.that(set).containsOnly("['foo', 'bar']");
}
|
@Override
public CompletableFuture<T> toCompletableFuture()
{
return _task.toCompletionStage().toCompletableFuture();
}
|
@Test
public void testToCompletableFuture_fail() throws Exception
{
    CompletableFuture<?> completableFuture = createTestFailedStage(EXCEPTION).toCompletableFuture();
    try {
      completableFuture.get();
      fail("Expected the future to complete exceptionally");
    } catch (Exception e) {
      assertEquals(e.getCause(), EXCEPTION);
    }
}
|
@Override
protected TableOperations newTableOps(TableIdentifier tableIdentifier) {
String fileIOImpl = DEFAULT_FILE_IO_IMPL;
if (catalogProperties.containsKey(CatalogProperties.FILE_IO_IMPL)) {
fileIOImpl = catalogProperties.get(CatalogProperties.FILE_IO_IMPL);
}
// Initialize a fresh FileIO for each TableOperations created, because some FileIO
// implementations such as S3FileIO can become bound to a single S3 bucket. Additionally,
// FileIO implementations often support only a finite set of one or more URI schemes (i.e.
// S3FileIO only supports s3/s3a/s3n, and even ResolvingFileIO only supports the combination
// of schemes registered for S3FileIO and HadoopFileIO). Individual catalogs may need to
// support tables across different cloud/storage providers with disjoint FileIO implementations.
FileIO fileIO = fileIOFactory.newFileIO(fileIOImpl, catalogProperties, conf);
closeableGroup.addCloseable(fileIO);
return new SnowflakeTableOperations(snowflakeClient, fileIO, catalogName, tableIdentifier);
}
|
@Test
public void testTableNameFromTableOperations() {
SnowflakeTableOperations castedTableOps =
(SnowflakeTableOperations)
catalog.newTableOps(TableIdentifier.of("DB_1", "SCHEMA_1", "TAB_1"));
assertThat(castedTableOps.fullTableName()).isEqualTo("slushLog.DB_1.SCHEMA_1.TAB_1");
}
|
@Override
public Flux<BooleanResponse<ExpireCommand>> expire(Publisher<ExpireCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
byte[] keyBuf = toByteArray(command.getKey());
Mono<Boolean> m = write(keyBuf, StringCodec.INSTANCE, EXPIRE, keyBuf, command.getTimeout().getSeconds());
return m.map(v -> new BooleanResponse<>(command, v));
});
}
|
@Test
public void testExpiration() {
RedissonConnectionFactory factory = new RedissonConnectionFactory(redisson);
ReactiveStringRedisTemplate t = new ReactiveStringRedisTemplate(factory);
t.opsForValue().set("123", "4343").block();
t.expire("123", Duration.ofMillis(1001)).block();
assertThat(t.getExpire("123").block().toMillis()).isBetween(900L, 1000L);
}
|
public static MongoSinkConfig load(String yamlFile) throws IOException {
final ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
        return mapper.readValue(new File(yamlFile), MongoSinkConfig.class);
}
|
@Test
public void testLoadYamlConfig() throws IOException {
final File yaml = TestHelper.getFile(MongoSinkConfigTest.class, "mongoSinkConfig.yaml");
final MongoSinkConfig cfg = MongoSinkConfig.load(yaml.getAbsolutePath());
assertEquals(cfg.getMongoUri(), TestHelper.URI);
assertEquals(cfg.getDatabase(), TestHelper.DB);
assertEquals(cfg.getCollection(), TestHelper.COLL);
assertEquals(cfg.getBatchSize(), TestHelper.BATCH_SIZE);
assertEquals(cfg.getBatchTimeMs(), TestHelper.BATCH_TIME);
}
|
@Override public Boolean trySample(MessagingRequest request) {
return delegate.trySample(request);
}
|
@Test void nullOnNull() {
assertThat(sampler.trySample(null))
.isNull();
}
|
@Override
public MetricsCollector create(final MetricConfiguration metricConfig) {
switch (metricConfig.getType()) {
case COUNTER:
return new PrometheusMetricsCounterCollector(metricConfig);
case GAUGE:
return new PrometheusMetricsGaugeCollector(metricConfig);
case HISTOGRAM:
return new PrometheusMetricsHistogramCollector(metricConfig);
case SUMMARY:
return new PrometheusMetricsSummaryCollector(metricConfig);
case GAUGE_METRIC_FAMILY:
return new PrometheusMetricsGaugeMetricFamilyCollector(metricConfig);
default:
throw new UnsupportedOperationException(String.format("Can not support type `%s`.", metricConfig.getType()));
}
}
|
@Test
void assertCreateSummaryCollector() {
MetricConfiguration config = new MetricConfiguration("test_summary", MetricCollectorType.SUMMARY, null, Collections.emptyList(), Collections.emptyMap());
assertThat(new PrometheusMetricsCollectorFactory().create(config), instanceOf(PrometheusMetricsSummaryCollector.class));
}
|
@VisibleForTesting
static byte[] padBigEndianBytes(byte[] bigEndianBytes, int newLength) {
if (bigEndianBytes.length == newLength) {
return bigEndianBytes;
} else if (bigEndianBytes.length < newLength) {
byte[] result = new byte[newLength];
if (bigEndianBytes.length == 0) {
return result;
}
int start = newLength - bigEndianBytes.length;
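      // A negative two's-complement value must be padded with 0xFF bytes to preserve its sign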
if (bigEndianBytes[0] < 0) {
Arrays.fill(result, 0, start, (byte) 0xFF);
}
System.arraycopy(bigEndianBytes, 0, result, start, bigEndianBytes.length);
return result;
}
throw new IllegalArgumentException(
String.format(
"Buffer size of %d is larger than requested size of %d",
bigEndianBytes.length, newLength));
}
|
@Test
public void testPadBigEndianBytesNegative() {
BigInteger bigInt = new BigInteger("-12345");
byte[] bytes = bigInt.toByteArray();
byte[] paddedBytes = DecimalVectorUtil.padBigEndianBytes(bytes, 16);
assertThat(paddedBytes).hasSize(16);
BigInteger result = new BigInteger(paddedBytes);
assertThat(result).isEqualTo(bigInt);
}
|
public static <T> CompletionStage<T> recover(CompletionStage<T> completionStage, Function<Throwable, T> exceptionHandler){
return completionStage.exceptionally(exceptionHandler);
}
|
@Test
public void shouldReturnExceptionFromRecoveryMethod() {
CompletableFuture<String> future = new CompletableFuture<>();
future.completeExceptionally(new RuntimeException("bla"));
RuntimeException exception = new RuntimeException("blub");
Function<Throwable, String> fallback = (e) -> {
throw exception;
};
assertThatThrownBy(() -> recover(future, fallback).toCompletableFuture()
.get(1, TimeUnit.SECONDS)).hasCause(exception);
}
|
@Nonnull
@Override
public ILogger getLogger(@Nonnull String name) {
checkNotNull(name, "name must not be null");
return getOrPutIfAbsent(mapLoggers, name, loggerConstructor);
}
|
@Test
public void testLog_whenGetLevel_thenDefaultLevelIsReturned() {
ILogger logger = loggingService.getLogger("test");
assertNotNull(logger.getLevel());
}
|
static Map<Address, List<Shard>> assignShards(Collection<Shard> shards, Collection<Address> addresses) {
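        // Greedy strategy: each index shard goes to the least-loaded address that hosts one of its replicas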
Map<String, List<String>> assignment = addresses.stream() // host -> [indexShard...]
.map(Address::getHost).distinct().collect(toMap(identity(), a -> new ArrayList<>()));
Map<String, List<String>> nodeCandidates = shards.stream() // indexShard -> [host...]
.collect(groupingBy(Shard::indexShard, mapping(Shard::getIp, toList())));
// Make the assignment
nodeCandidates.forEach((indexShard, hosts) -> hosts.stream()
.map(assignment::get)
.filter(Objects::nonNull)
.min(comparingInt(List::size))
.orElseThrow(() -> new IllegalStateException("Selected members do not contain shard '" + indexShard + "'"))
.add(indexShard));
// Transform the results
Map<String, List<Address>> addressMap = addresses.stream().collect(groupingBy(Address::getHost, toList()));
Map<String, Shard> shardMap = shards.stream().collect(toMap(s -> s.indexShard() + "@" + s.getIp(), identity()));
return assignment.entrySet().stream()
.flatMap(e -> {
List<Address> a = addressMap.get(e.getKey());
List<Shard> s = e.getValue().stream()
.map(indexShard -> shardMap.get(indexShard + "@" + e.getKey())).toList();
int c = (int) Math.ceil((double) s.size() / a.size());
return IntStream.range(0, a.size())
.mapToObj(i -> entry(a.get(i), List.copyOf(s.subList(i * c, Math.min((i + 1) * c, s.size())))));
}).collect(toMap(Entry::getKey, Entry::getValue));
}
|
@Test
public void given_multipleReplicasForEachShard_when_assignShards_then_shouldAssignOneReplicaOnly() {
List<Shard> shards = newArrayList(
new Shard("elastic-index", 0, Prirep.p, 10, "STARTED", "10.0.0.1", "10.0.0.1:9200", "node1"),
new Shard("elastic-index", 0, Prirep.r, 10, "STARTED", "10.0.0.2", "10.0.0.1:9200", "node2"),
new Shard("elastic-index", 0, Prirep.r, 10, "STARTED", "10.0.0.3", "10.0.0.1:9200", "node3"),
new Shard("elastic-index", 1, Prirep.p, 10, "STARTED", "10.0.0.2", "10.0.0.1:9200", "node2"),
new Shard("elastic-index", 1, Prirep.r, 10, "STARTED", "10.0.0.3", "10.0.0.1:9200", "node3"),
new Shard("elastic-index", 1, Prirep.r, 10, "STARTED", "10.0.0.1", "10.0.0.1:9200", "node1"),
new Shard("elastic-index", 2, Prirep.p, 10, "STARTED", "10.0.0.3", "10.0.0.1:9200", "node3"),
new Shard("elastic-index", 2, Prirep.r, 10, "STARTED", "10.0.0.1", "10.0.0.1:9200", "node1"),
new Shard("elastic-index", 2, Prirep.r, 10, "STARTED", "10.0.0.2", "10.0.0.1:9200", "node2")
);
Collections.shuffle(shards, new Random());
List<Address> addresses = addresses("10.0.0.1", "10.0.0.2", "10.0.0.3");
Map<Address, List<Shard>> assignment = ElasticSourcePMetaSupplier.assignShards(shards, addresses);
assertThat(assignment)
.containsOnlyKeys(addresses)
.allSatisfy((address, shardList) -> assertThat(shardList)
.hasSize(1) // shards are distributed evenly
.allMatch(shard -> shard.getIp().equals(address.getHost()))) // shards are correctly assigned
// all shards are assigned
.satisfies(a -> assertThat(a.values().stream().flatMap(List::stream).map(Shard::indexShard))
.containsExactlyInAnyOrder("elastic-index-0", "elastic-index-1", "elastic-index-2"));
}
|
public RepositoryElementInterface dataNodeToElement( DataNode rootNode ) throws KettleException {
SlaveServer slaveServer = new SlaveServer();
dataNodeToElement( rootNode, slaveServer );
return slaveServer;
}
|
@Test
public void testDataNodeToElement() throws KettleException {
SlaveServer slaveServer = new SlaveServer();
slaveDelegate.dataNodeToElement( mockDataNode, slaveServer );
Assert.assertEquals( PROP_HOST_NAME_VALUE, slaveServer.getHostname() );
Assert.assertEquals( PROP_USERNAME_VALUE, slaveServer.getUsername() );
Assert.assertEquals( PROP_PASSWORD_VALUE, slaveServer.getPassword() );
Assert.assertEquals( PROP_PORT_VALUE, slaveServer.getPort() );
Assert.assertEquals( PROP_PROXY_HOST_NAME_VALUE, slaveServer.getProxyHostname() );
Assert.assertEquals( PROP_PROXY_PORT_VALUE, slaveServer.getProxyPort() );
Assert.assertEquals( PROP_WEBAPP_NAME_VALUE, slaveServer.getWebAppName() );
Assert.assertEquals( PROP_NON_PROXY_HOSTS_VALUE, slaveServer.getNonProxyHosts() );
Assert.assertEquals( PROP_MASTER_VALUE, slaveServer.isMaster() );
Assert.assertEquals( PROP_USE_HTTPS_PROTOCOL_VALUE, slaveServer.isSslMode() );
}
|
public static void trimRecordTemplate(RecordTemplate recordTemplate, MaskTree override, final boolean failOnMismatch)
{
trimRecordTemplate(recordTemplate.data(), recordTemplate.schema(), override, failOnMismatch);
}
|
@Test
public void testUnionDataMap() throws CloneNotSupportedException
{
UnionTest foo = new UnionTest();
foo.setUnionEmpty(new UnionTest.UnionEmpty());
UnionTest expected = foo.copy();
((DataMap) foo.getUnionEmpty().data()).put("foo", "bar");
RestUtils.trimRecordTemplate(foo, false);
Assert.assertEquals(foo, expected);
// Primitive case
foo = new UnionTest();
UnionTest.UnionWithNull bar = new UnionTest.UnionWithNull();
bar.setBoolean(true);
foo.setUnionWithNull(bar);
expected = foo.copy();
((DataMap)foo.getUnionWithNull().data()).put("foo", "bar");
Assert.assertEquals(((DataMap) foo.getUnionWithNull().data()).size(), 2);
RestUtils.trimRecordTemplate(foo, false);
Assert.assertEquals(foo, expected);
// Complex case
foo = new UnionTest();
bar = new UnionTest.UnionWithNull();
bar.setMap(new LongMap());
foo.setUnionWithNull(bar);
expected = foo.copy();
expected.getUnionWithNull().getMap().put("foo", 1L);
foo.getUnionWithNull().getMap().data().put("foo", 1L);
foo.data().put("foo", "bar");
Assert.assertEquals(((DataMap) foo.getUnionWithNull().data()).size(), 1);
RestUtils.trimRecordTemplate(foo, false);
Assert.assertEquals(foo, expected);
}
|
public long minOffset(MessageQueue mq) throws MQClientException {
return this.mQClientFactory.getMQAdminImpl().minOffset(mq);
}
|
@Test
public void testMinOffset() throws MQClientException {
assertEquals(0, defaultMQPushConsumerImpl.minOffset(createMessageQueue()));
}
|
public CompletionStage<Void> migrate(MigrationSet set) {
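        // Migrations for a set are serialized cluster-wide via a ZooKeeper-backed mutex keyed by the set id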
InterProcessLock lock = new InterProcessSemaphoreMutex(client.unwrap(), ZKPaths.makePath(lockPath, set.id()));
CompletionStage<Void> lockStage = lockAsync(lock, lockMax.toMillis(), TimeUnit.MILLISECONDS, executor);
return lockStage.thenCompose(__ -> runMigrationInLock(lock, set));
}
|
@Test
public void testChecksumPathError() {
CuratorOp op1 = client.transactionOp().create().forPath("/test2");
CuratorOp op2 = client.transactionOp().create().forPath("/test2/bar");
Migration migration = () -> Arrays.asList(op1, op2);
MigrationSet migrationSet = MigrationSet.build("1", Collections.singletonList(migration));
complete(manager.migrate(migrationSet));
CuratorOp op2Changed = client.transactionOp().create().forPath("/test/bar");
migration = () -> Arrays.asList(op1, op2Changed);
migrationSet = MigrationSet.build("1", Collections.singletonList(migration));
try {
complete(manager.migrate(migrationSet));
fail("Should throw");
} catch (Throwable e) {
assertTrue(Throwables.getRootCause(e) instanceof MigrationException);
}
}
|
@VisibleForTesting
static Document buildQuery(TupleDomain<ColumnHandle> tupleDomain)
{
Document query = new Document();
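        // Merge each column's predicate into one filter document; top-level fields combine as an implicit AND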
if (tupleDomain.getDomains().isPresent()) {
for (Map.Entry<ColumnHandle, Domain> entry : tupleDomain.getDomains().get().entrySet()) {
MongoColumnHandle column = (MongoColumnHandle) entry.getKey();
query.putAll(buildPredicate(column, entry.getValue()));
}
}
return query;
}
|
@Test
public void testBuildQueryOr()
{
TupleDomain<ColumnHandle> tupleDomain = TupleDomain.withColumnDomains(ImmutableMap.of(
COL1, Domain.create(ValueSet.ofRanges(lessThan(BIGINT, 100L), greaterThan(BIGINT, 200L)), false)));
Document query = MongoSession.buildQuery(tupleDomain);
Document expected = new Document("$or", asList(
new Document(COL1.getName(), new Document("$lt", 100L)),
new Document(COL1.getName(), new Document("$gt", 200L))));
assertEquals(query, expected);
}
|
void cleanExpiredRequestInQueue(final BlockingQueue<Runnable> blockingQueue, final long maxWaitTimeMillsInQueue) {
while (true) {
try {
if (!blockingQueue.isEmpty()) {
final Runnable runnable = blockingQueue.peek();
if (null == runnable) {
break;
}
final RequestTask rt = castRunnable(runnable);
if (rt == null || rt.isStopRun()) {
break;
}
final long behind = System.currentTimeMillis() - rt.getCreateTimestamp();
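                    // Requests queued longer than the limit are removed and answered with SYSTEM_BUSY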
if (behind >= maxWaitTimeMillsInQueue) {
if (blockingQueue.remove(runnable)) {
rt.setStopRun(true);
rt.returnResponse(RemotingSysResponseCode.SYSTEM_BUSY, String.format("[TIMEOUT_CLEAN_QUEUE]broker busy, start flow control for a while, period in queue: %sms, size of queue: %d", behind, blockingQueue.size()));
if (System.currentTimeMillis() - jstackTime > 15000) {
jstackTime = System.currentTimeMillis();
LOGGER.warn("broker jstack \n " + UtilAll.jstack());
}
}
} else {
break;
}
} else {
break;
}
} catch (Throwable ignored) {
}
}
}
|
@Test
public void testCleanExpiredRequestInQueue() throws Exception {
BrokerFastFailure brokerFastFailure = new BrokerFastFailure(brokerController);
BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();
brokerFastFailure.cleanExpiredRequestInQueue(queue, 1);
assertThat(queue.size()).isZero();
//Normal Runnable
Runnable runnable = new Runnable() {
@Override
public void run() {
}
};
queue.add(runnable);
assertThat(queue.size()).isEqualTo(1);
brokerFastFailure.cleanExpiredRequestInQueue(queue, 1);
assertThat(queue.size()).isEqualTo(1);
queue.clear();
//With expired request
RequestTask expiredRequest = new RequestTask(runnable, null, null);
queue.add(new FutureTaskExt<>(expiredRequest, null));
TimeUnit.MILLISECONDS.sleep(100);
RequestTask requestTask = new RequestTask(runnable, null, null);
queue.add(new FutureTaskExt<>(requestTask, null));
assertThat(queue.size()).isEqualTo(2);
brokerFastFailure.cleanExpiredRequestInQueue(queue, 100);
assertThat(queue.size()).isEqualTo(1);
assertThat(((FutureTaskExt) queue.peek()).getRunnable()).isEqualTo(requestTask);
}
|
public Map<String, String> build() {
Map<String, String> builder = new HashMap<>();
configureFileSystem(builder);
configureNetwork(builder);
configureCluster(builder);
configureSecurity(builder);
configureOthers(builder);
LOGGER.info("Elasticsearch listening on [HTTP: {}:{}, TCP: {}:{}]",
builder.get(ES_HTTP_HOST_KEY), builder.get(ES_HTTP_PORT_KEY),
builder.get(ES_TRANSPORT_HOST_KEY), builder.get(ES_TRANSPORT_PORT_KEY));
return builder;
}
|
@Test
public void configureSecurity_whenHttpKeystoreProvided_shouldAddHttpProperties() throws Exception {
Props props = minProps(true);
File keystore = temp.newFile("keystore.p12");
File truststore = temp.newFile("truststore.p12");
File httpKeystore = temp.newFile("http-keystore.p12");
props.set(CLUSTER_SEARCH_PASSWORD.getKey(), "qwerty");
props.set(CLUSTER_ES_KEYSTORE.getKey(), keystore.getAbsolutePath());
props.set(CLUSTER_ES_TRUSTSTORE.getKey(), truststore.getAbsolutePath());
props.set(CLUSTER_ES_HTTP_KEYSTORE.getKey(), httpKeystore.getAbsolutePath());
EsSettings settings = new EsSettings(props, new EsInstallation(props), system);
Map<String, String> outputParams = settings.build();
assertThat(outputParams)
.containsEntry("xpack.security.http.ssl.enabled", "true")
.containsEntry("xpack.security.http.ssl.keystore.path", httpKeystore.getName());
}
|
@Override
public void initialize(PulsarClient client) {
this.pulsarClient = (PulsarClientImpl) client;
ClientConfigurationData config = pulsarClient.getConfiguration();
if (config != null) {
this.primaryAuthentication = config.getAuthentication();
this.primaryTlsTrustCertsFilePath = config.getTlsTrustCertsFilePath();
this.primaryTlsTrustStorePath = config.getTlsTrustStorePath();
this.primaryTlsTrustStorePassword = config.getTlsTrustStorePassword();
}
// start to probe primary cluster active or not
this.executor.scheduleAtFixedRate(catchingAndLoggingThrowables(() -> {
if (currentPulsarServiceUrl.equals(primary)) {
// current service url is primary, probe whether it is down
probeAndUpdateServiceUrl(secondary, secondaryAuthentications, secondaryTlsTrustCertsFilePaths,
secondaryTlsTrustStorePaths, secondaryTlsTrustStorePasswords);
} else {
// current service url is secondary, probe whether it is down
probeAndUpdateServiceUrl(primary, primaryAuthentication, primaryTlsTrustCertsFilePath,
primaryTlsTrustStorePath, primaryTlsTrustStorePassword);
// secondary cluster is up, check whether need to switch back to primary or not
if (!currentPulsarServiceUrl.equals(primary)) {
probeAndCheckSwitchBack(primary, primaryAuthentication, primaryTlsTrustCertsFilePath,
primaryTlsTrustStorePath, primaryTlsTrustStorePassword);
}
}
}), intervalMs, intervalMs, TimeUnit.MILLISECONDS);
}
|
@Test
public void testInitialize() throws Exception {
String primary = "pulsar://localhost:6650";
String secondary = "pulsar://localhost:6651";
long failoverDelay = 10;
long switchBackDelay = 10;
long checkInterval = 1_000;
ClientConfigurationData configurationData = new ClientConfigurationData();
@Cleanup
ServiceUrlProvider provider = AutoClusterFailover.builder()
.primary(primary)
.secondary(Collections.singletonList(secondary))
.failoverDelay(failoverDelay, TimeUnit.MILLISECONDS)
.switchBackDelay(switchBackDelay, TimeUnit.MILLISECONDS)
.checkInterval(checkInterval, TimeUnit.MILLISECONDS)
.build();
AutoClusterFailover autoClusterFailover = Mockito.spy((AutoClusterFailover) provider);
PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class);
ConnectionPool connectionPool = mock(ConnectionPool.class);
when(pulsarClient.getCnxPool()).thenReturn(connectionPool);
Mockito.doReturn(false).when(autoClusterFailover).probeAvailable(primary);
Mockito.doReturn(true).when(autoClusterFailover).probeAvailable(secondary);
Mockito.doReturn(configurationData).when(pulsarClient).getConfiguration();
autoClusterFailover.initialize(pulsarClient);
for (int i = 0; i < 2; i++) {
Awaitility.await().untilAsserted(() ->
assertEquals(autoClusterFailover.getServiceUrl(), secondary));
assertEquals(autoClusterFailover.getFailedTimestamp(), -1);
// primary cluster came back
Mockito.doReturn(true).when(autoClusterFailover).probeAvailable(primary);
Awaitility.await().untilAsserted(() ->
assertEquals(autoClusterFailover.getServiceUrl(), primary));
assertEquals(autoClusterFailover.getRecoverTimestamp(), -1);
assertEquals(autoClusterFailover.getFailedTimestamp(), -1);
Mockito.doReturn(false).when(autoClusterFailover).probeAvailable(primary);
}
}
|
@Override
public Iterable<Link> getLinks() {
checkPermission(LINK_READ);
return store.getLinks();
}
|
@Test
public void getLinks() {
Link l1 = addLink(DID1, P1, DID2, P2, DIRECT);
Link l2 = addLink(DID2, P2, DID1, P1, DIRECT);
Link l3 = addLink(DID3, P3, DID2, P1, DIRECT);
Link l4 = addLink(DID2, P1, DID3, P3, DIRECT);
assertEquals("incorrect link count", 4, service.getLinkCount());
Set<Link> links = service.getLinks(cp(DID1, P1));
assertEquals("incorrect links", ImmutableSet.of(l1, l2), links);
links = service.getEgressLinks(cp(DID1, P1));
assertEquals("incorrect links", ImmutableSet.of(l1), links);
links = service.getIngressLinks(cp(DID1, P1));
assertEquals("incorrect links", ImmutableSet.of(l2), links);
links = service.getDeviceLinks(DID2);
assertEquals("incorrect links", ImmutableSet.of(l1, l2, l3, l4), links);
links = service.getDeviceLinks(DID3);
assertEquals("incorrect links", ImmutableSet.of(l3, l4), links);
links = service.getDeviceEgressLinks(DID2);
assertEquals("incorrect links", ImmutableSet.of(l2, l4), links);
links = service.getDeviceIngressLinks(DID2);
assertEquals("incorrect links", ImmutableSet.of(l1, l3), links);
}
|
@PublicEvolving
public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes(
MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) {
return getMapReturnTypes(mapInterface, inType, null, false);
}
|
@SuppressWarnings({"unchecked", "rawtypes"})
@Test
void testChainedGenericsNotInSuperclass() {
// use TypeExtractor
RichMapFunction<?, ?> function =
new RichMapFunction<ChainedTwo<Integer>, ChainedTwo<Integer>>() {
private static final long serialVersionUID = 1L;
@Override
public ChainedTwo<Integer> map(ChainedTwo<Integer> value) throws Exception {
return null;
}
};
TypeInformation<?> ti =
TypeExtractor.getMapReturnTypes(
function,
(TypeInformation)
TypeInformation.of(
new TypeHint<Tuple3<String, Long, Integer>>() {}));
assertThat(ti.isTupleType()).isTrue();
assertThat(ti.getArity()).isEqualTo(3);
TupleTypeInfo<?> tti = (TupleTypeInfo<?>) ti;
assertThat(tti.getTypeClass()).isEqualTo(ChainedTwo.class);
assertThat(tti.getTypeAt(0)).isEqualTo(BasicTypeInfo.STRING_TYPE_INFO);
assertThat(tti.getTypeAt(1)).isEqualTo(BasicTypeInfo.LONG_TYPE_INFO);
assertThat(tti.getTypeAt(2)).isEqualTo(BasicTypeInfo.INT_TYPE_INFO);
}
|
public static List<String> getServerIdentities(X509Certificate x509Certificate) {
List<String> names = new ArrayList<>();
for (CertificateIdentityMapping mapping : serverCertMapping) {
List<String> identities = mapping.mapIdentity(x509Certificate);
Log.debug("CertificateManager: " + mapping.name() + " returned " + identities.toString());
if (!identities.isEmpty()) {
names.addAll(identities);
break;
}
}
return names;
}
|
@Test
public void testServerIdentitiesDnsSrv() throws Exception
{
// Setup fixture.
final String subjectCommonName = "MySubjectCommonName";
        final String subjectAltNameDnsSrv = "MySubjectAltNameDnsSrv";
final X509v3CertificateBuilder builder = new JcaX509v3CertificateBuilder(
new X500Name( "CN=MyIssuer" ), // Issuer
BigInteger.valueOf( Math.abs( new SecureRandom().nextInt() ) ), // Random serial number
new Date( System.currentTimeMillis() - ( 1000L * 60 * 60 * 24 * 30 ) ), // Not before 30 days ago
new Date( System.currentTimeMillis() + ( 1000L * 60 * 60 * 24 * 99 ) ), // Not after 99 days from now
new X500Name( "CN=" + subjectCommonName ), // Subject
subjectKeyPair.getPublic()
);
final DERSequence otherName = new DERSequence( new ASN1Encodable[] {DNS_SRV_OID, new DERIA5String( "_xmpp-server."+subjectAltNameDnsSrv ) });
final GeneralNames subjectAltNames = new GeneralNames( new GeneralName(GeneralName.otherName, otherName ) );
builder.addExtension( Extension.subjectAlternativeName, true, subjectAltNames );
final X509CertificateHolder certificateHolder = builder.build( contentSigner );
final X509Certificate cert = new JcaX509CertificateConverter().getCertificate( certificateHolder );
// Execute system under test
final List<String> serverIdentities = CertificateManager.getServerIdentities( cert );
// Verify result
assertEquals( 1, serverIdentities.size() );
assertTrue( serverIdentities.contains( subjectAltNameDnsSrv ));
assertFalse( serverIdentities.contains( subjectCommonName ) );
}
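Since getServerIdentities() walks the serverCertMapping list and stops at the first mapping that yields identities, a custom mapping only needs the two methods invoked above. The sketch below is a hypothetical CN-based fallback; the CertificateIdentityMapping contract (mapIdentity and name) is inferred from the calls in the focal method, not from the full interface definition:

import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.List;

public class CommonNameMapping implements CertificateIdentityMapping {
    @Override
    public List<String> mapIdentity(X509Certificate certificate) {
        List<String> identities = new ArrayList<>();
        // "CN=example.org,O=Example" -> "example.org"; a real implementation
        // should parse the DN properly instead of string-splitting.
        String dn = certificate.getSubjectX500Principal().getName();
        for (String part : dn.split(",")) {
            if (part.trim().startsWith("CN=")) {
                identities.add(part.trim().substring(3));
            }
        }
        return identities;
    }

    @Override
    public String name() {
        return "Subject Common Name Mapping";
    }
}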
|
public McastConfig setEgressInnerVlan(VlanId vlanId) {
if (vlanId == null) {
object.remove(EGRESS_INNER_VLAN);
} else {
object.put(EGRESS_INNER_VLAN, vlanId.toString());
}
return this;
}
|
@Test
public void setEgressInnerVlan() {
config.setEgressInnerVlan(EGRESS_INNER_VLAN_2);
VlanId egressInnerVlan = config.egressInnerVlan();
assertNotNull("egressInnerVlan should not be null", egressInnerVlan);
assertThat(egressInnerVlan, is(EGRESS_INNER_VLAN_2));
}
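The setter above simply mirrors the VLAN into the backing JSON node. A tiny sketch of the resulting document, assuming object is a Jackson ObjectNode and the EGRESS_INNER_VLAN key is "egressInnerVlan" (both assumptions, not shown in the snippet):

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.onlab.packet.VlanId;

public class EgressInnerVlanJsonSketch {
    public static void main(String[] args) {
        ObjectNode object = new ObjectMapper().createObjectNode();
        // same write as setEgressInnerVlan(VlanId.vlanId((short) 100)):
        object.put("egressInnerVlan", VlanId.vlanId((short) 100).toString());
        System.out.println(object); // {"egressInnerVlan":"100"}
        // and the null branch removes the field, as in setEgressInnerVlan(null):
        object.remove("egressInnerVlan");
        System.out.println(object); // {}
    }
}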
|
@Override
public Map<String, Object> encode(Object object) throws EncodeException {
if (object == null) {
return Collections.emptyMap();
}
try {
ObjectParamMetadata metadata = getMetadata(object.getClass());
Map<String, Object> propertyNameToValue = new HashMap<String, Object>();
for (PropertyDescriptor pd : metadata.objectProperties) {
Method method = pd.getReadMethod();
Object value = method.invoke(object);
if (value != null && value != object) {
Param alias = method.getAnnotation(Param.class);
String name = alias != null ? alias.value() : pd.getName();
propertyNameToValue.put(name, value);
}
}
return propertyNameToValue;
} catch (IllegalAccessException | IntrospectionException | InvocationTargetException e) {
throw new EncodeException("Failure encoding object into query map", e);
}
}
|
@Test
void defaultEncoder_acceptNullValue() {
assertThat(encoder.encode(null)).as("Empty map should be returned")
.isEqualTo(Collections.EMPTY_MAP);
}
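A sketch of what the reflection loop in encode() produces for a plain bean; the Zone class is hypothetical, and the @Param alias on the getter exercises the getAnnotation(Param.class) branch of the focal method:

import feign.Param;

// Hypothetical bean; only the getters matter to encode(), which reads
// properties through their PropertyDescriptor read methods.
public class Zone {
    private final String name;
    private final String serial;

    public Zone(String name, String serial) {
        this.name = name;
        this.serial = serial;
    }

    public String getName() {
        return name;
    }

    @Param("serialNumber") // picked up by method.getAnnotation(Param.class) above
    public String getSerial() {
        return serial;
    }
}
// encoder.encode(new Zone("example", "42")) would yield
// {name=example, serialNumber=42}; null-valued properties are skipped.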
|
@Override
public SelType binaryOps(SelOp op, SelType rhs) {
if (rhs.type() == SelTypes.NULL) {
if (op == SelOp.EQUAL || op == SelOp.NOT_EQUAL) {
return rhs.binaryOps(op, this);
} else {
throw new UnsupportedOperationException(
this.type() + " DO NOT support " + op + " for rhs with NULL value");
}
}
if (rhs.type() == SelTypes.STRING) {
return SelString.of(String.valueOf(this.val)).binaryOps(op, rhs);
}
double another = ((Number) rhs.getInternalVal()).doubleValue();
switch (op) {
case EQUAL:
return SelBoolean.of(this.val == another);
case NOT_EQUAL:
return SelBoolean.of(this.val != another);
case LT:
return SelBoolean.of(this.val < another);
case GT:
return SelBoolean.of(this.val > another);
case LTE:
return SelBoolean.of(this.val <= another);
case GTE:
return SelBoolean.of(this.val >= another);
case ADD:
return new SelDouble(this.val + another);
case SUB:
return new SelDouble(this.val - another);
case MUL:
return new SelDouble(this.val * another);
case DIV:
return new SelDouble(this.val / another);
case MOD:
return new SelDouble(this.val % another);
case PLUS:
return new SelDouble(this.val);
case MINUS:
return new SelDouble(-this.val);
default:
throw new UnsupportedOperationException(
"float/Float/double/Doubles DO NOT support expression operation " + op);
}
}
|
@Test
public void testBinaryOps() {
SelType obj = SelLong.of(2);
SelType res = one.binaryOps(SelOp.EQUAL, obj);
assertEquals("BOOLEAN: false", res.type() + ": " + res);
res = one.binaryOps(SelOp.NOT_EQUAL, obj);
assertEquals("BOOLEAN: true", res.type() + ": " + res);
res = one.binaryOps(SelOp.LT, obj);
assertEquals("BOOLEAN: true", res.type() + ": " + res);
res = one.binaryOps(SelOp.GT, obj);
assertEquals("BOOLEAN: false", res.type() + ": " + res);
res = one.binaryOps(SelOp.LTE, obj);
assertEquals("BOOLEAN: true", res.type() + ": " + res);
res = one.binaryOps(SelOp.GTE, obj);
assertEquals("BOOLEAN: false", res.type() + ": " + res);
res = one.binaryOps(SelOp.ADD, obj);
assertEquals(3.1, ((SelDouble) res).doubleVal(), 0.01);
res = res.binaryOps(SelOp.SUB, obj);
assertEquals(1.1, ((SelDouble) res).doubleVal(), 0.01);
res = res.binaryOps(SelOp.MUL, obj);
assertEquals(2.2, ((SelDouble) res).doubleVal(), 0.01);
res = res.binaryOps(SelOp.DIV, obj);
assertEquals(1.1, ((SelDouble) res).doubleVal(), 0.01);
res = res.binaryOps(SelOp.MOD, obj);
assertEquals(1.1, ((SelDouble) res).doubleVal(), 0.01);
res = res.binaryOps(SelOp.PLUS, obj);
assertEquals(1.1, ((SelDouble) res).doubleVal(), 0.01);
res = res.binaryOps(SelOp.MINUS, obj);
assertEquals(-1.1, ((SelDouble) res).doubleVal(), 0.01);
res = one.binaryOps(SelOp.EQUAL, SelType.NULL);
assertEquals("BOOLEAN: false", res.type() + ": " + res);
res = one.binaryOps(SelOp.NOT_EQUAL, SelType.NULL);
assertEquals("BOOLEAN: true", res.type() + ": " + res);
res = one.binaryOps(SelOp.ADD, SelString.of("2"));
assertEquals("STRING: 1.12", res.type() + ": " + res);
}
|
@Override
protected Endpoint createEndpoint(final String uri, final String remaining, final Map<String, Object> parameters)
throws Exception {
if (ObjectHelper.isEmpty(remaining)) {
throw new IllegalArgumentException("You must provide a channel for the Dynamic Router");
}
DynamicRouterConfiguration configuration = new DynamicRouterConfiguration();
configuration.setChannel(remaining);
filterService.initializeChannelFilters(configuration.getChannel());
DynamicRouterEndpoint endpoint = endpointFactorySupplier.get()
.getInstance(uri, this, configuration, processorFactorySupplier, producerFactorySupplier, recipientListSupplier,
filterService);
setProperties(endpoint, parameters);
return endpoint;
}
|
@Test
void testCreateEndpointWithEmptyRemainingError() {
component.setCamelContext(context);
assertThrows(IllegalArgumentException.class,
() -> component.createEndpoint("dynamic-router:testname", "", Collections.emptyMap()));
}
|
@NonNull
@Override
public ConnectionFileName toPvfsFileName( @NonNull FileName providerFileName, @NonNull T details )
throws KettleException {
// Determine the part of provider file name following the connection "root".
// Use the transformer to generate the connection root provider uri.
// Both uris are assumed to be normalized.
// Examples:
// - connectionRootProviderUri: "hcp://domain.my:443/root/path/" | "s3://" | "local://"
// - providerUri: "hcp://domain.my:443/root/path/rest/path" | "s3://rest/path"
// Example: "pvfs://my-connection"
String connectionRootProviderUri = getConnectionRootProviderUriPrefix( details );
String providerUri = providerFileName.getURI();
if ( !connectionFileNameUtils.isDescendantOrSelf( providerUri, connectionRootProviderUri ) ) {
throw new IllegalArgumentException(
String.format(
"Provider file name '%s' is not a descendant of the connection root '%s'.",
providerUri,
connectionRootProviderUri ) );
}
String restUriPath = providerUri.substring( connectionRootProviderUri.length() );
// Examples: "/rest/path" or "rest/path"
return buildPvfsFileName( details, restUriPath, providerFileName.getType() );
}
|
@Test
public void testToPvfsFileNameHandlesConnectionsWithDomainAndBuckets() throws Exception {
// Example: SMB
mockDetailsWithDomain( details1, "my-domain:8080" );
when( details1.hasBuckets() ).thenReturn( true );
String connectionRootProviderUriPrefix = "scheme1://my-domain:8080";
String restPath = "/rest/path";
FileName providerFileName = mockFileNameWithUri( FileName.class, connectionRootProviderUriPrefix + restPath );
ConnectionFileName pvfsFileName = transformer.toPvfsFileName( providerFileName, details1 );
assertEquals( "pvfs://connection-name1" + restPath, pvfsFileName.getURI() );
// Should do connection root provider uri normalization.
verify( kettleVFS, times( 1 ) ).resolveURI( connectionRootProviderUriPrefix );
}
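The core of toPvfsFileName() is a guarded prefix strip; a tiny standalone check using the example URIs from the method's own comments:

public class PrefixStripSketch {
    public static void main(String[] args) {
        String connectionRootProviderUri = "hcp://domain.my:443/root/path/";
        String providerUri = "hcp://domain.my:443/root/path/rest/path";
        // isDescendantOrSelf(...) guarantees this prefix holds before substring:
        String restUriPath = providerUri.substring(connectionRootProviderUri.length());
        System.out.println(restUriPath); // "rest/path"
    }
}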
|
@Override
public Local create(final Path file) {
return this.create(new UUIDRandomStringService().random(), file);
}
|
@Test
public void testCreateContainer() {
final String temp = StringUtils.removeEnd(System.getProperty("java.io.tmpdir"), File.separator);
final String s = System.getProperty("file.separator");
final Path file = new Path("/container", EnumSet.of(Path.Type.directory));
file.attributes().setRegion("region");
assertEquals(String.format("%s%su%s1742810335%scontainer", temp, s, s, s),
new DefaultTemporaryFileService().create("u", file).getAbsolute());
}
|
@Override
public Num calculate(BarSeries series, Position position) {
Num stdDevPnl = standardDeviationCriterion.calculate(series, position);
if (stdDevPnl.isZero()) {
return series.zero();
}
        // SQN = (Average(PnL) / StdDev(PnL)) * SquareRoot(NumberOfPositions)
Num numberOfPositions = numberOfPositionsCriterion.calculate(series, position);
Num pnl = criterion.calculate(series, position);
Num avgPnl = pnl.dividedBy(numberOfPositions);
return avgPnl.dividedBy(stdDevPnl).multipliedBy(numberOfPositions.sqrt());
}
|
@Test
public void calculateWithLosingLongPositions() {
MockBarSeries series = new MockBarSeries(numFunction, 100, 95, 100, 80, 85, 70);
TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, series), Trade.sellAt(1, series),
Trade.buyAt(2, series), Trade.sellAt(5, series));
AnalysisCriterion sqnCriterion = getCriterion();
assertNumEquals(-1.9798989873223332, sqnCriterion.calculate(series, tradingRecord));
}
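Hand-checking the expected value: the two trades close at -5 (100 to 95) and -30 (100 to 70), and assuming a population standard deviation, the formula from the focal method's comment reproduces the asserted constant:

public class SqnByHandSketch {
    public static void main(String[] args) {
        double[] pnl = {-5, -30};                       // per-position profit/loss
        double mean = (pnl[0] + pnl[1]) / 2;            // -17.5
        double variance = (Math.pow(pnl[0] - mean, 2)
                + Math.pow(pnl[1] - mean, 2)) / 2;      // population variance: 156.25
        double sqn = mean / Math.sqrt(variance) * Math.sqrt(pnl.length);
        System.out.println(sqn);                        // -1.9798989873223332
    }
}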
|
@Override
public void hardStop() {
if (nodeLifecycle.tryToMoveTo(HARD_STOPPING)) {
LOG.info("Hard stopping SonarQube");
hardStopImpl();
}
}
|
@Test
public void awaitTermination_blocks_until_all_processes_are_stopped() throws Exception {
TestAppSettings settings = new TestAppSettings();
Scheduler underTest = startAll(settings);
Thread awaitingTermination = new Thread(underTest::awaitTermination);
awaitingTermination.start();
assertThat(awaitingTermination.isAlive()).isTrue();
underTest.hardStop();
// the thread is being stopped
awaitingTermination.join();
assertThat(awaitingTermination.isAlive()).isFalse();
}
|
public static void main( String[] args )
{
// suppress the Dock icon on OS X
System.setProperty("apple.awt.UIElement", "true");
int exitCode = new CommandLine(new ExtractText()).execute(args);
System.exit(exitCode);
}
|
@Test
void testPDFBoxRepeatableSubcommandAddFileNameOutfileAppend(@TempDir Path tempDir)
throws Exception
{
Path path = null;
try
{
path = tempDir.resolve("outfile.txt");
Files.deleteIfExists(path);
}
catch (InvalidPathException ipe)
{
System.err.println(
"Error creating temporary test file in " + this.getClass().getSimpleName());
}
assertNotNull(path);
PDFBox.main(new String[] { "export:text", "-i", testfile1, "-encoding", "UTF-8",
"-addFileName", "-o", path.toString(), //
"export:text", "-i", testfile2, "-encoding", "UTF-8",
"-addFileName", "-o", path.toString(), "-append" });
String result = new String(Files.readAllBytes(path), "UTF-8");
assertTrue(result.contains("PDF1"));
assertTrue(result.contains("PDF2"));
assertTrue(result.contains("PDF file: " + filename1));
assertTrue(result.contains("Hello"));
assertTrue(result.contains("World."));
assertTrue(result.contains("PDF file: " + filename2));
}
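The main() above is the standard picocli bootstrap. Below is a minimal sketch of the same pattern, where the option names (-i, -o) are borrowed from the test invocation and everything else (class name, behavior) is hypothetical, not the actual ExtractText implementation:

import java.util.concurrent.Callable;
import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;

@Command(name = "export:text", description = "Extract text from a PDF")
class ExtractTextSketch implements Callable<Integer> {
    @Option(names = "-i", required = true, description = "input PDF")
    String input;

    @Option(names = "-o", description = "output text file")
    String output;

    @Override
    public Integer call() {
        // a real implementation would open the PDF and write the extracted text
        System.out.printf("extracting %s -> %s%n", input, output);
        return 0; // exit code consumed by CommandLine.execute(...)
    }

    public static void main(String[] args) {
        System.exit(new CommandLine(new ExtractTextSketch()).execute(args));
    }
}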
|
public static ParameterizedType collectionOf(Type elementType) {
return parameterizedType(Collection.class, elementType);
}
|
@Test
public void createCollectionType() {
ParameterizedType type = Types.collectionOf(Person.class);
assertThat(type.getRawType()).isEqualTo(Collection.class);
assertThat(type.getActualTypeArguments()).isEqualTo(new Type[] {Person.class});
}
|
public BlockFilter(Web3j web3j, Callback<String> callback) {
super(web3j, callback);
}
|
@Test
public void testBlockFilter() throws Exception {
EthLog ethLog =
objectMapper.readValue(
"{\"jsonrpc\":\"2.0\",\"id\":1,\"result\":["
+ "\"0x31c2342b1e0b8ffda1507fbffddf213c4b3c1e819ff6a84b943faabb0ebf2403\","
+ "\"0xccc0d2e07c1febcaca0c3341c4e1268204b06fefa4bb0c8c0d693d8e581c82da\""
+ "]}",
EthLog.class);
runTest(ethLog, web3j.ethBlockHashFlowable());
}
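Outside the test harness, the flowable that BlockFilter backs is consumed like any RxJava stream; a minimal usage sketch, assuming a node at a hypothetical localhost endpoint:

import io.reactivex.disposables.Disposable;
import org.web3j.protocol.Web3j;
import org.web3j.protocol.http.HttpService;

public class BlockHashSubscriberSketch {
    public static void main(String[] args) {
        Web3j web3j = Web3j.build(new HttpService("http://localhost:8545")); // hypothetical node
        Disposable subscription = web3j.ethBlockHashFlowable()
                .subscribe(blockHash -> System.out.println("new block: " + blockHash));
        // ... run until done, then clean up:
        subscription.dispose();
        web3j.shutdown();
    }
}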
|
public Set<KsqlTopic> getSourceTopics() {
return Collections.unmodifiableSet(sourceTopics);
}
|
@Test
public void shouldExtractJoinTopicsFromJoinSelect() {
// Given:
final Statement statement = givenStatement(String.format(
"SELECT * FROM %s A JOIN %s B ON A.F1 = B.F1;", STREAM_TOPIC_1, STREAM_TOPIC_2
));
// When:
extractor.process(statement, null);
// Then:
assertThat(extractor.getSourceTopics(), contains(TOPIC_2, TOPIC_1));
}
|
@Deprecated
public PassiveCompletableFuture<TaskExecutionState> deployLocalTask(
@NonNull TaskGroup taskGroup) {
return deployLocalTask(
taskGroup, Thread.currentThread().getContextClassLoader(), emptyList());
}
|
@Test
public void testThrowException() throws InterruptedException {
TaskExecutionService taskExecutionService = server.getTaskExecutionService();
AtomicBoolean stopMark = new AtomicBoolean(false);
long t1Sleep = 100;
long t2Sleep = 50;
long lowLagSleep = 50;
long highLagSleep = 300;
List<Throwable> t1throwable = new ArrayList<>();
ExceptionTestTask t1 = new ExceptionTestTask(t1Sleep, "t1", t1throwable);
List<Throwable> t2throwable = new ArrayList<>();
ExceptionTestTask t2 = new ExceptionTestTask(t2Sleep, "t2", t2throwable);
        // Create low lag tasks
List<Task> lowLagTask =
buildFixedTestTask(lowLagSleep, 10, stopMark, new CopyOnWriteArrayList<>());
        // Create high lag tasks
List<Task> highLagTask =
buildFixedTestTask(highLagSleep, 5, stopMark, new CopyOnWriteArrayList<>());
List<Task> tasks = new ArrayList<>();
tasks.addAll(highLagTask);
tasks.addAll(lowLagTask);
Collections.shuffle(tasks);
CompletableFuture<TaskExecutionState> taskCts =
taskExecutionService.deployLocalTask(
new TaskGroupDefaultImpl(
new TaskGroupLocation(
jobId, pipeLineId, FLAKE_ID_GENERATOR.newId()),
"ts",
Lists.newArrayList(tasks)));
CompletableFuture<TaskExecutionState> t1c =
taskExecutionService.deployLocalTask(
new TaskGroupDefaultImpl(
new TaskGroupLocation(
jobId, pipeLineId, FLAKE_ID_GENERATOR.newId()),
"t1",
Lists.newArrayList(t1)));
CompletableFuture<TaskExecutionState> t2c =
taskExecutionService.deployLocalTask(
new TaskGroupDefaultImpl(
new TaskGroupLocation(
jobId, pipeLineId, FLAKE_ID_GENERATOR.newId()),
"t2",
Lists.newArrayList(t2)));
Thread.sleep(taskRunTime);
t1throwable.add(new IOException());
t2throwable.add(new IOException());
await().atMost(t1Sleep + t2Sleep + 1000, TimeUnit.MILLISECONDS)
.untilAsserted(
() -> {
assertEquals(FAILED, t1c.get().getExecutionState());
assertEquals(FAILED, t2c.get().getExecutionState());
});
stopMark.set(true);
await().atMost(lowLagSleep * 10 + highLagSleep + 1000, TimeUnit.MILLISECONDS)
.untilAsserted(() -> assertEquals(FINISHED, taskCts.get().getExecutionState()));
}
|