focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
/**
 * Rewrites the given file in place so that every line ends with the requested separator.
 * The file is fully read into memory first, then written back with the same charset.
 */
public static File convertLineSeparator(File file, Charset charset, LineSeparator lineSeparator) {
// Read all lines, then rewrite them without appending a trailing separator flag.
final List<String> contents = readLines(file, charset);
return FileWriter.create(file, charset).writeLines(contents, lineSeparator, false);
}
|
// Manual smoke test: converts a local file to Windows (CRLF) line endings.
// Disabled because it depends on a hard-coded path on a developer machine (d:/aaa.txt).
@Test
@Disabled
public void convertLineSeparatorTest() {
FileUtil.convertLineSeparator(FileUtil.file("d:/aaa.txt"), CharsetUtil.CHARSET_UTF_8, LineSeparator.WINDOWS);
}
|
// Convenience overload: resolves the expression's SQL type with no extra
// type mappings in scope (delegates with an empty map).
public SqlType getExpressionSqlType(final Expression expression) {
return getExpressionSqlType(expression, Collections.emptyMap());
}
|
// A BytesLiteral expression must resolve to the BYTES SQL type.
@Test
public void shouldProcessBytesLiteral() {
assertThat(expressionTypeManager.getExpressionSqlType(new BytesLiteral(ByteBuffer.wrap(new byte[] {123}))), is(SqlTypes.BYTES));
}
|
/**
 * Builds the "Compute Engine Tasks" system-info section: queue counts per status,
 * the configured worker ceiling, and whether CE processing is currently paused.
 */
@Override
public ProtobufSystemInfo.Section toProtobuf() {
ProtobufSystemInfo.Section.Builder section = ProtobufSystemInfo.Section.newBuilder();
section.setName("Compute Engine Tasks");
try (DbSession dbSession = dbClient.openSession(false)) {
setAttribute(section, "Total Pending", dbClient.ceQueueDao().countByStatus(dbSession, CeQueueDto.Status.PENDING));
setAttribute(section, "Total In Progress", dbClient.ceQueueDao().countByStatus(dbSession, CeQueueDto.Status.IN_PROGRESS));
// Fall back to the default worker count when no provider is configured.
setAttribute(section, "Max Workers per Node", workerCountProvider == null ? DEFAULT_NB_OF_WORKERS : workerCountProvider.get());
// The pause flag is stored as the string "true" in internal properties.
boolean paused = "true".equals(dbClient.internalPropertiesDao().selectByKey(dbSession, InternalProperties.COMPUTE_ENGINE_PAUSE).orElse(null));
setAttribute(section, "Workers Paused", paused);
}
return section.build();
}
|
// When the internal property COMPUTE_ENGINE_PAUSE is "true", the section
// must report "Workers Paused" as true.
@Test
public void test_workers_paused() {
when(dbClient.internalPropertiesDao().selectByKey(any(), eq(InternalProperties.COMPUTE_ENGINE_PAUSE))).thenReturn(Optional.of("true"));
CeQueueGlobalSection underTest = new CeQueueGlobalSection(dbClient, workerCountProvider);
ProtobufSystemInfo.Section section = underTest.toProtobuf();
assertThatAttributeIs(section, "Workers Paused", true);
}
|
// Returns an iterable-style view of the double array whose element comparisons
// use the given absolute tolerance instead of exact equality.
public DoubleArrayAsIterable usingTolerance(double tolerance) {
return new DoubleArrayAsIterable(tolerance(tolerance), iterableSubject());
}
|
// Tolerance-based contains() must accept expected values of any Number subtype
// (Float, Integer, Long, BigInteger, BigDecimal), including extreme values whose
// double representation is inexact.
@Test
public void usingTolerance_contains_otherTypes() {
// Expected value is Float
assertThat(array(1.0, 2.0 + 0.5 * DEFAULT_TOLERANCE, 3.0))
.usingTolerance(DEFAULT_TOLERANCE)
.contains(2.0f);
// Expected value is Integer
assertThat(array(1.0, 2.0 + 0.5 * DEFAULT_TOLERANCE, 3.0))
.usingTolerance(DEFAULT_TOLERANCE)
.contains(2);
// Expected value is Integer.MAX_VALUE
assertThat(array(1.0, Integer.MAX_VALUE + 0.5 * DEFAULT_TOLERANCE, 3.0))
.usingTolerance(DEFAULT_TOLERANCE)
.contains(Integer.MAX_VALUE);
// Expected value is Long
assertThat(array(1.0, 2.0 + 0.5 * DEFAULT_TOLERANCE, 3.0))
.usingTolerance(DEFAULT_TOLERANCE)
.contains(2L);
// Expected value is Long.MIN_VALUE. This is -1*2^63, which has an exact double representation.
// For the actual value we use the next value down, which is is 2^11 smaller (because the
// resolution of doubles with absolute values between 2^63 and 2^64 is 2^11). So we'll make the
// assertion with a tolerance of 2^12.
assertThat(array(1.0, UNDER_MIN_OF_LONG, 3.0)).usingTolerance(1 << 12).contains(Long.MIN_VALUE);
// Expected value is BigInteger
assertThat(array(1.0, 2.0 + 0.5 * DEFAULT_TOLERANCE, 3.0))
.usingTolerance(DEFAULT_TOLERANCE)
.contains(BigInteger.valueOf(2));
// Expected value is BigDecimal
assertThat(array(1.0, 2.0 + 0.5 * DEFAULT_TOLERANCE, 3.0))
.usingTolerance(DEFAULT_TOLERANCE)
.contains(BigDecimal.valueOf(2.0));
}
|
// Returns the referred proxy, lazily initializing it on first access.
// Throws IllegalStateException if this config was already destroyed.
@Override
@Transient
public T get(boolean check) {
// Fail fast once destroy() has run; the invoker can no longer be used.
if (destroyed) {
throw new IllegalStateException("The invoker of ReferenceConfig(" + url + ") has already destroyed!");
}
// Lazy init: only build the reference once.
if (ref == null) {
if (getScopeModel().isLifeCycleManagedExternally()) {
// prepare model for reference
getScopeModel().getDeployer().prepare();
} else {
// ensure start module, compatible with old api usage
getScopeModel().getDeployer().start();
}
init(check);
}
return ref;
}
|
// Metadata starts empty; consumer-level attributes merge in, and reference-level
// settings (async=false) override the consumer defaults.
@Test
void testMetaData() {
ReferenceConfig config = new ReferenceConfig();
Map<String, String> metaData = config.getMetaData();
Assertions.assertEquals(0, metaData.size(), "Expect empty metadata but found: " + metaData);
// test merged and override consumer attributes
ConsumerConfig consumerConfig = new ConsumerConfig();
consumerConfig.setAsync(true);
consumerConfig.setActives(10);
config.setConsumer(consumerConfig);
config.setAsync(false); // override
metaData = config.getMetaData();
// Exactly two merged keys: "actives" from the consumer, "async" from the override.
Assertions.assertEquals(2, metaData.size());
Assertions.assertEquals(String.valueOf(consumerConfig.getActives()), metaData.get("actives"));
Assertions.assertEquals(String.valueOf(config.isAsync()), metaData.get("async"));
}
|
// Parses a Record Access Control Point (RACP) indication.
// Packet layout: byte 0 = op code, byte 1 = operator (must be NULL),
// bytes 2.. = operand whose size depends on the op code.
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
super.onDataReceived(device, data);
// Minimum packet is op code + operator + at least one operand byte.
if (data.size() < 3) {
onInvalidDataReceived(device, data);
return;
}
final int opCode = data.getIntValue(Data.FORMAT_UINT8, 0);
// Only the two response op codes are handled here.
if (opCode != OP_CODE_NUMBER_OF_STORED_RECORDS_RESPONSE && opCode != OP_CODE_RESPONSE_CODE) {
onInvalidDataReceived(device, data);
return;
}
final int operator = data.getIntValue(Data.FORMAT_UINT8, 1);
if (operator != OPERATOR_NULL) {
onInvalidDataReceived(device, data);
return;
}
switch (opCode) {
case OP_CODE_NUMBER_OF_STORED_RECORDS_RESPONSE -> {
// Field size is defined per service
int numberOfRecords;
switch (data.size() - 2) {
case 1 -> numberOfRecords = data.getIntValue(Data.FORMAT_UINT8, 2);
case 2 -> numberOfRecords = data.getIntValue(Data.FORMAT_UINT16_LE, 2);
case 4 -> numberOfRecords = data.getIntValue(Data.FORMAT_UINT32_LE, 2);
default -> {
// Other field sizes are not supported
onInvalidDataReceived(device, data);
return;
}
}
onNumberOfRecordsReceived(device, numberOfRecords);
}
case OP_CODE_RESPONSE_CODE -> {
// Response Code operand is exactly 2 bytes: request op code + response code.
if (data.size() != 4) {
onInvalidDataReceived(device, data);
return;
}
final int requestCode = data.getIntValue(Data.FORMAT_UINT8, 2);
final int responseCode = data.getIntValue(Data.FORMAT_UINT8, 3);
if (responseCode == RACP_RESPONSE_SUCCESS) {
onRecordAccessOperationCompleted(device, requestCode);
} else if (responseCode == RACP_ERROR_NO_RECORDS_FOUND) {
onRecordAccessOperationCompletedWithNoRecordsFound(device, requestCode);
} else {
onRecordAccessOperationError(device, requestCode, responseCode);
}
}
}
}
|
// A 2-byte packet is below the 3-byte RACP minimum and must be reported invalid.
@Test
public void onInvalidDataReceived_tooShort() {
final Data data = new Data(new byte[] { 1, 1 });
callback.onDataReceived(null, data);
assertTrue(invalidData);
}
|
/**
 * Builds the operator configuration from environment variables, first warning about
 * removed variables and resolving the Kafka image/version lookup from the image maps.
 */
public static ClusterOperatorConfig buildFromMap(Map<String, String> map) {
warningsForRemovedEndVars(map);
KafkaVersion.Lookup versionLookup = parseKafkaVersions(
        map.get(STRIMZI_KAFKA_IMAGES),
        map.get(STRIMZI_KAFKA_CONNECT_IMAGES),
        map.get(STRIMZI_KAFKA_MIRROR_MAKER_IMAGES),
        map.get(STRIMZI_KAFKA_MIRROR_MAKER_2_IMAGES));
return buildFromMap(map, versionLookup);
}
|
// A label selector without "=" (e.g. "nsLabelKey1,nsLabelKey2") must be rejected
// with an InvalidConfigurationException naming the bad value.
@Test
public void testInvalidOperatorNamespaceLabels() {
Map<String, String> envVars = new HashMap<>(ClusterOperatorConfigTest.ENV_VARS);
envVars.put(ClusterOperatorConfig.OPERATOR_NAMESPACE_LABELS.key(), "nsLabelKey1,nsLabelKey2");
InvalidConfigurationException e = assertThrows(InvalidConfigurationException.class, () -> ClusterOperatorConfig.buildFromMap(envVars, KafkaVersionTestUtils.getKafkaVersionLookup()));
assertThat(e.getMessage(), containsString("Failed to parse. Value nsLabelKey1,nsLabelKey2 is not valid"));
}
|
// OAuth scope for GitHub: org read access is only requested when group sync
// or organization membership checks require it.
String getScope() {
if (settings.syncGroups() || isOrganizationMembershipRequired()) {
return "user:email,read:org";
}
return "user:email";
}
|
// The read:org scope must be added when either group sync is enabled or an
// organization allow-list is configured; otherwise only user:email is requested.
@Test
public void scope_includes_org_when_necessary() {
setSettings(false);
settings.setProperty("sonar.auth.github.groupsSync", false);
settings.setProperty("sonar.auth.github.organizations", "");
assertThat(underTest.getScope()).isEqualTo("user:email");
settings.setProperty("sonar.auth.github.groupsSync", true);
settings.setProperty("sonar.auth.github.organizations", "");
assertThat(underTest.getScope()).isEqualTo("user:email,read:org");
settings.setProperty("sonar.auth.github.groupsSync", false);
settings.setProperty("sonar.auth.github.organizations", "example");
assertThat(underTest.getScope()).isEqualTo("user:email,read:org");
settings.setProperty("sonar.auth.github.groupsSync", true);
settings.setProperty("sonar.auth.github.organizations", "example");
assertThat(underTest.getScope()).isEqualTo("user:email,read:org");
}
|
/**
 * Deep-copies a JDK dynamic proxy: allocates a new proxy with a stub handler first,
 * registers it for reference tracking if needed, then installs a copied handler.
 */
@Override
public Object copy(Object value) {
Class<?>[] interfaces = value.getClass().getInterfaces();
InvocationHandler handler = Proxy.getInvocationHandler(value);
Preconditions.checkNotNull(interfaces);
Preconditions.checkNotNull(handler);
// Allocate the copy with a placeholder handler before copying the real one.
Object copiedProxy = Proxy.newProxyInstance(fury.getClassLoader(), interfaces, STUB_HANDLER);
if (needToCopyRef) {
fury.reference(value, copiedProxy);
}
// Swap in the deep-copied invocation handler via direct field write.
Platform.putObject(copiedProxy, PROXY_HANDLER_FIELD_OFFSET, fury.copyObject(handler));
return copiedProxy;
}
|
// Copying a JDK proxy must yield a distinct instance whose handler still works.
@Test(dataProvider = "furyCopyConfig")
public void testJdkProxy(Fury fury) {
Function function =
(Function)
Proxy.newProxyInstance(
fury.getClassLoader(), new Class[] {Function.class}, new TestInvocationHandler());
Function copy = fury.copy(function);
assertNotSame(copy, function);
assertEquals(copy.apply(null), 1);
}
|
// Default sorter options: /tmp spill directory, 100 MB memory, Hadoop sorter.
public static Options options() {
return new Options("/tmp", 100, SorterType.HADOOP);
}
|
// Requesting exactly 2048 MB must be rejected: memoryMB must be strictly less than 2048.
@Test
public void testMemoryTooLarge() {
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage("memoryMB must be less than 2048");
BufferedExternalSorter.Options options = BufferedExternalSorter.options();
options.withMemoryMB(2048);
}
|
// Convenience overload: fits k-means with defaults of 100 max iterations
// and a convergence tolerance of 1E-4.
public static KMeans fit(double[][] data, int k) {
return fit(data, k, 100, 1E-4);
}
|
// k=4 clustering on the BBD dataset; the RNG seed is fixed so the Rand index
// and adjusted Rand index assertions are deterministic.
@Test
public void testBBD4() {
System.out.println("BBD 4");
MathEx.setSeed(19650218); // to get repeatable results.
KMeans model = KMeans.fit(x, 4);
System.out.println(model);
double r = RandIndex.of(y, model.y);
double r2 = AdjustedRandIndex.of(y, model.y);
System.out.format("Training rand index = %.2f%%, adjusted rand index = %.2f%%%n", 100.0 * r, 100.0 * r2);
assertEquals(0.6111, r, 1E-4);
assertEquals(0.2475, r2, 1E-4);
// The mutual-information variants below are informational only (not asserted).
System.out.format("MI = %.2f%n", MutualInformation.of(y, model.y));
System.out.format("NMI.joint = %.2f%%%n", 100 * NormalizedMutualInformation.joint(y, model.y));
System.out.format("NMI.max = %.2f%%%n", 100 * NormalizedMutualInformation.max(y, model.y));
System.out.format("NMI.min = %.2f%%%n", 100 * NormalizedMutualInformation.min(y, model.y));
System.out.format("NMI.sum = %.2f%%%n", 100 * NormalizedMutualInformation.sum(y, model.y));
System.out.format("NMI.sqrt = %.2f%%%n", 100 * NormalizedMutualInformation.sqrt(y, model.y));
}
|
// Returns the current lifecycle state; may be null if never set.
public State getState() {
return state;
}
|
// State defaults to null and reflects the last value passed to setState().
@Test
public void testGetState() throws Exception {
assertNull( info.getState() );
info.setState( LifeEventInfo.State.FAIL );
assertEquals( LifeEventInfo.State.FAIL, info.getState() );
}
|
/**
 * Plans which per-column indexes to prefetch for segment pruning: only columns that
 * appear in EQ/IN predicates and have a bloom filter are included in the fetch context.
 */
@Override
public FetchContext planFetchForPruning(IndexSegment indexSegment, QueryContext queryContext) {
// Collect columns referenced by EQ/IN predicates in the (required) filter.
Set<String> candidateColumns = new HashSet<>();
extractEqInColumns(Objects.requireNonNull(queryContext.getFilter()), candidateColumns);
// Keep only the candidates backed by a bloom filter.
Map<String, List<IndexType<?, ?, ?>>> columnToIndexList = new HashMap<>();
for (String column : candidateColumns) {
if (indexSegment.getDataSource(column).getBloomFilter() != null) {
columnToIndexList.put(column, Collections.singletonList(StandardIndexes.bloomFilter()));
}
}
return new FetchContext(UUID.randomUUID(), indexSegment.getSegmentName(), columnToIndexList);
}
|
// Without bloom filters the plan is empty; once c0 gains a bloom filter,
// exactly that column is planned for a bloom-filter fetch.
// NOTE: only c0 and c2 appear in EQ/IN predicates; c1 (range predicate) is never looked up.
@Test
public void testPlanFetchForPruning() {
DefaultFetchPlanner planner = new DefaultFetchPlanner();
IndexSegment indexSegment = mock(IndexSegment.class);
when(indexSegment.getSegmentName()).thenReturn("s0");
when(indexSegment.getColumnNames()).thenReturn(ImmutableSet.of("c0", "c1", "c2"));
String query = "SELECT COUNT(*) FROM testTable WHERE c0 = 0 OR (c1 < 10 AND c2 IN (1, 2))";
QueryContext queryContext = QueryContextConverterUtils.getQueryContext(query);
// No Bloomfilter for those columns.
DataSource ds0 = mock(DataSource.class);
when(indexSegment.getDataSource("c0")).thenReturn(ds0);
when(ds0.getBloomFilter()).thenReturn(null);
DataSource ds2 = mock(DataSource.class);
when(indexSegment.getDataSource("c2")).thenReturn(ds2);
when(ds2.getBloomFilter()).thenReturn(null);
FetchContext fetchContext = planner.planFetchForPruning(indexSegment, queryContext);
assertTrue(fetchContext.isEmpty());
// Add Bloomfilter for column c0.
BloomFilterReader bfReader = mock(BloomFilterReader.class);
when(ds0.getBloomFilter()).thenReturn(bfReader);
fetchContext = planner.planFetchForPruning(indexSegment, queryContext);
assertFalse(fetchContext.isEmpty());
assertEquals(fetchContext.getSegmentName(), "s0");
Map<String, List<IndexType<?, ?, ?>>> columns = fetchContext.getColumnToIndexList();
assertEquals(columns.size(), 1);
List<IndexType<?, ?, ?>> idxTypes = columns.get("c0");
assertEquals(idxTypes.size(), 1);
assertEquals(idxTypes.get(0), StandardIndexes.bloomFilter());
}
|
/**
 * Spring bean: builds the TimeLimiterRegistry from configuration properties,
 * wires the event consumers into it, and eagerly initializes configured instances.
 */
@Bean
public TimeLimiterRegistry timeLimiterRegistry(
    TimeLimiterConfigurationProperties timeLimiterConfigurationProperties,
    EventConsumerRegistry<TimeLimiterEvent> timeLimiterEventConsumerRegistry,
    RegistryEventConsumer<TimeLimiter> timeLimiterRegistryEventConsumer,
    @Qualifier("compositeTimeLimiterCustomizer") CompositeCustomizer<TimeLimiterConfigCustomizer> compositeTimeLimiterCustomizer) {
TimeLimiterRegistry registry = createTimeLimiterRegistry(
    timeLimiterConfigurationProperties, timeLimiterRegistryEventConsumer, compositeTimeLimiterCustomizer);
registerEventConsumer(registry, timeLimiterEventConsumerRegistry, timeLimiterConfigurationProperties);
initTimeLimiterRegistry(registry, timeLimiterConfigurationProperties, compositeTimeLimiterCustomizer);
return registry;
}
|
// An instance referencing a non-existent baseConfig must fail registry creation
// with ConfigurationNotFoundException naming the missing config.
@Test
public void testCreateTimeLimiterRegistryWithUnknownConfig() {
TimeLimiterConfigurationProperties timeLimiterConfigurationProperties = new TimeLimiterConfigurationProperties();
io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties instanceProperties = new io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties();
instanceProperties.setBaseConfig("unknownConfig");
timeLimiterConfigurationProperties.getInstances().put("backend", instanceProperties);
TimeLimiterConfiguration timeLimiterConfiguration = new TimeLimiterConfiguration();
DefaultEventConsumerRegistry<TimeLimiterEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>();
//When
assertThatThrownBy(() -> timeLimiterConfiguration.timeLimiterRegistry(timeLimiterConfigurationProperties, eventConsumerRegistry, new CompositeRegistryEventConsumer<>(emptyList()), compositeTimeLimiterCustomizerTestInstance()))
.isInstanceOf(ConfigurationNotFoundException.class)
.hasMessage("Configuration with name 'unknownConfig' does not exist");
}
|
/**
 * Disables binlog for the given table by persisting binlog_enable=false in FE metadata.
 * BE-side tasks are distributed on a best-effort basis; returns the meta-update result.
 */
public boolean tryDisableBinlog(Database db, long tableId) {
HashMap<String, String> binlogProperties = new HashMap<>();
binlogProperties.put(PropertyAnalyzer.PROPERTIES_BINLOG_ENABLE, "false");
return GlobalStateMgr.getCurrentState()
        .getSchemaChangeHandler()
        .updateBinlogConfigMeta(db, tableId, binlogProperties, TTabletMetaType.DISABLE_BINLOG);
}
|
// Disabling binlog must leave the table reporting binlog as disabled.
// NOTE(review): `result` is captured but never asserted — consider asserting it is true.
@Test
public void testTryDisableBinlog() {
Database db = GlobalStateMgr.getCurrentState().getDb("test");
OlapTable table = (OlapTable) db.getTable("binlog_test");
boolean result = binlogManager.tryDisableBinlog(db, table.getId());
Assert.assertFalse(table.isBinlogEnabled());
}
|
// Best-effort pre-check that estimates whether a query result would exceed the
// configured limit, using only a bounded sample of locally owned partitions.
// Throws QueryResultSizeExceededException when the extrapolated size is too large.
void precheckMaxResultLimitOnLocalPartitions(String mapName) {
// check if feature is enabled
if (!isPreCheckEnabled) {
return;
}
// limit number of local partitions to check to keep runtime constant
PartitionIdSet localPartitions = mapServiceContext.getCachedOwnedPartitions();
int partitionsToCheck = min(localPartitions.size(), maxLocalPartitionsLimitForPreCheck);
if (partitionsToCheck == 0) {
return;
}
// calculate size of local partitions
int localPartitionSize = getLocalPartitionSize(mapName, localPartitions, partitionsToCheck);
if (localPartitionSize == 0) {
return;
}
// check local result size
long localResultLimit = getNodeResultLimit(partitionsToCheck);
if (localPartitionSize > localResultLimit * MAX_RESULT_LIMIT_FACTOR_FOR_PRECHECK) {
// Record the rejection in local map stats before failing, if stats are available.
var localMapStatsProvider = mapServiceContext.getLocalMapStatsProvider();
if (localMapStatsProvider != null && localMapStatsProvider.hasLocalMapStatsImpl(mapName)) {
localMapStatsProvider.getLocalMapStatsImpl(mapName).incrementQueryResultSizeExceededCount();
}
throw new QueryResultSizeExceededException(maxResultLimit, " Result size exceeded in local pre-check.");
}
}
|
// With pre-check enabled but zero checkable local partitions, the pre-check
// must return silently (no exception).
@Test
public void testLocalPreCheckEnabledWithNoLocalPartitions() {
initMocksWithConfiguration(200000, 1);
limiter.precheckMaxResultLimitOnLocalPartitions(ANY_MAP_NAME);
}
|
/**
 * Runs the pipeline interpreter over the messages, timing the whole run.
 * Uses a metrics-recording listener only when rule metrics are enabled in the
 * latest pipeline state; otherwise a no-op listener is used.
 */
@Override
public Messages process(Messages messages) {
try (Timer.Context ignored = executionTime.time()) {
final State latestState = stateUpdater.getLatestState();
if (!latestState.enableRuleMetrics()) {
return process(messages, new NoopInterpreterListener(), latestState);
}
return process(messages, new RuleMetricsListener(metricRegistry), latestState);
}
}
|
// A rule whose action throws at evaluation time (Long passed where Double is
// expected) must not drop the message; instead the message carries a single
// RuleStatementEvaluationError processing error with rule/pipeline context.
@Test
public void process_ruleStatementEvaluationErrorConvertedIntoMessageProcessingError() throws Exception {
// given
when(ruleService.loadAll()).thenReturn(ImmutableList.of(RuleDao.create("broken_statement", "broken_statement",
"broken_statement",
"rule \"broken_statement\"\n" +
"when\n" +
" has_field(\"num\")\n" +
"then\n" +
" set_field(\"num_sqr\", $message.num * $message.num);\n" +
"end", null, null, null, null)));
when(pipelineService.loadAll()).thenReturn(Collections.singleton(
PipelineDao.create("p1", "title", "description",
"pipeline \"pipeline\"\n" +
"stage 0 match all\n" +
" rule \"broken_statement\";\n" +
"end\n",
Tools.nowUTC(),
null)
));
final PipelineInterpreter interpreter = createPipelineInterpreter(ruleService, pipelineService, ImmutableMap.of(
SetField.NAME, new SetField(),
DoubleConversion.NAME, new DoubleConversion(),
HasField.NAME, new HasField()
));
// when
final List<Message> processed = extractMessagesFromMessageCollection(interpreter.process(messageWithNumField(Long.valueOf(1))));
// then
assertThat(processed)
.hasSize(1)
.hasOnlyOneElementSatisfying(m -> {
assertThat(m.processingErrors())
.hasSize(1)
.hasOnlyOneElementSatisfying(pe -> {
assertThat(pe.getCause()).isEqualTo(ProcessingFailureCause.RuleStatementEvaluationError);
assertThat(pe.getMessage()).isEqualTo("Error evaluating action for rule <broken_statement/broken_statement> (pipeline <pipeline/p1>)");
assertThat(pe.getDetails()).isEqualTo("In call to function 'set_field' at 5:4 an exception was thrown: class java.lang.Long cannot be cast to class java.lang.Double (java.lang.Long and java.lang.Double are in module java.base of loader 'bootstrap')");
});
});
}
|
/**
 * Returns the field names of a composite data type. Distinct types are unwrapped
 * to their single source type; non-composite types yield an empty list.
 */
public static List<String> getFieldNames(DataType dataType) {
final LogicalType type = dataType.getLogicalType();
// A DISTINCT type wraps exactly one child type; recurse into it.
if (type.is(LogicalTypeRoot.DISTINCT_TYPE)) {
return getFieldNames(dataType.getChildren().get(0));
}
if (isCompositeType(type)) {
return LogicalTypeChecks.getFieldNames(type);
}
// Atomic / collection types have no named fields.
return Collections.emptyList();
}
|
// ROW and STRUCTURED types expose their field names in declaration order;
// ARRAY and atomic types expose none.
@Test
void testGetFieldNames() {
assertThat(
DataType.getFieldNames(
ROW(
FIELD("c0", BOOLEAN()),
FIELD("c1", DOUBLE()),
FIELD("c2", INT()))))
.containsExactly("c0", "c1", "c2");
assertThat(
DataType.getFieldNames(
STRUCTURED(
DataTypesTest.SimplePojo.class,
FIELD("name", STRING()),
FIELD("count", INT().notNull().bridgedTo(int.class)))))
.containsExactly("name", "count");
assertThat(DataType.getFieldNames(ARRAY(INT()))).isEmpty();
assertThat(DataType.getFieldNames(INT())).isEmpty();
}
|
// Creates the DOS attribute view, layered on the inherited "basic" view.
@Override
public DosFileAttributeView view(
FileLookup lookup, ImmutableMap<String, FileAttributeView> inheritedViews) {
return new View(lookup, (BasicFileAttributeView) inheritedViews.get("basic"));
}
|
// Exercises the DOS view end-to-end: defaults are all false, setters only become
// visible through a fresh readAttributes() snapshot, and setTimes() updates mtime.
@Test
public void testView() throws IOException {
DosFileAttributeView view =
provider.view(
fileLookup(),
ImmutableMap.<String, FileAttributeView>of(
"basic", new BasicAttributeProvider().view(fileLookup(), NO_INHERITED_VIEWS)));
assertNotNull(view);
assertThat(view.name()).isEqualTo("dos");
DosFileAttributes attrs = view.readAttributes();
assertThat(attrs.isHidden()).isFalse();
assertThat(attrs.isArchive()).isFalse();
assertThat(attrs.isReadOnly()).isFalse();
assertThat(attrs.isSystem()).isFalse();
view.setArchive(true);
view.setReadOnly(true);
view.setHidden(true);
view.setSystem(false);
// The old snapshot must not reflect the mutations above.
assertThat(attrs.isHidden()).isFalse();
assertThat(attrs.isArchive()).isFalse();
assertThat(attrs.isReadOnly()).isFalse();
attrs = view.readAttributes();
assertThat(attrs.isHidden()).isTrue();
assertThat(attrs.isArchive()).isTrue();
assertThat(attrs.isReadOnly()).isTrue();
assertThat(attrs.isSystem()).isFalse();
view.setTimes(FileTime.fromMillis(0L), null, null);
assertThat(view.readAttributes().lastModifiedTime()).isEqualTo(FileTime.fromMillis(0L));
}
|
/**
 * Marshals the object graph as a JSON:API document and writes the resulting
 * bytes directly to the output stream.
 */
@Override
public void marshal(Exchange exchange, Object graph, OutputStream stream) throws Exception {
ResourceConverter resourceConverter = new ResourceConverter(dataFormatTypeClasses);
stream.write(resourceConverter.writeDocument(new JSONAPIDocument<>(graph)));
}
|
// Marshalling a known book graph must produce exactly the expected JSON:API string.
@Test
public void testJsonApiMarshal() throws Exception {
Class<?>[] formats = { MyBook.class, MyAuthor.class };
JsonApiDataFormat jsonApiDataFormat = new JsonApiDataFormat(formats);
MyBook book = this.generateTestDataAsObject();
Exchange exchange = new DefaultExchange(context);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
jsonApiDataFormat.marshal(exchange, book, baos);
String jsonApiOutput = baos.toString();
assertNotNull(jsonApiOutput);
assertEquals(this.generateTestDataAsString(), jsonApiOutput);
}
|
/**
 * Normalizes a user-supplied feed URL: strips/rewrites podcast-specific schemes
 * (feed://, pcast://, pcast:, itpc://, antennapod-subscribe://, subscribe deeplinks)
 * recursively, and finally prepends http:// when no scheme is present.
 */
public static String prepareUrl(@NonNull String url) {
url = url.trim();
String lowerCaseUrl = url.toLowerCase(Locale.ROOT); // protocol names are case insensitive
if (lowerCaseUrl.startsWith("feed://")) {
Log.d(TAG, "Replacing feed:// with http://");
return prepareUrl(url.substring("feed://".length()));
} else if (lowerCaseUrl.startsWith("pcast://")) {
Log.d(TAG, "Removing pcast://");
return prepareUrl(url.substring("pcast://".length()));
} else if (lowerCaseUrl.startsWith("pcast:")) {
Log.d(TAG, "Removing pcast:");
return prepareUrl(url.substring("pcast:".length()));
} else if (lowerCaseUrl.startsWith("itpc://")) {
// Fix: previously matched the bare "itpc" prefix but removed "itpc://".length()
// characters, which mangled "itpc:"-style URLs and threw
// StringIndexOutOfBoundsException for inputs shorter than 7 chars (e.g. "itpc:").
// Inputs like "itpc:host" now fall through to the http:// branch instead.
Log.d(TAG, "Replacing itpc:// with http://");
return prepareUrl(url.substring("itpc://".length()));
} else if (lowerCaseUrl.startsWith(AP_SUBSCRIBE)) {
Log.d(TAG, "Removing antennapod-subscribe://");
return prepareUrl(url.substring(AP_SUBSCRIBE.length()));
} else if (lowerCaseUrl.contains(AP_SUBSCRIBE_DEEPLINK)) {
Log.d(TAG, "Removing " + AP_SUBSCRIBE_DEEPLINK);
// NOTE(review): getQueryParameter may return null if the deeplink has no
// "url" parameter; URLDecoder.decode(null, ...) would then NPE — confirm callers.
String query = Uri.parse(url).getQueryParameter("url");
try {
return prepareUrl(URLDecoder.decode(query, "UTF-8"));
} catch (UnsupportedEncodingException e) {
return prepareUrl(query);
}
} else if (!(lowerCaseUrl.startsWith("http://") || lowerCaseUrl.startsWith("https://"))) {
Log.d(TAG, "Adding http:// at the beginning of the URL");
return "http://" + url;
} else {
return url;
}
}
|
// An already-valid http URL must pass through prepareUrl unchanged.
@Test
public void testCorrectURLHttp() {
final String in = "http://example.com";
final String out = UrlChecker.prepareUrl(in);
assertEquals(in, out);
}
|
/**
 * Aggregated health: one sub-health per rate limiter that opted into health
 * reporting, with the overall status derived from the individual statuses.
 */
@Override
public Health health() {
Map<String, Health> details = rateLimiterRegistry.getAllRateLimiters().stream()
    .filter(this::isRegisterHealthIndicator)
    .collect(Collectors.toMap(RateLimiter::getName, this::mapRateLimiterHealth));
Status aggregate = statusAggregator.getAggregateStatus(
    details.values().stream().map(Health::getStatus).collect(Collectors.toSet()));
return Health.status(aggregate).withDetails(details).build();
}
|
// When allowHealthIndicatorToFail is false, a rate limiter in trouble must cap the
// aggregate at UNKNOWN (with a RATE_LIMITED sub-status) rather than going DOWN.
@Test
public void healthIndicatorMaxImpactCanBeOverridden() throws Exception {
// given
RateLimiterConfig config = mock(RateLimiterConfig.class);
AtomicRateLimiter.AtomicRateLimiterMetrics metrics = mock(AtomicRateLimiter.AtomicRateLimiterMetrics.class);
AtomicRateLimiter rateLimiter = mock(AtomicRateLimiter.class);
RateLimiterRegistry rateLimiterRegistry = mock(RateLimiterRegistry.class);
io.github.resilience4j.common.ratelimiter.configuration.CommonRateLimiterConfigurationProperties.InstanceProperties instanceProperties =
mock(io.github.resilience4j.common.ratelimiter.configuration.CommonRateLimiterConfigurationProperties.InstanceProperties.class);
RateLimiterConfigurationProperties rateLimiterProperties = mock(RateLimiterConfigurationProperties.class);
//when
when(rateLimiter.getRateLimiterConfig()).thenReturn(config);
when(rateLimiter.getName()).thenReturn("test");
when(rateLimiterProperties.findRateLimiterProperties("test")).thenReturn(Optional.of(instanceProperties));
when(instanceProperties.getRegisterHealthIndicator()).thenReturn(true);
boolean allowHealthIndicatorToFail = false; // do not allow health indicator to fail
when(instanceProperties.getAllowHealthIndicatorToFail()).thenReturn(allowHealthIndicatorToFail);
when(rateLimiter.getMetrics()).thenReturn(metrics);
when(rateLimiter.getDetailedMetrics()).thenReturn(metrics);
when(rateLimiterRegistry.getAllRateLimiters()).thenReturn(Set.of(rateLimiter));
when(config.getTimeoutDuration()).thenReturn(Duration.ofNanos(30L));
// Negative permissions + waiting threads simulate a saturated limiter.
when(metrics.getAvailablePermissions())
.thenReturn(-2);
when(metrics.getNumberOfWaitingThreads())
.thenReturn(2);
when(metrics.getNanosToWait())
.thenReturn(40L);
// then
RateLimitersHealthIndicator healthIndicator =
new RateLimitersHealthIndicator(rateLimiterRegistry, rateLimiterProperties, new SimpleStatusAggregator());
Health health = healthIndicator.health();
then(health.getStatus()).isEqualTo(Status.UNKNOWN);
then(((Health) health.getDetails().get("test")).getStatus()).isEqualTo(new Status("RATE_LIMITED"));
then(health.getDetails().get("test")).isInstanceOf(Health.class);
then(((Health) health.getDetails().get("test")).getDetails())
.contains(
entry("availablePermissions", -2),
entry("numberOfWaitingThreads", 2)
);
}
|
// Reconstructs the matched route for a JAX-RS request by concatenating the base
// path with the matched templates. Templates come back innermost-first, so they
// are appended in reverse. Written to avoid allocating a StringBuilder unless
// more than one non-trivial segment is present.
@Nullable static String route(ContainerRequest request) {
ExtendedUriInfo uriInfo = request.getUriInfo();
List<UriTemplate> templates = uriInfo.getMatchedTemplates();
int templateCount = templates.size();
if (templateCount == 0) return "";
StringBuilder builder = null; // don't allocate unless you need it!
String basePath = uriInfo.getBaseUri().getPath();
String result = null;
if (!"/" .equals(basePath)) { // skip empty base paths
result = basePath;
}
// Iterate outermost-to-innermost; "/" templates contribute nothing.
for (int i = templateCount - 1; i >= 0; i--) {
String template = templates.get(i).getTemplate();
if ("/" .equals(template)) continue; // skip allocation
if (builder != null) {
builder.append(template);
} else if (result != null) {
// Second real segment: upgrade to a builder seeded with the first.
builder = new StringBuilder(result).append(template);
result = null;
} else {
result = template;
}
}
return result != null ? result : builder != null ? builder.toString() : "";
}
|
// Templates arrive innermost-first; route() must reverse them and drop "/" entries.
@Test void route_nested_reverse() {
setBaseUri("/");
when(uriInfo.getMatchedTemplates()).thenReturn(Arrays.asList(
new PathTemplate("/items/{itemId}"),
new PathTemplate("/"),
new PathTemplate("/nested"),
new PathTemplate("/")
));
assertThat(SpanCustomizingApplicationEventListener.route(request))
.isEqualTo("/nested/items/{itemId}");
}
|
/**
 * Unquotes a quoted literal and resolves its backslash escape sequences
 * (\b \n \t \f \r \\ \" \'). The first and last characters (the surrounding
 * quotes) are dropped. Returns null for null input.
 *
 * @throws IllegalArgumentException for literals too short to carry a quote pair,
 *         for a trailing unpaired backslash, or for an unknown escape sequence
 */
public static String preprocess(String literal) {
if (literal == null) {
return null;
}
if (literal.length() < 2) {
// Guard: previously a length-0/1 input produced a negative StringBuilder
// capacity and crashed with NegativeArraySizeException / StringIndexOutOfBounds.
throw new IllegalArgumentException("Invalid escaped literal string: " + literal);
}
StringBuilder sb = new StringBuilder(literal.length() - 2);
for (int i = 1; i < literal.length() - 1; i++) {
char ch = literal.charAt(i);
if (ch == '\\') {
// A backslash as the last content character has nothing to escape.
if (i >= literal.length() - 2) {
throw new IllegalArgumentException("Invalid escaped literal string: " + literal);
}
char next = literal.charAt(++i);
switch (next) {
case 'b':
ch = '\b';
break;
case 'n':
ch = '\n';
break;
case 't':
ch = '\t';
break;
case 'f':
ch = '\f';
break;
case 'r':
ch = '\r';
break;
case '\\':
ch = '\\';
break;
case '\"':
ch = '\"';
break;
case '\'':
ch = '\'';
break;
default:
throw new IllegalArgumentException("Invalid escaped literal string: " + literal);
}
}
sb.append(ch);
}
return sb.toString();
}
|
// Round-trips every supported escape (\\ \' \" \n \t \b \f \r) through preprocess;
// the input literal includes its surrounding quotes, which must be stripped.
@Test
public void preprocess() {
String str = "\"\\\\\\\'\\\"\\n\\\'/abc;\\t\\b\\n\\f\\r\\\\\"";
assertEquals("\\\'\"\n\'/abc;\t\b\n\f\r\\", SelTypeUtil.preprocess(str));
}
|
/**
 * Batch-deletes the given OSS object keys, retrying keys the service did not
 * confirm as deleted, for at most 10 attempts.
 *
 * @throws IOException if undeleted keys remain after the final attempt
 */
public void deleteObjects(List<String> keysToDelete) throws IOException {
if (CollectionUtils.isEmpty(keysToDelete)) {
LOG.warn("Keys to delete is empty.");
return;
}
final int maxAttempts = 10;
int attempts = 0;
while (CollectionUtils.isNotEmpty(keysToDelete)) {
DeleteObjectsRequest request = new DeleteObjectsRequest(bucketName);
request.setKeys(keysToDelete);
// There are two modes to do batch delete:
// 1. verbose mode: A list of all deleted objects is returned.
// 2. quiet mode: No message body is returned.
// Verbose mode lets us compute which keys still need a retry.
request.setQuiet(false);
DeleteObjectsResult result = ossClient.deleteObjects(request);
statistics.incrementWriteOps(1);
// Retry only the keys the service did not confirm as deleted.
final List<String> confirmed = result.getDeletedObjects();
keysToDelete = keysToDelete.stream()
    .filter(key -> !confirmed.contains(key))
    .collect(Collectors.toList());
if (++attempts == maxAttempts) {
break;
}
}
if (attempts == maxAttempts && CollectionUtils.isNotEmpty(keysToDelete)) {
// Most of time, it is impossible to try 10 times, expect the
// Aliyun OSS service problems.
throw new IOException("Failed to delete Aliyun OSS objects for " + attempts + " times.");
}
}
|
// Creates 10 files under "dir", batch-deletes all of their keys, then verifies
// a fresh listing returns nothing.
@Test
public void testDeleteObjects() throws IOException, NoSuchAlgorithmException {
// generate test files
final int files = 10;
final long size = 5 * 1024 * 1024;
final String prefix = "dir";
for (int i = 0; i < files; i++) {
Path path = new Path(String.format("/%s/testFile-%d.txt", prefix, i));
ContractTestUtils.generateTestFile(this.fs, path, size, 256, 255);
}
OSSListRequest listRequest =
store.createListObjectsRequest(prefix, MAX_PAGING_KEYS_DEFAULT, null, null, true);
List<String> keysToDelete = new ArrayList<>();
OSSListResult objects = store.listObjects(listRequest);
assertEquals(files, objects.getObjectSummaries().size());
// test delete files
for (OSSObjectSummary objectSummary : objects.getObjectSummaries()) {
keysToDelete.add(objectSummary.getKey());
}
store.deleteObjects(keysToDelete);
objects = store.listObjects(listRequest);
assertEquals(0, objects.getObjectSummaries().size());
}
|
/**
 * FEEL max(list): returns the largest element of a non-empty list, or an
 * InvalidParametersEvent error when the list is null/empty or its items are
 * not mutually comparable.
 */
public FEELFnResult<Object> invoke(@ParameterName("list") List list) {
if ( list != null && !list.isEmpty() ) {
try {
return FEELFnResult.ofResult(Collections.max(list, new InterceptNotComparableComparator()));
} catch (ClassCastException e) {
// The comparator surfaces non-comparable items as a ClassCastException.
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "contains items that are not comparable"));
}
}
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null or empty"));
}
|
// max() over integers must return the largest value regardless of element order.
@Test
void invokeArrayOfIntegers() {
FunctionTestUtil.assertResult(maxFunction.invoke(new Object[]{1}), 1);
FunctionTestUtil.assertResult(maxFunction.invoke(new Object[]{1, 2, 3}), 3);
FunctionTestUtil.assertResult(maxFunction.invoke(new Object[]{1, 3, 2}), 3);
FunctionTestUtil.assertResult(maxFunction.invoke(new Object[]{3, 1, 2}), 3);
}
|
// Returns the logical column names. Note: keySet() is a live view of the
// backing map, not a defensive copy.
public Collection<String> getLogicColumns() {
return columns.keySet();
}
|
// The fixture declares a single logical column, which must be returned as-is.
@Test
void assertGetLogicColumns() {
assertThat(encryptTable.getLogicColumns(), is(Collections.singleton("logicColumn")));
}
|
// Human-readable type of this resource id: "Root" for the root id, otherwise
// the simple class name of the last path component.
@Override
String simpleTypeName() {
if (isRoot()) {
return "Root";
}
return lastComponent().getClass().getSimpleName();
}
|
// The last component of (device, port, vlan) is a VlanId, so that is the type name.
@Test
public void testSimpleTypeName() {
DiscreteResourceId id = Resources.discrete(D1, P1, VLAN1).id();
assertThat(id.simpleTypeName(), is("VlanId"));
}
|
// Extracts the path component from a URI string (delegates parsing to toURI).
public static String getPath(String uriStr) {
return toURI(uriStr).getPath();
}
|
// Query string is excluded from the path; the input's leading space is
// presumably trimmed by toURI — note it is part of this test's expectation.
@Test
public void getPathTest(){
String url = " http://www.aaa.bbb/search?scope=ccc&q=ddd";
String path = URLUtil.getPath(url);
assertEquals("/search", path);
}
|
/**
 * Best-effort JSON rendering of an Avro record.
 * Falls back to the record's own toString() when strict JSON conversion fails
 * (e.g. a field value does not match the declared schema type), so this never throws.
 */
public static String safeAvroToJsonString(GenericRecord record) {
    try {
        return avroToJsonString(record, false);
    } catch (Exception e) {
        // Deliberate swallow: this method's contract is "never fail", so degrade to toString().
        return record.toString();
    }
}
|
// "timestamp" is given a String value that violates the schema type, forcing the
// strict conversion to fail and the toString() fallback to be used.
@Test
void testSafeAvroToJsonStringBadDataType() {
    Schema schema = new Schema.Parser().parse(EXAMPLE_SCHEMA);
    GenericRecord record = new GenericData.Record(schema);
    record.put("non_pii_col", "val1");
    record.put("_row_key", "key");
    record.put("pii_col", "val2");
    record.put("timestamp", "foo");
    String jsonString = HoodieAvroUtils.safeAvroToJsonString(record);
    assertEquals("{\"timestamp\": \"foo\", \"_row_key\": \"key\", \"non_pii_col\": \"val1\", \"pii_col\": \"val2\"}", jsonString);
}
|
/**
 * Computes the next capacity for a growing array: 50% larger than the current
 * size, clamped to [DEFAULT_CAPACITY, MAX_ARRAY_SIZE].
 *
 * @throws IllegalArgumentException if the array is already at MAX_ARRAY_SIZE
 */
static int calculateNewArraySize(int currentSize)
{
    // Do the arithmetic in long so the 50% growth cannot overflow int.
    long grown = (long) currentSize + (currentSize >> 1);
    if (grown < DEFAULT_CAPACITY) {
        grown = DEFAULT_CAPACITY;
    }
    else if (grown > MAX_ARRAY_SIZE) {
        grown = MAX_ARRAY_SIZE;
        // Clamping back to the current size means no growth is possible at all.
        if (grown == currentSize) {
            throw new IllegalArgumentException(format("Can not grow array beyond '%s'", MAX_ARRAY_SIZE));
        }
    }
    return (int) grown;
}
|
// Verifies 50% growth, clamping at MAX_ARRAY_SIZE, and the failure mode when the
// array is already at the maximum size.
@Test
public void testCalculateNewArraySize()
{
    assertEquals(BlockUtil.calculateNewArraySize(200), 300);
    assertEquals(BlockUtil.calculateNewArraySize(Integer.MAX_VALUE), MAX_ARRAY_SIZE);
    try {
        BlockUtil.calculateNewArraySize(MAX_ARRAY_SIZE);
        // Bug fix: without this, the test silently passed when no exception was thrown.
        throw new AssertionError("expected IllegalArgumentException for size " + MAX_ARRAY_SIZE);
    }
    catch (IllegalArgumentException e) {
        assertEquals(e.getMessage(), format("Can not grow array beyond '%s'", MAX_ARRAY_SIZE));
    }
}
|
/**
 * Registers the given task with the slot identified by its allocation id.
 *
 * @return true if the task was added to the slot, false if the slot rejected it
 * @throws SlotNotFoundException if no slot exists for the task's allocation id
 * @throws SlotNotActiveException if the slot is not active for the task's job/allocation
 */
@Override
public boolean addTask(T task) throws SlotNotFoundException, SlotNotActiveException {
    checkRunning();
    Preconditions.checkNotNull(task);
    TaskSlot<T> taskSlot = getTaskSlot(task.getAllocationId());
    // Guard clauses replace the original nested if/else chain; behavior is identical.
    if (taskSlot == null) {
        throw new SlotNotFoundException(task.getAllocationId());
    }
    if (!taskSlot.isActive(task.getJobID(), task.getAllocationId())) {
        throw new SlotNotActiveException(task.getJobID(), task.getAllocationId());
    }
    if (!taskSlot.add(task)) {
        return false;
    }
    taskSlotMappings.put(
            task.getExecutionId(), new TaskSlotMapping<>(task, taskSlot));
    return true;
}
|
// Adds a single task via the table fixture and verifies it is the only task
// reported for the job, with matching execution and allocation ids.
@Test
void testAddTask() throws Exception {
    final JobID jobId = new JobID();
    final ExecutionAttemptID executionAttemptId = createExecutionAttemptId();
    final AllocationID allocationId = new AllocationID();
    TaskSlotPayload task =
            new TestingTaskSlotPayload(jobId, executionAttemptId, allocationId).terminate();
    try (final TaskSlotTable<TaskSlotPayload> taskSlotTable =
            createTaskSlotTableWithStartedTask(task)) {
        Iterator<TaskSlotPayload> tasks = taskSlotTable.getTasks(jobId);
        TaskSlotPayload nextTask = tasks.next();
        assertThat(nextTask.getExecutionId()).isEqualTo(executionAttemptId);
        assertThat(nextTask.getAllocationId()).isEqualTo(allocationId);
        assertThat(tasks.hasNext()).isFalse();
    }
}
|
/**
 * Fills in the issue's author (from SCM blame data) and assignee when they are missing.
 * Issues that already carry an author are left untouched.
 */
@Override
public void onIssue(Component component, DefaultIssue issue) {
    if (issue.authorLogin() != null) {
        return;
    }
    loadScmChangesets(component);
    Optional<String> scmAuthor = guessScmAuthor(issue, component);
    if (scmAuthor.isPresent()) {
        if (scmAuthor.get().length() <= IssueDto.AUTHOR_MAX_SIZE) {
            issueUpdater.setNewAuthor(issue, scmAuthor.get(), changeContext);
        } else {
            // Author column has a fixed max size; skip rather than truncate.
            LOGGER.debug("SCM account '{}' is too long to be stored as issue author", scmAuthor.get());
        }
    }
    if (issue.assignee() == null) {
        // Prefer the user mapped from the SCM account; fall back to the project's default assignee.
        UserIdDto userId = scmAuthor.map(scmAccountToUser::getNullable).orElse(defaultAssignee.loadDefaultAssigneeUserId());
        issueUpdater.setNewAssignee(issue, userId, changeContext);
    }
}
|
// Without any SCM changeset on the file, no author can be guessed and the issue stays author-less.
@Test
void do_not_set_author_if_no_changeset() {
    DefaultIssue issue = newIssueOnLines(1);
    underTest.onIssue(FILE, issue);
    assertThat(issue.authorLogin()).isNull();
}
|
/**
 * Parses a JSON map of partition id to weight and pushes the resulting
 * partition data to the announcer.
 *
 * @throws IOException if the JSON payload cannot be deserialized
 */
@Override
public void setPartitionDataUsingJson(String partitionDataJson)
    throws IOException
{
    Map<Integer, Double> weightsByPartition =
        JacksonUtil.getObjectMapper().readValue(partitionDataJson, new TypeReference<HashMap<Integer, Double>>(){});
    Map<Integer, PartitionData> partitionDataMap = new HashMap<>();
    // Wrap each raw weight in the PartitionData value object expected by the announcer.
    weightsByPartition.forEach((partition, weight) -> partitionDataMap.put(partition, new PartitionData(weight)));
    _announcer.setPartitionData(partitionDataMap);
}
|
// Round-trips PARTITION_DATA_JSON through the JMX setter and checks every
// partition's weight survived deserialization.
@Test
public void setPartitionDataUsingJson() throws IOException
{
    final Map<Integer,Double> partitionDataExpected = new HashMap<>();
    partitionDataExpected.put(1, 0.9);
    partitionDataExpected.put(2, 1.5);
    partitionDataExpected.put(29, 3.5);
    _zooKeeperAnnouncerJmx.setPartitionDataUsingJson(PARTITION_DATA_JSON);
    final Map<Integer, PartitionData> deserializedPartitionData = _zooKeeperAnnouncerJmx.getPartitionData();
    Assert.assertNotNull(deserializedPartitionData);
    Assert.assertEquals(deserializedPartitionData.size(), 3);
    for (Map.Entry<Integer,PartitionData> entry : deserializedPartitionData.entrySet())
    {
        Assert.assertTrue(partitionDataExpected.containsKey(entry.getKey()));
        PartitionData partitionData = deserializedPartitionData.get(entry.getKey());
        Assert.assertNotNull(partitionData);
        Assert.assertEquals(partitionDataExpected.get(entry.getKey()), (Double) partitionData.getWeight());
    }
}
|
/**
 * Returns a fresh, unique variable name for the given class, derived from
 * its name prefix (see {@code namePrefix}/{@code newName(String)}).
 */
public String newName(Class<?> clz) {
    return newName(namePrefix(clz));
}
|
// Each scenario uses a fresh context: repeated requests for the same base name
// get increasing numeric suffixes, and the suffix counter is shared across
// newName/newNames regardless of which overload reserved a name first.
@Test
public void testNewName() {
    {
        CodegenContext ctx = new CodegenContext();
        Assert.assertEquals(ctx.newName("serializer"), "serializer");
        Assert.assertEquals(ctx.newName("serializer"), "serializer1");
        Assert.assertEquals(ctx.newName("serializer"), "serializer2");
    }
    {
        CodegenContext ctx = new CodegenContext();
        Assert.assertEquals(ctx.newName("serializer"), "serializer");
        Assert.assertEquals(
            ctx.newNames(Serializer.class, "isNull"), new String[] {"serializer1", "isNull1"});
        Assert.assertEquals(ctx.newName("serializer"), "serializer2");
    }
    {
        CodegenContext ctx = new CodegenContext();
        Assert.assertEquals(ctx.newName("isNull"), "isNull");
        Assert.assertEquals(
            ctx.newNames("serializer", "isNull"), new String[] {"serializer1", "isNull1"});
        Assert.assertEquals(ctx.newName("serializer"), "serializer2");
    }
}
|
/**
 * Maps each actual data source name to the set of logic tables routed to it.
 * Data sources with no routed logic tables are omitted from the result.
 */
public Map<String, Set<String>> getDataSourceLogicTablesMap(final Collection<String> actualDataSourceNames) {
    Map<String, Set<String>> result = new HashMap<>(actualDataSourceNames.size(), 1F);
    for (String dataSourceName : actualDataSourceNames) {
        Set<String> logicTables = getLogicTableNames(dataSourceName);
        if (logicTables.isEmpty()) {
            continue;
        }
        result.put(dataSourceName, logicTables);
    }
    return result;
}
|
// Both fixture data sources route the same logic table, so each map entry holds exactly it.
@Test
void assertGetDataSourceLogicTablesMap() {
    List<String> dataSources = Arrays.asList(DATASOURCE_NAME_0, DATASOURCE_NAME_1);
    Map<String, Set<String>> actual = multiRouteContext.getDataSourceLogicTablesMap(dataSources);
    assertThat(actual.size(), is(2));
    assertThat(actual.get(DATASOURCE_NAME_0).size(), is(1));
    assertThat(actual.get(DATASOURCE_NAME_0).iterator().next(), is(LOGIC_TABLE));
    assertThat(actual.get(DATASOURCE_NAME_1).size(), is(1));
    assertThat(actual.get(DATASOURCE_NAME_1).iterator().next(), is(LOGIC_TABLE));
}
|
/**
 * Filters the incoming tuple against Redis: the tuple is emitted (pass-through)
 * only when its key is present in Redis under the configured data type, and is
 * acked either way. Any Redis/processing error is reported and the tuple failed.
 */
@Override
public void process(Tuple input) {
    String key = filterMapper.getKeyFromTuple(input);
    boolean found;
    JedisCommandsContainer jedisCommand = null;
    try {
        jedisCommand = getInstance();
        // Each data type has its own membership/existence check in Redis.
        switch (dataType) {
            case STRING:
                found = jedisCommand.exists(key);
                break;
            case SET:
                found = jedisCommand.sismember(additionalKey, key);
                break;
            case HASH:
                found = jedisCommand.hexists(additionalKey, key);
                break;
            case SORTED_SET:
                // zrank returns null when the member is absent.
                found = jedisCommand.zrank(additionalKey, key) != null;
                break;
            case HYPER_LOG_LOG:
                found = jedisCommand.pfcount(key) > 0;
                break;
            case GEO:
                List<GeoCoordinate> geopos = jedisCommand.geopos(additionalKey, key);
                if (geopos == null || geopos.isEmpty()) {
                    found = false;
                } else {
                    // If any entry is NOT null, then we have a match.
                    found = geopos.stream()
                        .anyMatch(Objects::nonNull);
                }
                break;
            default:
                throw new IllegalArgumentException("Cannot process such data type: " + dataType);
        }
        if (found) {
            collector.emit(input, input.getValues());
        }
        // Ack in both the found and not-found cases; only exceptions fail the tuple.
        collector.ack(input);
    } catch (Exception e) {
        this.collector.reportError(e);
        this.collector.fail(input);
    }
}
|
// SORTED_SET filtering: the key is not a member of the set in the (mocked/embedded)
// Redis instance, so the bolt must drop the tuple instead of emitting it.
@Test
void smokeTest_zrank_notMember() {
    // Define input key
    final String setKey = "ThisIsMySetKey";
    final String inputKey = "ThisIsMyKey";
    // Create an input tuple
    final Map<String, Object> values = new HashMap<>();
    values.put("key", inputKey);
    values.put("value", "ThisIsMyValue");
    final Tuple tuple = new StubTuple(values);
    final JedisPoolConfig config = configBuilder.build();
    final TestMapper mapper = new TestMapper(SORTED_SET, setKey);
    final RedisFilterBolt bolt = new RedisFilterBolt(config, mapper);
    bolt.prepare(new HashMap<>(), topologyContext, new OutputCollector(outputCollector));
    bolt.process(tuple);
    // Verify the bolt filtered the input tuple.
    verifyTupleFiltered();
}
|
/**
 * Issues an asynchronous HTTP POST with the given header, query and body,
 * delivering the typed response through the callback.
 */
public <T> void post(String url, Header header, Query query, Object body, Type responseType, Callback<T> callback) {
    execute(url, HttpMethod.POST, new RequestHttpEntity(header, query, body), responseType, callback);
}
|
// The async post must delegate to the underlying request client with method "POST"
// and the caller-supplied callback.
@Test
void testPost() throws Exception {
    restTemplate.post(TEST_URL, Header.EMPTY, Query.EMPTY, "body", String.class, mockCallback);
    verify(requestClient).execute(any(), eq("POST"), any(), any(), eq(mockCallback));
}
|
/**
 * Converts a Long to its hexadecimal string form using the shared number-to-hex
 * helper with this overload's defaults (bounded between HEX_LEN_MIN and HEX_LEN_LONG_MAX).
 */
public static String longToHex(Long l) {
    return prepareNumberHexString(l, true, false, HEX_LEN_MIN, HEX_LEN_LONG_MAX);
}
|
// Exercises the multi-argument longToHex overloads: big-endian vs little-endian
// rendering, optional "0x" prefix, and truncation to a requested hex length.
@Test
public void longToHex_Test() {
    Assertions.assertEquals("0x7FFFFFFFFFFFFFFF", TbUtils.longToHex(Long.MAX_VALUE, true, true));
    Assertions.assertEquals("0x8000000000000000", TbUtils.longToHex(Long.MIN_VALUE, true, true));
    Assertions.assertEquals("0xFFD8FFA6FFD8FFA6", TbUtils.longToHex(0xFFD8FFA6FFD8FFA6L, true, true));
    Assertions.assertEquals("0xA6FFD8FFA6FFCEFF", TbUtils.longToHex(0xFFCEFFA6FFD8FFA6L, false, true));
    Assertions.assertEquals("0xAB", TbUtils.longToHex(0xABL, true, true));
    Assertions.assertEquals("0xABCD", TbUtils.longToHex(0xABCDL, true, true));
    Assertions.assertEquals("0xABCDEF", TbUtils.longToHex(0xABCDEFL, true, true));
    Assertions.assertEquals("0xABEFCDAB", TbUtils.longToHex(0xABCDEFABCDEFL, false, true, 8));
    Assertions.assertEquals("0xAB", TbUtils.longToHex(0xABL, true, true, 2));
    Assertions.assertEquals("AB", TbUtils.longToHex(0xABL, false, false, 2));
    Assertions.assertEquals("0xFFA6", TbUtils.longToHex(0xFFD8FFA6FFD8FFA6L, true, true, 4));
    Assertions.assertEquals("D8FF", TbUtils.longToHex(0xFFD8FFA6FFD8FFA6L, false, false, 4));
}
|
/**
 * Returns true when the compilation context targets a source level that
 * supports text blocks (Java 15 or newer).
 */
public static boolean supportsTextBlocks(Context context) {
    return sourceIsAtLeast(context, 15);
}
|
// Source level 14 predates text blocks (introduced in Java 15), so the check must be false.
@Test
public void supportsTextBlocks_notSupported() {
    Context context = contextWithSourceVersion("14");
    assertThat(SourceVersion.supportsTextBlocks(context)).isFalse();
}
|
/**
 * Parses a resource configuration value with no upper bound on memory
 * (delegates to the two-argument overload with Long.MAX_VALUE).
 *
 * @throws AllocationConfigurationException if the value cannot be parsed
 */
public static ConfigurableResource parseResourceConfigValue(String value)
    throws AllocationConfigurationException {
  return parseResourceConfigValue(value, Long.MAX_VALUE);
}
|
// An unparsable resource string must be rejected; expectUnparsableResource arms the
// expected-exception check before the parse call.
@Test
public void testGibberish() throws Exception {
  String value = "1o24vc0res";
  expectUnparsableResource(value);
  parseResourceConfigValue(value);
}
|
/**
 * Translates a PMML Cluster (id, name and centroid array values) into the
 * KiePMML cluster representation.
 */
static KiePMMLCluster getKiePMMLCluster(Cluster cluster) {
    return new KiePMMLCluster(cluster.getId(), cluster.getName(), getClusterDoubleValues(cluster));
}
|
// Builds a Cluster with a random REAL array and checks the converted KiePMMLCluster
// matches it (delegated to the shared commonEvaluateKiePMMLCluster assertion helper).
@Test
void getKiePMMLCluster() {
    Cluster cluster = new Cluster();
    cluster.setId("ID");
    cluster.setName("NAME");
    final Random random = new Random();
    final List<Double> doubleValues =
            IntStream.range(0, 3).mapToObj(i -> random.nextDouble()).collect(Collectors.toList());
    final List<String> values = doubleValues.stream().map(String::valueOf).collect(Collectors.toList());
    Array array = getArray(Array.Type.REAL, values);
    cluster.setArray(array);
    KiePMMLCluster retrieved =
            KiePMMLClusteringModelFactory.getKiePMMLCluster(cluster);
    commonEvaluateKiePMMLCluster(retrieved, cluster);
}
|
/**
 * Builds the constraint entries for an XOR compound predicate.
 * Only exactly two SimplePredicate children are currently supported.
 *
 * @throws KiePMMLException if the predicate is not XOR, or has fewer/more than two simple predicates
 */
public static List<KiePMMLFieldOperatorValue> getConstraintEntriesFromXOrCompoundPredicate(final CompoundPredicate compoundPredicate, final Map<String, KiePMMLOriginalTypeGeneratedType> fieldTypeMap) {
    if (!CompoundPredicate.BooleanOperator.XOR.equals(compoundPredicate.getBooleanOperator())) {
        throw new KiePMMLException(String.format("getConstraintEntriesFromXOrCompoundPredicate invoked with %s CompoundPredicate", compoundPredicate.getBooleanOperator()));
    }
    // Managing only SimplePredicates for the moment being
    final List<Predicate> simplePredicates = compoundPredicate.getPredicates().stream().filter(predicate -> predicate instanceof SimplePredicate).collect(Collectors.toList());
    if (simplePredicates.size() < 2) {
        throw new KiePMMLException("At least two elements expected for XOR operations");
    }
    if (simplePredicates.size() > 2) {
        // Not managed yet
        // Fixed typo in the error message ("then" -> "than").
        throw new KiePMMLException("More than two elements not managed, yet, for XOR operations");
    }
    return getXORConstraintEntryFromSimplePredicates(simplePredicates, fieldTypeMap);
}
|
// An AND operator is not XOR, so the factory must reject the compound predicate.
@Test
void getConstraintEntriesFromXOrCompoundPredicateWrongOperator() {
    assertThatExceptionOfType(KiePMMLException.class).isThrownBy(() -> {
        CompoundPredicate compoundPredicate = new CompoundPredicate();
        compoundPredicate.setBooleanOperator(CompoundPredicate.BooleanOperator.AND);
        KiePMMLASTFactoryUtils.getConstraintEntriesFromXOrCompoundPredicate(compoundPredicate, fieldTypeMap);
    });
}
|
/** Static factory returning a new AMQP message coder instance. */
static AmqpMessageCoder of() {
    return new AmqpMessageCoder();
}
|
// Round-trips a message with a 32 MiB body through the coder to ensure large
// payloads survive encode/decode intact.
@Test
public void encodeDecodeLargeMessage() throws Exception {
    Message message = Message.Factory.create();
    message.setAddress("address");
    message.setSubject("subject");
    String body = Joiner.on("").join(Collections.nCopies(32 * 1024 * 1024, " "));
    message.setBody(new AmqpValue(body));
    AmqpMessageCoder coder = AmqpMessageCoder.of();
    Message clone = CoderUtils.clone(coder, message);
    assertEquals(message.getBody().toString(), clone.getBody().toString());
}
|
/**
 * Returns true when the exception or any of its causes carries a
 * "broken pipe" message (case-insensitive). Traversal stops if the cause
 * chain loops back to the original exception.
 */
@VisibleForTesting
static boolean isBrokenPipe(IOException original) {
    for (Throwable current = original; current != null; current = current.getCause()) {
        String message = current.getMessage();
        if (message != null && message.toLowerCase(Locale.US).contains("broken pipe")) {
            return true;
        }
        // Defensive: bail out if the chain is circular back to the original.
        if (current.getCause() == original) {
            return false;
        }
    }
    return false;
}
|
// A circular cause chain (exception caused by itself) must not loop forever;
// the walk terminates and reports "not a broken pipe".
@Test
public void testIsBrokenPipe_terminatesWhenCauseIsOriginal() {
    IOException exception = Mockito.mock(IOException.class);
    Mockito.when(exception.getCause()).thenReturn(exception);
    Assert.assertFalse(RegistryEndpointCaller.isBrokenPipe(exception));
}
|
/**
 * Escapes a message to another broker in the cluster.
 * Returns the remote SendResult on success, or null when: the target is this
 * broker itself, no route/address can be resolved, or the remote send fails.
 * Transactional half messages are rebuilt into their original form before sending.
 */
public SendResult putMessageToRemoteBroker(MessageExtBrokerInner messageExt, String brokerNameToSend) {
    if (this.brokerController.getBrokerConfig().getBrokerName().equals(brokerNameToSend)) { // not remote broker
        return null;
    }
    final boolean isTransHalfMessage = TransactionalMessageUtil.buildHalfTopic().equals(messageExt.getTopic());
    MessageExtBrokerInner messageToPut = messageExt;
    if (isTransHalfMessage) {
        // Half messages carry the real topic/queue in properties; restore them before escaping.
        messageToPut = TransactionalMessageUtil.buildTransactionalMessageFromHalfMessage(messageExt);
    }
    final TopicPublishInfo topicPublishInfo = this.brokerController.getTopicRouteInfoManager().tryToFindTopicPublishInfo(messageToPut.getTopic());
    if (null == topicPublishInfo || !topicPublishInfo.ok()) {
        LOG.warn("putMessageToRemoteBroker: no route info of topic {} when escaping message, msgId={}",
                messageToPut.getTopic(), messageToPut.getMsgId());
        return null;
    }
    final MessageQueue mqSelected;
    if (StringUtils.isEmpty(brokerNameToSend)) {
        // No explicit target: pick a queue on any broker other than this one.
        mqSelected = topicPublishInfo.selectOneMessageQueue(this.brokerController.getBrokerConfig().getBrokerName());
        messageToPut.setQueueId(mqSelected.getQueueId());
        brokerNameToSend = mqSelected.getBrokerName();
        if (this.brokerController.getBrokerConfig().getBrokerName().equals(brokerNameToSend)) {
            LOG.warn("putMessageToRemoteBroker failed, remote broker not found. Topic: {}, MsgId: {}, Broker: {}",
                    messageExt.getTopic(), messageExt.getMsgId(), brokerNameToSend);
            return null;
        }
    } else {
        mqSelected = new MessageQueue(messageExt.getTopic(), brokerNameToSend, messageExt.getQueueId());
    }
    final String brokerAddrToSend = this.brokerController.getTopicRouteInfoManager().findBrokerAddressInPublish(brokerNameToSend);
    if (null == brokerAddrToSend) {
        LOG.warn("putMessageToRemoteBroker failed, remote broker address not found. Topic: {}, MsgId: {}, Broker: {}",
                messageExt.getTopic(), messageExt.getMsgId(), brokerNameToSend);
        return null;
    }
    final long beginTimestamp = System.currentTimeMillis();
    try {
        final SendResult sendResult = this.brokerController.getBrokerOuterAPI().sendMessageToSpecificBroker(
                brokerAddrToSend, brokerNameToSend,
                messageToPut, this.getProducerGroup(messageToPut), SEND_TIMEOUT);
        if (null != sendResult && SendStatus.SEND_OK.equals(sendResult.getSendStatus())) {
            return sendResult;
        } else {
            LOG.error("Escaping failed! cost {}ms, Topic: {}, MsgId: {}, Broker: {}",
                    System.currentTimeMillis() - beginTimestamp, messageExt.getTopic(),
                    messageExt.getMsgId(), brokerNameToSend);
        }
    } catch (RemotingException | MQBrokerException e) {
        LOG.error(String.format("putMessageToRemoteBroker exception, MsgId: %s, RT: %sms, Broker: %s",
                messageToPut.getMsgId(), System.currentTimeMillis() - beginTimestamp, mqSelected), e);
    } catch (InterruptedException e) {
        LOG.error(String.format("putMessageToRemoteBroker interrupted, MsgId: %s, RT: %sms, Broker: %s",
                messageToPut.getMsgId(), System.currentTimeMillis() - beginTimestamp, mqSelected), e);
        // Preserve the interrupt status for callers further up the stack.
        Thread.currentThread().interrupt();
    }
    return null;
}
|
// With an explicit remote broker name whose publish address resolves, the bridge
// must forward the message to that exact address via the broker outer API.
@Test
public void testPutMessageToRemoteBroker_specificBrokerName_addressFound() throws Exception {
    MessageExtBrokerInner message = new MessageExtBrokerInner();
    message.setTopic(TEST_TOPIC);
    String anotherBrokerName = "broker_b";
    TopicPublishInfo publishInfo = mockTopicPublishInfo(BROKER_NAME, anotherBrokerName);
    when(topicRouteInfoManager.tryToFindTopicPublishInfo(anyString())).thenReturn(publishInfo);
    when(topicRouteInfoManager.findBrokerAddressInPublish(anotherBrokerName)).thenReturn("127.0.0.1");
    escapeBridge.putMessageToRemoteBroker(message, anotherBrokerName);
    verify(brokerOuterAPI).sendMessageToSpecificBroker(eq("127.0.0.1"), eq(anotherBrokerName), any(MessageExtBrokerInner.class), anyString(), anyLong());
}
|
/**
 * Moves {@code src} over {@code target}, retrying deletion of an existing
 * target (Windows cannot rename onto an existing or open file).
 *
 * @throws IOException if the rename ultimately fails or the wait is interrupted
 */
public static void replaceFile(File src, File target) throws IOException {
    /* renameTo() has two limitations on Windows platform.
     * src.renameTo(target) fails if
     * 1) If target already exists OR
     * 2) If target is already open for reading/writing.
     */
    if (!src.renameTo(target)) {
        int retries = 5;
        // Retry the delete for a few seconds in case the target is transiently open.
        while (target.exists() && !target.delete() && retries-- >= 0) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                // Bug fix: restore the interrupt status and chain the cause instead of swallowing it.
                Thread.currentThread().interrupt();
                throw new IOException("replaceFile interrupted.", e);
            }
        }
        if (!src.renameTo(target)) {
            throw new IOException("Unable to rename " + src +
                " to " + target);
        }
    }
}
|
// Covers three cases: target absent, target an existing regular file (overwritten),
// and target a non-empty directory (must fail without deleting anything).
@Test (timeout = 30000)
public void testReplaceFile() throws IOException {
    // src exists, and target does not exist:
    final File srcFile = Verify.createNewFile(new File(tmp, "src"));
    final File targetFile = new File(tmp, "target");
    Verify.notExists(targetFile);
    FileUtil.replaceFile(srcFile, targetFile);
    Verify.notExists(srcFile);
    Verify.exists(targetFile);
    // src exists and target is a regular file:
    Verify.createNewFile(srcFile);
    Verify.exists(srcFile);
    FileUtil.replaceFile(srcFile, targetFile);
    Verify.notExists(srcFile);
    Verify.exists(targetFile);
    // src exists, and target is a non-empty directory:
    Verify.createNewFile(srcFile);
    Verify.exists(srcFile);
    Verify.delete(targetFile);
    Verify.mkdirs(targetFile);
    File obstacle = Verify.createNewFile(new File(targetFile, "obstacle"));
    assertTrue(targetFile.exists() && targetFile.isDirectory());
    try {
        FileUtil.replaceFile(srcFile, targetFile);
        assertTrue(false);
    } catch (IOException ioe) {
        // okay
    }
    // check up the post-condition: nothing is deleted:
    Verify.exists(srcFile);
    assertTrue(targetFile.exists() && targetFile.isDirectory());
    Verify.exists(obstacle);
}
|
/**
 * Copies each source resource to the destination at the same index, after
 * applying the given move options (which may filter out some pairs).
 *
 * @throws IOException if the underlying file system copy fails
 */
public static void copy(
    List<ResourceId> srcResourceIds, List<ResourceId> destResourceIds, MoveOptions... moveOptions)
    throws IOException {
  validateSrcDestLists(srcResourceIds, destResourceIds);
  if (srcResourceIds.isEmpty()) {
    // Nothing to copy; also avoids resolving a file system from an absent scheme.
    return;
  }
  String scheme = srcResourceIds.get(0).getScheme();
  FileSystem fileSystem = getFileSystemInternal(scheme);
  FilterResult filtered = filterFiles(fileSystem, srcResourceIds, destResourceIds, moveOptions);
  if (filtered.resultSources.isEmpty()) {
    return;
  }
  fileSystem.copy(filtered.resultSources, filtered.resultDestinations);
}
|
// SKIP_IF_DESTINATION_EXISTS semantics: dest1 does not exist and is written;
// dest2 exists but with different content, so it is overwritten as well.
@Test
public void testCopySkipIfItExists() throws Exception {
    Path srcPath1 = temporaryFolder.newFile().toPath();
    Path srcPath2 = temporaryFolder.newFile().toPath();
    Path destPath1 = srcPath1.resolveSibling("dest1");
    Path destPath2 = srcPath2.resolveSibling("dest2");
    createFileWithContent(srcPath1, "content1");
    createFileWithContent(srcPath2, "content3");
    createFileWithContent(destPath2, "content");
    FileSystems.copy(
        toResourceIds(ImmutableList.of(srcPath1, srcPath2), false /* isDirectory */),
        toResourceIds(ImmutableList.of(destPath1, destPath2), false /* isDirectory */),
        MoveOptions.StandardMoveOptions.SKIP_IF_DESTINATION_EXISTS);
    assertTrue(srcPath1.toFile().exists());
    assertTrue(srcPath2.toFile().exists());
    assertThat(
        Files.readLines(destPath1.toFile(), StandardCharsets.UTF_8),
        containsInAnyOrder("content1"));
    // The file is overwritten because the content does not match.
    assertThat(
        Files.readLines(destPath2.toFile(), StandardCharsets.UTF_8),
        containsInAnyOrder("content3"));
}
|
/**
 * Builds a comparator that ranks ActualProperties by how well they satisfy the
 * preferred properties for streaming execution: local-property optimization
 * first, then partitioning requirements, then the quality of the local-property match.
 */
@VisibleForTesting
static Comparator<ActualProperties> streamingExecutionPreference(PreferredProperties preferred)
{
    // Calculating the matches can be a bit expensive, so cache the results between comparisons
    LoadingCache<List<LocalProperty<VariableReferenceExpression>>, List<Optional<LocalProperty<VariableReferenceExpression>>>> matchCache = CacheBuilder.newBuilder()
            .build(CacheLoader.from(actualProperties -> LocalProperties.match(actualProperties, preferred.getLocalProperties())));
    return (actual1, actual2) -> {
        List<Optional<LocalProperty<VariableReferenceExpression>>> matchLayout1 = matchCache.getUnchecked(actual1.getLocalProperties());
        List<Optional<LocalProperty<VariableReferenceExpression>>> matchLayout2 = matchCache.getUnchecked(actual2.getLocalProperties());
        // "True first" ordering: a candidate that satisfies a criterion sorts before one that does not.
        return ComparisonChain.start()
                .compareTrueFirst(hasLocalOptimization(preferred.getLocalProperties(), matchLayout1), hasLocalOptimization(preferred.getLocalProperties(), matchLayout2))
                .compareTrueFirst(meetsPartitioningRequirements(preferred, actual1), meetsPartitioningRequirements(preferred, actual2))
                .compare(matchLayout1, matchLayout2, matchedLayoutPreference())
                .result();
    };
}
|
// Stable-sorts a set of candidate properties under a "distributed" preference and
// checks the resulting order: distributed layouts rank ahead of single-stream ones,
// with ties broken by the original input order (hence the stable sort).
@Test
public void testPickLayoutPartitionedPreference()
{
    Comparator<ActualProperties> preference = streamingExecutionPreference(PreferredProperties.distributed());
    List<ActualProperties> input = ImmutableList.<ActualProperties>builder()
            .add(builder()
                    .global(streamPartitionedOn("a"))
                    .build())
            .add(builder()
                    .global(singleStreamPartition())
                    .build())
            .add(builder()
                    .global(arbitraryPartition())
                    .local(ImmutableList.of(grouped("a", "b")))
                    .build())
            .add(builder()
                    .global(arbitraryPartition())
                    .build())
            .add(builder()
                    .global(hashDistributedOn("a"))
                    .build())
            .add(builder()
                    .global(singleStream())
                    .local(ImmutableList.of(constant("a"), sorted("b", ASC_NULLS_FIRST)))
                    .build())
            .add(builder()
                    .global(singleStreamPartition())
                    .local(ImmutableList.of(sorted("a", ASC_NULLS_FIRST)))
                    .build())
            .build();
    List<ActualProperties> expected = ImmutableList.<ActualProperties>builder()
            .add(builder()
                    .global(streamPartitionedOn("a"))
                    .build())
            .add(builder()
                    .global(arbitraryPartition())
                    .local(ImmutableList.of(grouped("a", "b")))
                    .build())
            .add(builder()
                    .global(arbitraryPartition())
                    .build())
            .add(builder()
                    .global(hashDistributedOn("a"))
                    .build())
            .add(builder()
                    .global(singleStream())
                    .local(ImmutableList.of(constant("a"), sorted("b", ASC_NULLS_FIRST)))
                    .build())
            .add(builder()
                    .global(singleStreamPartition())
                    .build())
            .add(builder()
                    .global(singleStreamPartition())
                    .local(ImmutableList.of(sorted("a", ASC_NULLS_FIRST)))
                    .build())
            .build();
    assertEquals(stableSort(input, preference), expected);
}
|
@Override
public LongBitMask clone() {
    // Copy-constructor style clone: the mask value is the only state to duplicate.
    LongBitMask copy = new LongBitMask(mask);
    return copy;
}
|
// A cloned mask must carry the same underlying long value as the original.
@Test
public void testClone() {
    assertThat(new LongBitMask(1L).clone().asLong()).isEqualTo(1L);
}
|
/**
 * Uploads the local directory {@code from} to the namespace files API under
 * {@code to}, honoring .kestraignore, optionally deleting the remote path first.
 * Returns 0 on success, 1 when the server rejects a request.
 */
@Override
public Integer call() throws Exception {
    super.call();
    // Normalize the destination to a "/"-wrapped prefix so relative paths can be appended.
    to = to.startsWith("/") ? to : "/" + to;
    to = to.endsWith("/") ? to : to + "/";
    try (var files = Files.walk(from); DefaultHttpClient client = client()) {
        if (delete) {
            client.toBlocking().exchange(this.requestOptions(HttpRequest.DELETE(apiUri("/namespaces/") + namespace + "/files?path=" + to, null)));
        }
        KestraIgnore kestraIgnore = new KestraIgnore(from);
        List<Path> paths = files
            .filter(Files::isRegularFile)
            .filter(path -> !kestraIgnore.isIgnoredFile(path.toString(), true))
            .toList();
        paths.forEach(path -> {
            MultipartBody body = MultipartBody.builder()
                .addPart("fileContent", path.toFile())
                .build();
            String relativizedPath = from.relativize(path).toString();
            String destination = to + relativizedPath;
            client.toBlocking().exchange(
                this.requestOptions(
                    HttpRequest.POST(
                        apiUri("/namespaces/") + namespace + "/files?path=" + destination,
                        body
                    ).contentType(MediaType.MULTIPART_FORM_DATA)
                )
            );
            stdOut("Successfully uploaded {0} to {1}", path.toString(), destination);
        });
    } catch (HttpClientResponseException e) {
        AbstractValidateCommand.handleHttpException(e, "namespace");
        return 1;
    }
    return 0;
}
|
// Runs the update command against a directory containing a .kestraignore:
// the two non-ignored files are uploaded, while the ignored flow file is not.
@Test
void runWithIgnore() throws URISyntaxException {
    URL directory = NamespaceFilesUpdateCommandTest.class.getClassLoader().getResource("namespacefiles/ignore/");
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    System.setOut(new PrintStream(out));
    try (ApplicationContext ctx = ApplicationContext.run(Environment.CLI, Environment.TEST)) {
        EmbeddedServer embeddedServer = ctx.getBean(EmbeddedServer.class);
        embeddedServer.start();
        String[] args = {
            "--server",
            embeddedServer.getURL().toString(),
            "--user",
            "myuser:pass:word",
            "--delete",
            "io.kestra.cli",
            directory.getPath(),
        };
        PicocliRunner.call(NamespaceFilesUpdateCommand.class, ctx, args);
        assertTransferMessage(out, "2", null);
        assertTransferMessage(out, "1", null);
        assertTransferMessage(out, "flows/flow.yml", null, false);
        out.reset();
    }
}
|
/**
 * Validates a source-config update against the existing config and returns the merge.
 * Identity fields (tenant, namespace, name), processing guarantees and the
 * regular/batch source distinction are immutable; other non-empty/non-null
 * fields of the new config overwrite the existing values on a clone.
 *
 * @throws IllegalArgumentException if an immutable field differs between the configs
 */
public static SourceConfig validateUpdate(SourceConfig existingConfig, SourceConfig newConfig) {
    SourceConfig mergedConfig = clone(existingConfig);
    if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
        throw new IllegalArgumentException("Tenants differ");
    }
    if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
        throw new IllegalArgumentException("Namespaces differ");
    }
    if (!existingConfig.getName().equals(newConfig.getName())) {
        throw new IllegalArgumentException("Function Names differ");
    }
    if (!StringUtils.isEmpty(newConfig.getClassName())) {
        mergedConfig.setClassName(newConfig.getClassName());
    }
    if (!StringUtils.isEmpty(newConfig.getTopicName())) {
        mergedConfig.setTopicName(newConfig.getTopicName());
    }
    if (!StringUtils.isEmpty(newConfig.getSerdeClassName())) {
        mergedConfig.setSerdeClassName(newConfig.getSerdeClassName());
    }
    if (!StringUtils.isEmpty(newConfig.getSchemaType())) {
        mergedConfig.setSchemaType(newConfig.getSchemaType());
    }
    if (newConfig.getConfigs() != null) {
        mergedConfig.setConfigs(newConfig.getConfigs());
    }
    if (newConfig.getSecrets() != null) {
        mergedConfig.setSecrets(newConfig.getSecrets());
    }
    if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
        mergedConfig.setLogTopic(newConfig.getLogTopic());
    }
    if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
            .equals(existingConfig.getProcessingGuarantees())) {
        throw new IllegalArgumentException("Processing Guarantees cannot be altered");
    }
    if (newConfig.getParallelism() != null) {
        mergedConfig.setParallelism(newConfig.getParallelism());
    }
    if (newConfig.getResources() != null) {
        // Resources are merged field-by-field rather than replaced wholesale.
        mergedConfig
                .setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
    }
    if (!StringUtils.isEmpty(newConfig.getArchive())) {
        mergedConfig.setArchive(newConfig.getArchive());
    }
    if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
        mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
    }
    if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
        mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
    }
    if (isBatchSource(existingConfig) != isBatchSource(newConfig)) {
        throw new IllegalArgumentException("Sources cannot be update between regular sources and batchsource");
    }
    if (newConfig.getBatchSourceConfig() != null) {
        validateBatchSourceConfigUpdate(existingConfig.getBatchSourceConfig(), newConfig.getBatchSourceConfig());
        mergedConfig.setBatchSourceConfig(newConfig.getBatchSourceConfig());
    }
    if (newConfig.getProducerConfig() != null) {
        mergedConfig.setProducerConfig(newConfig.getProducerConfig());
    }
    return mergedConfig;
}
|
// Updating only the log topic must change that one field; resetting it afterwards
// should make the merged config JSON-identical to the original.
@Test
public void testMergeDifferentLogTopic() {
    SourceConfig sourceConfig = createSourceConfig();
    SourceConfig newSourceConfig = createUpdatedSourceConfig("logTopic", "Different");
    SourceConfig mergedConfig = SourceConfigUtils.validateUpdate(sourceConfig, newSourceConfig);
    assertEquals(
            mergedConfig.getLogTopic(),
            "Different"
    );
    mergedConfig.setLogTopic(sourceConfig.getLogTopic());
    assertEquals(
            new Gson().toJson(sourceConfig),
            new Gson().toJson(mergedConfig)
    );
}
|
/**
 * Converts the data table to the requested type, delegating to the
 * three-argument overload with transposition disabled.
 */
@Override
public <T> T convert(DataTable dataTable, Type type) {
    return convert(dataTable, type, false);
}
|
// Converting a table with a header row to LIST_OF_MAP yields one map per data row,
// keyed by the header cells.
@Test
void convert_to_list_of_map() {
    DataTable table = parse("",
        "| firstName   | lastName | birthDate  |",
        "| Annie M. G. | Schmidt  | 1911-03-20 |",
        "| Roald       | Dahl     | 1916-09-13 |",
        "| Astrid      | Lindgren | 1907-11-14 |");
    List<HashMap<String, String>> expected = asList(
        new HashMap<String, String>() {
            {
                put("firstName", "Annie M. G.");
                put("lastName", "Schmidt");
                put("birthDate", "1911-03-20");
            }
        },
        new HashMap<String, String>() {
            {
                put("firstName", "Roald");
                put("lastName", "Dahl");
                put("birthDate", "1916-09-13");
            }
        },
        new HashMap<String, String>() {
            {
                put("firstName", "Astrid");
                put("lastName", "Lindgren");
                put("birthDate", "1907-11-14");
            }
        });
    assertEquals(expected, converter.convert(table, LIST_OF_MAP));
}
|
/**
 * Looks up (or lazily creates) the state cell for the given namespace and tag,
 * using a null state context.
 */
@Override
public <T extends State> T state(StateNamespace namespace, StateTag<T> address) {
    return workItemState.get(namespace, address, StateContexts.nullContext());
}
|
// Once a multimap state has read all entries (cache complete), subsequent reads —
// including after a local put — must be served from cache; the mocked reader is
// armed to throw if Windmill is consulted again.
@Test
public void testMultimapCacheComplete() {
    final String tag = "multimap";
    StateTag<MultimapState<byte[], Integer>> addr =
        StateTags.multimap(tag, ByteArrayCoder.of(), VarIntCoder.of());
    MultimapState<byte[], Integer> multimapState = underTest.state(NAMESPACE, addr);
    final byte[] key = "key".getBytes(StandardCharsets.UTF_8);
    SettableFuture<Iterable<Map.Entry<ByteString, Iterable<Integer>>>> entriesFuture =
        SettableFuture.create();
    when(mockReader.multimapFetchAllFuture(
            false, key(NAMESPACE, tag), STATE_FAMILY, VarIntCoder.of()))
        .thenReturn(entriesFuture);
    // to set up the multimap as cache complete
    waitAndSet(entriesFuture, weightedList(multimapEntry(key, 1, 2, 3)), 30);
    multimapState.entries().read();
    multimapState.put(key, 2);
    when(mockReader.multimapFetchAllFuture(
            anyBoolean(), eq(key(NAMESPACE, tag)), eq(STATE_FAMILY), eq(VarIntCoder.of())))
        .thenThrow(
            new RuntimeException(
                "The multimap is cache complete and should not perform any windmill read."));
    when(mockReader.multimapFetchSingleEntryFuture(
            any(), eq(key(NAMESPACE, tag)), eq(STATE_FAMILY), eq(VarIntCoder.of())))
        .thenThrow(
            new RuntimeException(
                "The multimap is cache complete and should not perform any windmill read."));
    Iterable<Map.Entry<byte[], Integer>> entries = multimapState.entries().read();
    assertEquals(4, Iterables.size(entries));
    assertThat(
        entries,
        Matchers.containsInAnyOrder(
            multimapEntryMatcher(key, 1),
            multimapEntryMatcher(key, 2),
            multimapEntryMatcher(key, 3),
            multimapEntryMatcher(key, 2)));
    Iterable<byte[]> keys = multimapState.keys().read();
    assertThat(keys, Matchers.containsInAnyOrder(key));
    Iterable<Integer> values = multimapState.get(dup(key)).read();
    assertThat(values, Matchers.containsInAnyOrder(1, 2, 2, 3));
}
|
/**
 * Finds the table mapper matching the given logic data source and actual table name.
 *
 * @param logicDataSourceName logic data source name (compared case-insensitively)
 * @param actualTableName actual table name (compared case-insensitively)
 * @return the matching table mapper, or empty if the data source or table does not match
 */
public Optional<RouteMapper> findTableMapper(final String logicDataSourceName, final String actualTableName) {
    // The data source comparison does not depend on the loop variable; evaluate it once.
    if (!logicDataSourceName.equalsIgnoreCase(dataSourceMapper.getLogicName())) {
        return Optional.empty();
    }
    for (RouteMapper each : tableMappers) {
        if (actualTableName.equalsIgnoreCase(each.getActualName())) {
            return Optional.of(each);
        }
    }
    return Optional.empty();
}
|
@Test
void assertFindTableMapper() {
    // Looking up an existing actual table must yield its logic/actual mapping.
    Optional<RouteMapper> tableMapper = routeUnit.findTableMapper(LOGIC_DATA_SOURCE, ACTUAL_TABLE_0);
    assertTrue(tableMapper.isPresent());
    RouteMapper mapper = tableMapper.get();
    assertThat(mapper.getLogicName(), is(LOGIC_TABLE));
    assertThat(mapper.getActualName(), is(ACTUAL_TABLE_0));
}
|
/**
 * Converts a Flink {@code RowType} into a legacy {@code TableSchema}, mapping every
 * row field to a column with the equivalent {@code DataType}.
 */
public static TableSchema toSchema(RowType rowType) {
    TableSchema.Builder builder = TableSchema.builder();
    rowType.getFields()
        .forEach(
            field ->
                builder.field(field.getName(), TypeConversions.fromLogicalToDataType(field.getType())));
    return builder.build();
}
|
@Test
public void testConvertFlinkSchemaWithNestedColumnInPrimaryKeys() {
  // Iceberg allows identifier fields nested inside a struct (field id 2 below),
  // but Flink primary keys must be top-level columns, so conversion should fail.
  Schema icebergSchema =
      new Schema(
          Lists.newArrayList(
              Types.NestedField.required(
                  1,
                  "struct",
                  Types.StructType.of(
                      Types.NestedField.required(2, "inner", Types.IntegerType.get())))),
          Sets.newHashSet(2));
  assertThatThrownBy(() -> FlinkSchemaUtil.toSchema(icebergSchema))
      .isInstanceOf(ValidationException.class)
      .hasMessageStartingWith("Could not create a PRIMARY KEY")
      .hasMessageContaining("Column 'struct.inner' does not exist.");
}
|
/**
 * Builds a {@code Configuration} holding one string entry per property in the
 * given {@code Properties} object.
 */
@Nonnull
public static Configuration createConfiguration(Properties properties) {
    final Configuration configuration = new Configuration();
    properties
            .stringPropertyNames()
            .forEach(name -> configuration.setString(name, properties.getProperty(name)));
    return configuration;
}
|
@TestTemplate
void testPropertiesToConfiguration() {
    // Populate a Properties object with a handful of distinct key/value pairs.
    final int entryCount = 10;
    final Properties properties = new Properties();
    for (int i = 0; i < entryCount; i++) {
        properties.setProperty("key" + i, "value" + i);
    }
    final Configuration configuration = ConfigurationUtils.createConfiguration(properties);
    // Every property must survive the conversion unchanged, with no extras added.
    properties
            .stringPropertyNames()
            .forEach(
                    key ->
                            assertThat(configuration.getString(key, ""))
                                    .isEqualTo(properties.getProperty(key)));
    assertThat(configuration.toMap()).hasSize(properties.size());
}
|
/**
 * Fully reads the given stream as GZIP-compressed data and returns the
 * decompressed bytes.
 *
 * @param raw GZIP-compressed input; closed via the wrapping GZIPInputStream
 * @return the decompressed bytes
 * @throws IOException if the data is not valid GZIP or reading fails
 */
public static byte[] tryDecompress(InputStream raw) throws IOException {
    // try-with-resources closes both the decoder (and thus `raw`) and the buffer.
    try (
            GZIPInputStream gis = new GZIPInputStream(raw);
            ByteArrayOutputStream out = new ByteArrayOutputStream()) {
        copy(gis, out);
        return out.toByteArray();
    }
}
|
@Test
public void testTryDecompressInputStream() throws IOException {
    // Compress a known payload with GZIP into an in-memory buffer.
    final byte[] original = "This is a test string.".getBytes("UTF-8");
    final ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (GZIPOutputStream gzip = new GZIPOutputStream(compressed)) {
        gzip.write(original);
    }
    // Decompressing the buffer must restore the original bytes exactly.
    final ByteArrayInputStream input = new ByteArrayInputStream(compressed.toByteArray());
    final byte[] restored = IoUtil.tryDecompress(input);
    Assert.assertNotNull(restored);
    Assert.assertTrue(restored.length > 0);
    Assert.assertArrayEquals(original, restored);
}
|
// Returns true only when the current server connection explicitly reports the
// ability as SUPPORTED; UNKNOWN/NOT_SUPPORTED (and null) all yield false.
public boolean isAbilitySupportedByServer(AbilityKey abilityKey) {
    return rpcClient.getConnectionAbility(abilityKey) == AbilityStatus.SUPPORTED;
}
|
@Test
void testIsAbilitySupportedByServer2() {
    // A NOT_SUPPORTED connection ability must map to false, and the underlying
    // rpcClient must be queried exactly once.
    when(this.rpcClient.getConnectionAbility(AbilityKey.SERVER_SUPPORT_PERSISTENT_INSTANCE_BY_GRPC)).thenReturn(
            AbilityStatus.NOT_SUPPORTED);
    assertFalse(client.isAbilitySupportedByServer(AbilityKey.SERVER_SUPPORT_PERSISTENT_INSTANCE_BY_GRPC));
    verify(this.rpcClient, times(1)).getConnectionAbility(AbilityKey.SERVER_SUPPORT_PERSISTENT_INSTANCE_BY_GRPC);
}
|
/**
 * Registers an ephemeral instance for the given service on behalf of a client.
 *
 * @param service  target service (resolved to its singleton below)
 * @param instance instance to register; validated for legality first
 * @param clientId id of the registering client
 * @throws NacosException if the instance is illegal or the client check fails
 */
@Override
public void registerInstance(Service service, Instance instance, String clientId) throws NacosException {
    NamingUtils.checkInstanceIsLegal(instance);
    Service singleton = ServiceManager.getInstance().getSingleton(service);
    // Ephemeral instances may only join ephemeral services.
    if (!singleton.isEphemeral()) {
        throw new NacosRuntimeException(NacosException.INVALID_PARAM,
                String.format("Current service %s is persistent service, can't register ephemeral instance.",
                        singleton.getGroupedServiceName()));
    }
    Client client = clientManager.getClient(clientId);
    checkClientIsLegal(client, clientId);
    InstancePublishInfo instanceInfo = getPublishInfo(instance);
    // Attach the instance to the client, then refresh bookkeeping used for sync.
    client.addServiceInstance(singleton, instanceInfo);
    client.setLastUpdatedTime();
    client.recalculateRevision();
    // Notify subscribers: service registration first, then instance metadata.
    NotifyCenter.publishEvent(new ClientOperationEvent.ClientRegisterServiceEvent(singleton, clientId));
    NotifyCenter
            .publishEvent(new MetadataEvent.InstanceMetadataEvent(singleton, instanceInfo.getMetadataId(), false));
}
|
@Test
void testRegisterInstanceWithInvalidClusterName() throws NacosException {
    // Cluster names may only contain 0-9a-zA-Z- and '.', so a comma-separated
    // value must be rejected by the legality check before registration.
    Throwable exception = assertThrows(NacosException.class, () -> {
        when(instance.getClusterName()).thenReturn("cluster1,cluster2");
        ephemeralClientOperationServiceImpl.registerInstance(service, instance, ipPortBasedClientId);
    });
    assertTrue(exception.getMessage()
            .contains("Instance 'clusterName' should be characters with only 0-9a-zA-Z-. (current: cluster1,cluster2)"));
}
|
/**
 * Opens a stream to the given COS object, translating client-side COS errors
 * into Alluxio IOExceptions.
 */
@Override
protected InputStream openObject(String key, OpenOptions options,
    RetryPolicy retryPolicy) throws IOException {
  try {
    // Multi-range chunk size controls how the stream splits ranged GETs.
    return new COSInputStream(mBucketNameInternal, key, mClient, options.getOffset(), retryPolicy,
        mUfsConf.getBytes(PropertyKey.UNDERFS_OBJECT_STORE_MULTI_RANGE_CHUNK_SIZE));
  } catch (CosClientException e) {
    throw AlluxioCosException.from(e);
  }
}
|
@Test
public void testOpenObject() throws IOException {
  // Stub any bucket/key lookup to succeed so the open path completes normally.
  Mockito.when(mClient.getObject(ArgumentMatchers.anyString(), ArgumentMatchers.anyString()))
      .thenReturn(new COSObject());
  // A successful open must hand back a COS-backed input stream.
  RetryPolicy retryPolicy = new CountingRetry(1);
  InputStream stream = mCOSUnderFileSystem.openObject(KEY, OpenOptions.defaults(), retryPolicy);
  Assert.assertTrue(stream instanceof COSInputStream);
}
|
/**
 * Queries the job's metrics, filtered by {@code filter}. If the metrics cannot be
 * fetched, logs a warning and returns an empty result set rather than failing.
 */
@Override
public MetricQueryResults queryMetrics(MetricsFilter filter) {
  List<MetricUpdate> metricUpdates;
  ImmutableList<MetricResult<Long>> counters = ImmutableList.of();
  ImmutableList<MetricResult<DistributionResult>> distributions = ImmutableList.of();
  ImmutableList<MetricResult<GaugeResult>> gauges = ImmutableList.of();
  ImmutableList<MetricResult<StringSetResult>> stringSets = ImmutableList.of();
  JobMetrics jobMetrics;
  try {
    jobMetrics = getJobMetrics();
  } catch (IOException e) {
    // Include the cause so the failure is diagnosable (previously it was dropped).
    LOG.warn("Unable to query job metrics.", e);
    return MetricQueryResults.create(counters, distributions, gauges, stringSets);
  }
  // The service may return null for a job with no metrics yet.
  metricUpdates = firstNonNull(jobMetrics.getMetrics(), Collections.emptyList());
  return populateMetricQueryResults(metricUpdates, filter);
}
|
@Test
public void testTemplateJobMetricsThrowsUsefulError() throws Exception {
  // A DataflowTemplateJob is only a template handle, not a runnable job, so any
  // metrics query must fail fast with a clear UnsupportedOperationException.
  DataflowClient dataflowClient = mock(DataflowClient.class);
  DataflowMetrics metrics = new DataflowMetrics(new DataflowTemplateJob(), dataflowClient);
  assertThrows(
      "The result of template creation should not be used.",
      UnsupportedOperationException.class,
      () -> metrics.allMetrics());
  assertThrows(
      "The result of template creation should not be used.",
      UnsupportedOperationException.class,
      () -> metrics.queryMetrics(MetricsFilter.builder().build()));
}
|
/**
 * Runs CONFIG GET against the given cluster node and converts the flat
 * key/value reply list into {@code Properties}; returns null on a null reply.
 */
@Override
public Properties getConfig(RedisClusterNode node, String pattern) {
    RFuture<List<String>> future =
            executorService.writeAsync(getEntry(node), StringCodec.INSTANCE, RedisCommands.CONFIG_GET, pattern);
    List<String> keyValues = syncFuture(future);
    return keyValues == null ? null : Converters.toProperties(keyValues);
}
|
@Test
public void testGetConfig() {
    // CONFIG GET * on any live Redis master returns dozens of settings;
    // just sanity-check the reply was parsed into a reasonably sized Properties.
    RedisClusterNode master = getFirstMaster();
    Properties config = connection.getConfig(master, "*");
    assertThat(config.size()).isGreaterThan(20);
}
|
/**
 * Creates the concrete MySQL command packet for the given command type,
 * parsing the payload where the command carries a body. Unknown or
 * unsupported commands fall through to {@code MySQLUnsupportedCommandPacket}.
 */
public static MySQLCommandPacket newInstance(final MySQLCommandPacketType commandPacketType, final MySQLPacketPayload payload,
                                             final ConnectionSession connectionSession) {
    switch (commandPacketType) {
        case COM_QUIT:
            return new MySQLComQuitPacket();
        case COM_INIT_DB:
            return new MySQLComInitDbPacket(payload);
        case COM_FIELD_LIST:
            return new MySQLComFieldListPacket(payload);
        case COM_QUERY:
            return new MySQLComQueryPacket(payload);
        case COM_STMT_PREPARE:
            return new MySQLComStmtPreparePacket(payload);
        case COM_STMT_EXECUTE:
            // Peek the statement id (little-endian int at the current reader index,
            // without consuming it) to locate the prepared statement and recover its
            // parameter count for execute-packet parsing.
            MySQLServerPreparedStatement serverPreparedStatement =
                    connectionSession.getServerPreparedStatementRegistry().getPreparedStatement(payload.getByteBuf().getIntLE(payload.getByteBuf().readerIndex()));
            return new MySQLComStmtExecutePacket(payload, serverPreparedStatement.getSqlStatementContext().getSqlStatement().getParameterCount());
        case COM_STMT_SEND_LONG_DATA:
            return new MySQLComStmtSendLongDataPacket(payload);
        case COM_STMT_RESET:
            return new MySQLComStmtResetPacket(payload);
        case COM_STMT_CLOSE:
            return new MySQLComStmtClosePacket(payload);
        case COM_SET_OPTION:
            return new MySQLComSetOptionPacket(payload);
        case COM_PING:
            return new MySQLComPingPacket();
        case COM_RESET_CONNECTION:
            return new MySQLComResetConnectionPacket();
        default:
            return new MySQLUnsupportedCommandPacket(commandPacketType);
    }
}
|
@Test
void assertNewInstanceWithComSleepPacket() {
    // COM_SLEEP has no handler, so the factory's default branch must return the unsupported packet.
    assertThat(MySQLCommandPacketFactory.newInstance(MySQLCommandPacketType.COM_SLEEP, payload, connectionSession), instanceOf(MySQLUnsupportedCommandPacket.class));
}
|
// Accessor for the ITSP header's index-head offset field.
public int getIndex_head() {
    return index_head;
}
|
@Test
public void testGetIndex_head() {
    // The parsed header must expose the expected index-head value from the fixture.
    assertEquals(TestParameters.VP_INDEX_HEAD, chmItspHeader.getIndex_head());
}
|
/**
 * Two users are equal when their emailMe/enabled flags and all of
 * email, matcher, name, and displayName match (null-safe comparisons).
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    User other = (User) o;
    return emailMe == other.emailMe
            && enabled == other.enabled
            && (email == null ? other.email == null : email.equals(other.email))
            && (matcher == null ? other.matcher == null : matcher.equals(other.matcher))
            && (name == null ? other.name == null : name.equals(other.name))
            && (displayName == null ? other.displayName == null : displayName.equals(other.displayName));
}
|
@Test
void shouldNotBeEqualIfFullNamesAreDifferent() {
    // Same login and email, different display names -> users must not be equal.
    assertThat(new User("user1", "moocow-user1", "moocow@example.com").equals(new User("user1", "moocow", "moocow@example.com"))).isFalse();
}
|
/**
 * Runs the services.xml pre-processor for the given zone/cloud/instance/tags and
 * returns the resulting document as an indented XML string with blank-line runs
 * collapsed (for stable fixture comparison).
 */
static String effectiveServices(File servicesFile, ZoneId zone, CloudName cloud, InstanceName instance, Tags tags) throws Exception {
    Document processedServicesXml = new XmlPreProcessor(servicesFile.getParentFile(),
                                                        servicesFile,
                                                        instance,
                                                        zone.environment(),
                                                        zone.region(),
                                                        cloud,
                                                        tags)
            .run();
    Transformer transformer = TransformerFactory.newInstance().newTransformer();
    transformer.setOutputProperty(OutputKeys.INDENT, "yes");
    Writer writer = new StringWriter();
    transformer.transform(new DOMSource(processedServicesXml), new StreamResult(writer));
    // Collapse runs of blank lines left behind by removed directive elements.
    return writer.toString().replaceAll("\n(\\s*\n)+","\n");
}
|
@Test
@DisplayName("when zone doesn't match any directives")
void prodUsWest1Services() throws Exception {
    // prod/us-west-1 matches no deploy directives, so the output must equal the fixture.
    assertEquals(Files.readString(Paths.get("src/test/resources/effective-services/prod_us-west-1.xml")),
                 effectiveServices(servicesFile, ZoneId.from("prod", "us-west-1"), CloudName.DEFAULT, InstanceName.defaultName(), Tags.empty()));
}
|
/**
 * Decides on memory-usage changes: below the release threshold no action is
 * needed; at or above it the decision is deferred (empty).
 */
@Override
public Optional<Decision> onMemoryUsageChanged(
        int numTotalRequestedBuffers, int currentPoolSize) {
    if (numTotalRequestedBuffers < currentPoolSize * releaseThreshold) {
        return Optional.of(Decision.NO_ACTION);
    }
    return Optional.empty();
}
|
@Test
void testOnUsedMemoryExceedThreshold() {
    // Crossing the release threshold means the strategy must defer (empty decision).
    final int poolSize = 10;
    final int threshold = (int) (poolSize * FULL_SPILL_RELEASE_THRESHOLD);
    Optional<Decision> memoryUsageChangedDecision =
            spillStrategy.onMemoryUsageChanged(threshold + 1, poolSize);
    assertThat(memoryUsageChangedDecision).isNotPresent();
}
|
/**
 * Consumer-rebalance callback: transitions the stream thread to
 * PARTITIONS_REVOKED (when allowed) and hands the revoked partitions to the
 * task manager. No-op for an empty revocation set.
 */
@Override
public void onPartitionsRevoked(final Collection<TopicPartition> partitions) {
    log.debug("Current state {}: revoked partitions {} because of consumer rebalance.\n" +
            "\tcurrently assigned active tasks: {}\n" +
            "\tcurrently assigned standby tasks: {}\n",
        streamThread.state(),
        partitions,
        taskManager.activeTaskIds(),
        taskManager.standbyTaskIds());
    // We need to still invoke handleRevocation if the thread has been told to shut down, but we shouldn't ever
    // transition away from PENDING_SHUTDOWN once it's been initiated (to anything other than DEAD)
    if ((streamThread.setState(State.PARTITIONS_REVOKED) != null || streamThread.state() == State.PENDING_SHUTDOWN) && !partitions.isEmpty()) {
        final long start = time.milliseconds();
        try {
            taskManager.handleRevocation(partitions);
        } finally {
            // Revocation can block on task suspension; record how long it took.
            log.info("partition revocation took {} ms.", time.milliseconds() - start);
        }
    }
}
|
@Test
public void shouldNotHandleEmptySetOfRevokedPartitions() {
    // Even when the state transition succeeds, an empty revocation set must not
    // trigger any revocation handling in the task manager.
    when(streamThread.setState(State.PARTITIONS_REVOKED)).thenReturn(State.RUNNING);
    streamsRebalanceListener.onPartitionsRevoked(Collections.emptyList());
    verify(taskManager, never()).handleRevocation(any());
}
|
// Whether this attribute's value may be modified after creation.
public boolean isChangeable() {
    return changeable;
}
|
@Test
public void testIsChangeable_ShouldReturnCorrectChangeableStatus() {
    // The fixture attribute is constructed as changeable; the getter must reflect that.
    assertTrue(attribute.isChangeable());
}
|
// Returns the JWT payload (claims) section as a JSON object.
public JSONObject getPayloads() {
    return this.payload.getClaimsJson();
}
|
@Test
public void getLongTest() {
    // Token with a numeric "eff" claim; the payload must round-trip as JSON and
    // the claim must be readable as a Long.
    final String rightToken = "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9"
            + ".eyJsb2dpblR5cGUiOiJsb2dpbiIsImxvZ2luSWQiOiJhZG1pbiIsImRldmljZSI6ImRlZmF1bHQtZGV2aWNlIiwiZWZmIjoxNjc4Mjg1NzEzOTM1LCJyblN0ciI6IkVuMTczWFhvWUNaaVZUWFNGOTNsN1pabGtOalNTd0pmIn0"
            + ".wRe2soTaWYPhwcjxdzesDi1BgEm9D61K-mMT3fPc4YM";
    final JWT jwt = JWTUtil.parseToken(rightToken);
    assertEquals(
            "{\"loginType\":\"login\",\"loginId\":\"admin\",\"device\":\"default-device\"," +
                    "\"eff\":1678285713935,\"rnStr\":\"En173XXoYCZiVTXSF93l7ZZlkNjSSwJf\"}",
            jwt.getPayloads().toString());
    assertEquals(Long.valueOf(1678285713935L), jwt.getPayloads().getLong("eff"));
}
|
/**
 * Walks the parsed pattern node list and builds the corresponding linked list
 * of converters, returning its head. Literal nodes become LiteralConverters;
 * keyword nodes are resolved via the converter map, with parser-error literal
 * placeholders substituted when resolution fails.
 */
Converter<E> compile() {
    // Reset the output list; addToList appends via head/tail.
    head = tail = null;
    for (Node n = top; n != null; n = n.next) {
        switch (n.type) {
        case Node.LITERAL:
            addToList(new LiteralConverter<E>((String) n.getValue()));
            break;
        case Node.COMPOSITE_KEYWORD:
            CompositeNode cn = (CompositeNode) n;
            CompositeConverter<E> compositeConverter = createCompositeConverter(cn);
            if (compositeConverter == null) {
                // Unknown composite keyword: report and emit an error placeholder.
                addError("Failed to create converter for [%" + cn.getValue() + "] keyword");
                addToList(new LiteralConverter<E>("%PARSER_ERROR[" + cn.getValue() + "]"));
                break;
            }
            compositeConverter.setFormattingInfo(cn.getFormatInfo());
            compositeConverter.setOptionList(cn.getOptions());
            // Recursively compile the composite's child pattern into a sub-chain.
            Compiler<E> childCompiler = new Compiler<E>(cn.getChildNode(), converterMap);
            childCompiler.setContext(context);
            Converter<E> childConverter = childCompiler.compile();
            compositeConverter.setChildConverter(childConverter);
            addToList(compositeConverter);
            break;
        case Node.SIMPLE_KEYWORD:
            SimpleKeywordNode kn = (SimpleKeywordNode) n;
            DynamicConverter<E> dynaConverter = createConverter(kn);
            if (dynaConverter != null) {
                dynaConverter.setFormattingInfo(kn.getFormatInfo());
                dynaConverter.setOptionList(kn.getOptions());
                addToList(dynaConverter);
            } else {
                // if the appropriate dynaconverter cannot be found, then replace
                // it with a dummy LiteralConverter indicating an error.
                Converter<E> errConveter = new LiteralConverter<E>("%PARSER_ERROR[" + kn.getValue() + "]");
                addStatus(new ErrorStatus("[" + kn.getValue() + "] is not a valid conversion word", this));
                addToList(errConveter);
            }
        }
    }
    return head;
}
|
@Test
public void converterStart() throws Exception {
    // "%hello" is registered in converterMap; "abc " must pass through as a literal.
    Parser<Object> parser = new Parser<Object>("abc %hello");
    parser.setContext(context);
    Node top = parser.parse();
    Converter<Object> chain = parser.compile(top, converterMap);
    assertEquals("abc Hello", write(chain, new Object()));
}
|
/**
 * Builds the WebSocket consumer URI for the given topic: v2 topics use
 * {@code /ws/v2/consumer/...} without the cluster segment; v1 topics keep the
 * legacy {@code /ws/consumer/...} path including the cluster.
 */
@SuppressWarnings("deprecation")
@VisibleForTesting
public String getWebSocketConsumeUri(String topic) {
    // Normalize the broker URL so joining with "/ws" never yields a double slash.
    String serviceURLWithoutTrailingSlash = serviceURL.substring(0,
            serviceURL.endsWith("/") ? serviceURL.length() - 1 : serviceURL.length());
    TopicName topicName = TopicName.get(topic);
    String wsTopic;
    if (topicName.isV2()) {
        wsTopic = String.format("%s/%s/%s/%s", topicName.getDomain(), topicName.getTenant(),
                topicName.getNamespacePortion(), topicName.getLocalName());
    } else {
        // Legacy v1 topics carry the cluster name between tenant and namespace.
        wsTopic = String.format("%s/%s/%s/%s/%s", topicName.getDomain(), topicName.getTenant(),
                topicName.getCluster(), topicName.getNamespacePortion(), topicName.getLocalName());
    }
    String uriFormat = "%s/ws" + (topicName.isV2() ? "/v2/" : "/")
            + "consumer/%s/%s?subscriptionType=%s&subscriptionMode=%s";
    return String.format(uriFormat, serviceURLWithoutTrailingSlash, wsTopic, subscriptionName,
            subscriptionType.toString(), subscriptionMode.toString());
}
|
@Test
public void testGetWebSocketConsumeUri() {
    // v1 topic (with cluster segment) -> legacy /ws/consumer path.
    String topicNameV1 = "persistent://public/cluster/default/issue-11067";
    assertEquals(cmdConsume.getWebSocketConsumeUri(topicNameV1),
            "ws://localhost:8080/ws/consumer/persistent/public/cluster/default/issue-11067/my-sub"
                    + "?subscriptionType=Exclusive&subscriptionMode=Durable");
    // v2 topic -> /ws/v2/consumer path without the cluster segment.
    String topicNameV2 = "persistent://public/default/issue-11067";
    assertEquals(cmdConsume.getWebSocketConsumeUri(topicNameV2),
            "ws://localhost:8080/ws/v2/consumer/persistent/public/default/issue-11067/my-sub"
                    + "?subscriptionType=Exclusive&subscriptionMode=Durable");
}
|
/**
 * Resolves a value that may come from both the pod template and a Flink config
 * option. Precedence: an explicitly set config option wins (with an INFO log
 * noting the override), then the pod template value, then the option's default.
 *
 * @param flinkConfig Flink configuration
 * @param configOption the related config option
 * @param valueOfConfigOptionOrDefault the option's configured or default value
 * @param valueOfPodTemplate the value from the pod template, possibly null
 * @param fieldDescription human-readable field name used in the log message
 * @return the resolved value
 */
public static <T> String resolveUserDefinedValue(
        Configuration flinkConfig,
        ConfigOption<T> configOption,
        String valueOfConfigOptionOrDefault,
        @Nullable String valueOfPodTemplate,
        String fieldDescription) {
    if (valueOfPodTemplate == null) {
        return valueOfConfigOptionOrDefault;
    }
    if (!flinkConfig.contains(configOption)) {
        // Pod template value stands when the option is not explicitly configured.
        return valueOfPodTemplate;
    }
    // The config option is explicitly set.
    LOG.info(
            "The {} configured in pod template will be overwritten to '{}' "
                    + "because of explicitly configured options.",
            fieldDescription,
            valueOfConfigOptionOrDefault);
    return valueOfConfigOptionOrDefault;
}
|
@Test
void testResolveUserDefinedValueWithDefinedInPodTemplateAndConfigOptionNotSet() {
    // With the option absent from the config, the pod-template value must win.
    final String imageInPodTemplate = "image-in-pod-template:v1";
    final String resolvedImage =
            KubernetesUtils.resolveUserDefinedValue(
                    new Configuration(),
                    KubernetesConfigOptions.CONTAINER_IMAGE,
                    CONTAINER_IMAGE,
                    imageInPodTemplate,
                    "container image");
    assertThat(resolvedImage).isEqualTo(imageInPodTemplate);
}
|
@Udf(description = "Converts a TIMESTAMP value into the"
    + " string representation of the timestamp in the given format. Single quotes in the"
    + " timestamp format can be escaped with '', for example: 'yyyy-MM-dd''T''HH:mm:ssX'"
    + " The system default time zone is used when no time zone is explicitly provided."
    + " The format pattern should be in the format expected"
    + " by java.time.format.DateTimeFormatter")
public String formatTimestamp(
    @UdfParameter(
        description = "TIMESTAMP value.") final Timestamp timestamp,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.time.format.DateTimeFormatter.") final String formatPattern) {
  // Delegate to the three-arg overload with the canonical GMT zone id.
  // "GMT" is exactly what ZoneId.of("GMT").getId() returns, so pass the
  // constant directly instead of round-tripping through ZoneId on every call.
  return formatTimestamp(timestamp, formatPattern, "GMT");
}
|
@Test
public void shouldReturnNullOnNullTimeZone() {
  // When: a null time-zone argument is passed to the three-arg overload
  final String returnValue = udf.formatTimestamp( new Timestamp(1534353043000L), "yyyy-MM-dd", null);
  // Then: the UDF must propagate null rather than throw
  assertThat(returnValue, is(nullValue()));
}
|
/**
 * Loads the tenant package by id and validates that it exists and is enabled.
 *
 * @param id tenant package id
 * @return the valid tenant package
 */
@Override
public TenantPackageDO validTenantPackage(Long id) {
    TenantPackageDO tenantPackage = tenantPackageMapper.selectById(id);
    // Unknown id -> business exception rather than returning null.
    if (tenantPackage == null) {
        throw exception(TENANT_PACKAGE_NOT_EXISTS);
    }
    // A disabled package cannot be assigned; include its name in the error.
    if (tenantPackage.getStatus().equals(CommonStatusEnum.DISABLE.getStatus())) {
        throw exception(TENANT_PACKAGE_DISABLE, tenantPackage.getName());
    }
    return tenantPackage;
}
|
@Test
public void testValidTenantPackage_notExists() {
    // Prepare parameters: a random id that is not present in the database.
    Long id = randomLongId();
    // Call and assert the expected business exception is raised.
    assertServiceException(() -> tenantPackageService.validTenantPackage(id), TENANT_PACKAGE_NOT_EXISTS);
}
|
/**
 * Opens the file for reading, converting any IOException into an unchecked
 * IllegalStateException that names the offending file.
 */
public FileInputStream openInputStream(File file) {
    try {
        return openInputStreamOrThrowIOE(file);
    } catch (IOException e) {
        throw new IllegalStateException("Can not open file " + file, e);
    }
}
|
@Test
public void openInputStream_opens_existing_file() throws Exception {
    // An existing file with known content must be readable through the returned stream.
    File file = temp.newFile();
    FileUtils.write(file, "foo");
    try (FileInputStream input = underTest.openInputStream(file)) {
        assertThat(IOUtils.toString(input)).isEqualTo("foo");
    }
}
|
/**
 * Checks that an element of an array parameter is non-null.
 *
 * @param value the array element to check
 * @param index the element's index, used in the error message
 * @param name  the parameter name, used in the error message
 * @return {@code value} when non-null
 * @throws IllegalArgumentException if {@code value} is null
 */
public static <T> T checkNotNullArrayParam(T value, int index, String name) throws IllegalArgumentException {
    // Guard clause: the common non-null case returns immediately.
    if (value != null) {
        return value;
    }
    throw new IllegalArgumentException(
            "Array index " + index + " of parameter '" + name + "' must not be null");
}
|
@Test
public void testCheckNotNullArrayParam() {
    // A non-null element must pass without raising anything.
    Exception actualEx = null;
    try {
        ObjectUtil.checkNotNullArrayParam(NON_NULL_OBJECT, 1, NON_NULL_NAME);
    } catch (Exception e) {
        actualEx = e;
    }
    assertNull(actualEx, TEST_RESULT_NULLEX_NOK);
    // A null element must raise, and the exception type must be IllegalArgumentException.
    actualEx = null;
    try {
        ObjectUtil.checkNotNullArrayParam(NULL_OBJECT, 1, NULL_NAME);
    } catch (Exception e) {
        actualEx = e;
    }
    assertNotNull(actualEx, TEST_RESULT_NULLEX_OK);
    assertTrue(actualEx instanceof IllegalArgumentException, TEST_RESULT_EXTYPE_NOK);
}
|
// Deserializes the Bot API answer for this method; the result payload is a plain String.
@Override
public String deserializeResponse(String answer) throws TelegramApiRequestException {
    return deserializeResponse(answer, String.class);
}
|
@Test
public void testCreateInvoiceLinkDeserializeValidResponse(){
    // A successful Bot API answer carries the invoice link as a bare string result.
    String responseText = "{\n" +
            "  \"ok\": true,\n" +
            "  \"result\": \"https://t.me/testlink\" \n" +
            "}";
    CreateInvoiceLink createInvoiceLink = createSendInvoiceObject();
    try {
        String link = createInvoiceLink.deserializeResponse(responseText);
        assertEquals("https://t.me/testlink",link);
    } catch (TelegramApiRequestException e) {
        // Deserialization of a well-formed answer must not raise.
        fail(e.getMessage());
    }
}
|
/**
 * Writes the low 16 bits of {@code v} at the current position, honoring the
 * configured byte order, and advances the position by two bytes.
 */
@Override
public void writeShort(final int v) throws IOException {
    ensureAvailable(SHORT_SIZE_IN_BYTES);
    Bits.writeShort(buffer, pos, (short) v, isBigEndian);
    pos += SHORT_SIZE_IN_BYTES;
}
|
@Test
public void testWriteShortForPositionV() throws Exception {
    // Writing at an explicit position must place a big-endian short readable at that offset.
    short expected = 100;
    out.writeShort(2, expected);
    short actual = Bits.readShortB(out.buffer, 2);
    assertEquals(expected, actual);
}
|
/**
 * Log of the exponential density at {@code x}:
 * log(lambda * exp(-lambda * x)) = log(lambda) - lambda * x for x >= 0,
 * and negative infinity outside the support (x < 0).
 */
@Override
public double logp(double x) {
    if (x < 0) {
        return Double.NEGATIVE_INFINITY;
    }
    return Math.log(lambda) - lambda * x;
}
|
@Test
public void testLogP() {
    System.out.println("logP");
    ExponentialDistribution instance = new ExponentialDistribution(2.0);
    instance.rand();
    // Outside the support the log-density is -inf; inside it equals log(2) - 2x.
    assertTrue(Double.isInfinite(instance.logp(-0.1)));
    assertEquals(0.6931472, instance.logp(0.0), 1E-6);
    assertEquals(-1.306853, instance.logp(1.0), 1E-6);
    assertEquals(-3.306853, instance.logp(2.0), 1E-6);
    assertEquals(-5.306853, instance.logp(3.0), 1E-6);
    assertEquals(-7.306853, instance.logp(4.0), 1E-6);
}
|
/**
 * Adjusts a configuration for local (in-JVM) execution: strips options that
 * have no effect locally, relaxes per-task resource limits toward "max", and
 * pins the remaining memory options to valid defaults so validation passes.
 */
public static Configuration adjustForLocalExecution(Configuration config) {
    UNUSED_CONFIG_OPTIONS.forEach(
            option -> warnAndRemoveOptionHasNoEffectIfSet(config, option));

    setConfigOptionToPassedMaxIfNotSet(
            config, TaskManagerOptions.CPU_CORES, LOCAL_EXECUTION_CPU_CORES);
    setConfigOptionToPassedMaxIfNotSet(
            config, TaskManagerOptions.TASK_HEAP_MEMORY, LOCAL_EXECUTION_TASK_MEMORY);
    setConfigOptionToPassedMaxIfNotSet(
            config, TaskManagerOptions.TASK_OFF_HEAP_MEMORY, LOCAL_EXECUTION_TASK_MEMORY);

    adjustNetworkMemoryForLocalExecution(config);
    setConfigOptionToDefaultIfNotSet(
            config, TaskManagerOptions.MANAGED_MEMORY_SIZE, DEFAULT_MANAGED_MEMORY_SIZE);

    // Set valid default values for unused config options which should have been removed.
    config.set(
            TaskManagerOptions.FRAMEWORK_HEAP_MEMORY,
            TaskManagerOptions.FRAMEWORK_HEAP_MEMORY.defaultValue());
    config.set(
            TaskManagerOptions.FRAMEWORK_OFF_HEAP_MEMORY,
            TaskManagerOptions.FRAMEWORK_OFF_HEAP_MEMORY.defaultValue());
    config.set(
            TaskManagerOptions.JVM_METASPACE, TaskManagerOptions.JVM_METASPACE.defaultValue());
    config.set(
            TaskManagerOptions.JVM_OVERHEAD_MAX,
            TaskManagerOptions.JVM_OVERHEAD_MAX.defaultValue());
    // NOTE(review): MIN is deliberately(?) pinned to MAX's default, collapsing the
    // overhead range to a single value — confirm this is intentional and not a
    // copy-paste of the JVM_OVERHEAD_MAX line above.
    config.set(
            TaskManagerOptions.JVM_OVERHEAD_MIN,
            TaskManagerOptions.JVM_OVERHEAD_MAX.defaultValue());
    return config;
}
|
@Test
void testAdjustForLocalExecution() {
    // Starting from an empty config, local adjustment must pin network min/max to
    // the default shuffle size and managed memory to its local default.
    Configuration configuration =
            TaskExecutorResourceUtils.adjustForLocalExecution(new Configuration());
    assertThat(configuration.get(TaskManagerOptions.NETWORK_MEMORY_MIN))
            .isEqualTo(TaskExecutorResourceUtils.DEFAULT_SHUFFLE_MEMORY_SIZE);
    assertThat(configuration.get(TaskManagerOptions.NETWORK_MEMORY_MAX))
            .isEqualTo(TaskExecutorResourceUtils.DEFAULT_SHUFFLE_MEMORY_SIZE);
    assertThat(configuration.get(TaskManagerOptions.MANAGED_MEMORY_SIZE))
            .isEqualTo(TaskExecutorResourceUtils.DEFAULT_MANAGED_MEMORY_SIZE);
}
|
// Sends the request synchronously through the underlying Bot API client and
// returns the typed response.
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
    return api.send(request);
}
|
// Integration test: sends a real poll with a poll-request reply keyboard and
// verifies the echoed poll metadata.
@Test
public void sendPollWithKeyboard() {
    String question = "Question ?";
    String[] answers = {"Answer 1", "Answer 2"};
    // closeDate must be in the future (seconds since epoch).
    long closeDate = System.currentTimeMillis() / 1000 + 500;
    SendResponse sendResponse = bot.execute(
            new SendPoll(chatId, question, answers)
                    .type("regular")
                    .allowsMultipleAnswers(true)
                    .replyMarkup(new ReplyKeyboardMarkup(
                            new KeyboardButton("all polls").requestPoll(new KeyboardButtonPollType()),
                            new KeyboardButton("quiz").requestPoll(new KeyboardButtonPollType(Poll.Type.quiz)),
                            new KeyboardButton("regular").requestPoll(new KeyboardButtonPollType("regular"))))
                    .closeDate(closeDate)
    );
    // The returned poll must mirror what was sent and start with zero votes.
    Poll poll = sendResponse.message().poll();
    assertEquals(question, poll.question());
    assertEquals(answers.length, poll.options().length);
    assertTrue(poll.isAnonymous());
    assertEquals(poll.totalVoterCount(), Integer.valueOf(0));
    assertEquals(poll.type(), Poll.Type.regular);
    assertTrue(poll.allowsMultipleAnswers());
    assertEquals(closeDate, poll.closeDate().longValue());
}
|
/**
 * Writes the upstream client response back to the caller after the rest of the
 * plugin chain has run. Missing client response -> standard error result;
 * otherwise headers are redrawn and the body (if any) is streamed, releasing
 * unconsumed buffers on error/cancel.
 */
@Override
public Mono<Void> writeWith(final ServerWebExchange exchange, final ShenyuPluginChain chain) {
    return chain.execute(exchange).then(Mono.defer(() -> {
        ServerHttpResponse response = exchange.getResponse();
        ResponseEntity<Flux<DataBuffer>> fluxResponseEntity = exchange.getAttribute(Constants.CLIENT_RESPONSE_ATTR);
        if (Objects.isNull(fluxResponseEntity)) {
            // No upstream response was captured: emit the generic service error.
            Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.SERVICE_RESULT_ERROR);
            return WebFluxResultUtils.result(exchange, error);
        }
        this.redrawResponseHeaders(response, fluxResponseEntity);
        Mono<Void> responseMono;
        if (Objects.nonNull(fluxResponseEntity.getBody())) {
            // Stream the upstream body; release leftover buffers on error or cancel
            // to avoid DataBuffer leaks.
            responseMono = exchange.getResponse().writeWith(fluxResponseEntity.getBody())
                    .onErrorResume(error -> releaseIfNotConsumed(fluxResponseEntity.getBody(), error))
                    .doOnCancel(() -> clean(exchange));
        } else {
            responseMono = exchange.getResponse().writeWith(Mono.empty());
        }
        exchange.getAttributes().put(Constants.RESPONSE_MONO, responseMono);
        // watcher httpStatus
        final Consumer<HttpStatusCode> consumer = exchange.getAttribute(Constants.WATCHER_HTTP_STATUS);
        Optional.ofNullable(consumer).ifPresent(c -> c.accept(response.getStatusCode()));
        return responseMono;
    }));
}
|
@Test
public void testWriteWith() {
    // Normal 200 path: client response present, writeWith must complete.
    ServerWebExchange exchangeNormal = generateServerWebExchange(true);
    exchangeNormal.getResponse().setStatusCode(HttpStatus.OK);
    reset(chain);
    when(chain.execute(exchangeNormal)).thenReturn(Mono.empty());
    Mono<Void> monoSuccess = webClientMessageWriter.writeWith(exchangeNormal, chain);
    StepVerifier.create(monoSuccess).expectSubscription().verifyComplete();
    // Missing client response: the error-result branch must still complete.
    ServerWebExchange exchangeNullResponse = generateServerWebExchange(false);
    reset(chain);
    when(chain.execute(exchangeNullResponse)).thenReturn(Mono.empty());
    Mono<Void> monoNullResponse = webClientMessageWriter.writeWith(exchangeNullResponse, chain);
    StepVerifier.create(monoNullResponse).expectSubscription().verifyComplete();
    // Error status codes (500/502/504) must be passed through and complete too.
    ServerWebExchange exchangeInternalServerError = generateServerWebExchange(true);
    exchangeInternalServerError.getResponse().setStatusCode(HttpStatus.INTERNAL_SERVER_ERROR);
    reset(chain);
    when(chain.execute(exchangeInternalServerError)).thenReturn(Mono.empty());
    Mono<Void> monoInternalServerError = webClientMessageWriter.writeWith(exchangeInternalServerError, chain);
    StepVerifier.create(monoInternalServerError).expectSubscription().verifyComplete();
    ServerWebExchange exchangeBadGateway = generateServerWebExchange(true);
    exchangeBadGateway.getResponse().setStatusCode(HttpStatus.BAD_GATEWAY);
    reset(chain);
    when(chain.execute(exchangeBadGateway)).thenReturn(Mono.empty());
    Mono<Void> monoBadGateway = webClientMessageWriter.writeWith(exchangeBadGateway, chain);
    StepVerifier.create(monoBadGateway).expectSubscription().verifyComplete();
    ServerWebExchange exchangeGatewayTimeout = generateServerWebExchange(true);
    exchangeGatewayTimeout.getResponse().setStatusCode(HttpStatus.GATEWAY_TIMEOUT);
    reset(chain);
    when(chain.execute(exchangeGatewayTimeout)).thenReturn(Mono.empty());
    Mono<Void> monoGatewayTimeout = webClientMessageWriter.writeWith(exchangeGatewayTimeout, chain);
    StepVerifier.create(monoGatewayTimeout).expectSubscription().verifyComplete();
}
|
/**
 * Resolves (and caches) the fallback methods matching the original method's
 * signature on the target class and wraps them in a FallbackMethod.
 *
 * @throws NoSuchMethodException if no compatible fallback method exists
 */
public static FallbackMethod create(String fallbackMethodName, Method originalMethod,
    Object[] args, Object original, Object proxy) throws NoSuchMethodException {
    // Cache key: fallback name + parameter types + return type + target class.
    MethodMeta methodMeta = new MethodMeta(
        fallbackMethodName,
        originalMethod.getParameterTypes(),
        originalMethod.getReturnType(),
        original.getClass());

    // Lookup is expensive reflection; resolve once per signature and cache.
    Map<Class<?>, Method> methods = FALLBACK_METHODS_CACHE
        .computeIfAbsent(methodMeta, FallbackMethod::extractMethods);

    if (!methods.isEmpty()) {
        return new FallbackMethod(methods, originalMethod.getReturnType(), args, original, proxy);
    } else {
        // Reconstruct the expected signature in the error for easier diagnosis.
        throw new NoSuchMethodException(String.format("%s %s.%s(%s,%s)",
            methodMeta.returnType, methodMeta.targetClass, methodMeta.fallbackMethodName,
            StringUtils.arrayToDelimitedString(methodMeta.params, ","), Throwable.class));
    }
}
|
@Test
public void mismatchReturnType_shouldThrowNoSuchMethodException() throws Throwable {
    // Two fallback candidates covering the same exception type is ambiguous and
    // must be rejected with an IllegalStateException during resolution.
    FallbackMethodTest target = new FallbackMethodTest();
    Method testMethod = target.getClass().getMethod("testMethod", String.class);
    assertThatThrownBy(() -> FallbackMethod
        .create("duplicateException", testMethod, new Object[]{"test"}, target, target))
        .isInstanceOf(IllegalStateException.class)
        .hasMessage(
            "You have more that one fallback method that cover the same exception type java.lang.IllegalArgumentException");
}
|
// Convenience overload: synchronize using the schema's own (case-sensitive) name.
public synchronized void synchronizePartitionSchemas( PartitionSchema partitionSchema ) {
  synchronizePartitionSchemas( partitionSchema, partitionSchema.getName() );
}
|
@Test
public void synchronizePartitionSchemas_use_case_sensitive_name() throws Exception {
  // Two transformations with schemas whose names differ only in case.
  TransMeta transformarion1 = createTransMeta();
  PartitionSchema partitionSchema1 = createPartitionSchema( "PartitionSchema", true );
  transformarion1.setPartitionSchemas( Collections.singletonList( partitionSchema1 ) );
  spoonDelegates.trans.addTransformation( transformarion1 );

  TransMeta transformarion2 = createTransMeta();
  PartitionSchema partitionSchema2 = createPartitionSchema( "Partitionschema", true );
  transformarion2.setPartitionSchemas( Collections.singletonList( partitionSchema2 ) );
  spoonDelegates.trans.addTransformation( transformarion2 );

  // Synchronizing the lower-case schema must NOT touch the differently-cased one.
  partitionSchema2.setNumberOfPartitionsPerSlave( AFTER_SYNC_VALUE );
  sharedUtil.synchronizePartitionSchemas( partitionSchema2 );
  assertThat( partitionSchema1.getNumberOfPartitionsPerSlave(), equalTo( BEFORE_SYNC_VALUE ) );
}
|
/**
 * Creates a new element with the given name, text content, and optional
 * attributes, owned by the document of the supplied node (or the node itself
 * if it is a document).
 */
public static Element createElement(Node node, String name, String value, Map<String, Object> attributes) {
    // A Document owns itself; any other node hands us its owner document.
    Document doc = node.getNodeType() == Node.DOCUMENT_NODE ? (Document) node : node.getOwnerDocument();
    Element element = doc.createElement(name);
    element.setTextContent(value);
    addAttributes(element, attributes);
    return element;
}
|
@Test
void testCreateElement() {
    // A simple element with text content and no attributes serializes as <foo>bar</foo>.
    Node node = XmlUtils.createElement(getDocument(), "foo", "bar", null);
    String result = XmlUtils.toString(node);
    assertEquals(result, "<foo>bar</foo>");
}
|
// Delegates to the generic per-site status handler using the SITES_STATUS operation.
private CompletionStage<RestResponse> backupStatus(RestRequest request) {
   return statusOperation(request, SITES_STATUS);
}
|
@Test
public void testInvalidSite() {
   // Querying backup status for a site name that is not configured must yield 404.
   RestClient client = clientPerSite.get(LON);
   RestCacheClient cache = client.cache(CACHE_1);
   assertStatus(404, cache.backupStatus("invalid-site"));
}
|
@Override
@SuppressWarnings("rawtypes")
// Serializes one reporting period's worth of metrics to Collectd.
// A single connection is opened for the whole batch and always torn down in
// the finally block; an I/O failure aborts the remaining metrics for this
// period but is only logged, so reporting resumes on the next period.
public void report(SortedMap<String, Gauge> gauges, SortedMap<String, Counter> counters,
    SortedMap<String, Histogram> histograms, SortedMap<String, Meter> meters, SortedMap<String, Timer> timers) {
    // Shared metadata template: host, timestamp (seconds) and period; each
    // metric below stamps its own name in via plugin(...).
    MetaData.Builder metaData = new MetaData.Builder(sanitize, hostName, clock.getTime() / 1000, period)
        .type(COLLECTD_TYPE_GAUGE);
    try {
        connect(sender);
        for (Map.Entry<String, Gauge> entry : gauges.entrySet()) {
            serializeGauge(metaData.plugin(entry.getKey()), entry.getValue());
        }
        for (Map.Entry<String, Counter> entry : counters.entrySet()) {
            serializeCounter(metaData.plugin(entry.getKey()), entry.getValue());
        }
        for (Map.Entry<String, Histogram> entry : histograms.entrySet()) {
            serializeHistogram(metaData.plugin(entry.getKey()), entry.getValue());
        }
        for (Map.Entry<String, Meter> entry : meters.entrySet()) {
            serializeMeter(metaData.plugin(entry.getKey()), entry.getValue());
        }
        for (Map.Entry<String, Timer> entry : timers.entrySet()) {
            serializeTimer(metaData.plugin(entry.getKey()), entry.getValue());
        }
    } catch (IOException e) {
        // Best-effort reporting: log and drop this period rather than propagate.
        LOG.warn("Unable to report to Collectd", e);
    } finally {
        disconnect(sender);
    }
}
|
// Verifies that a Meter is serialized as five consecutive Collectd values in a
// fixed order: count, 1m rate, 5m rate, 15m rate, mean rate.
@Test
public void reportsMeters() throws Exception {
    Meter meter = mock(Meter.class);
    when(meter.getCount()).thenReturn(1L);
    when(meter.getOneMinuteRate()).thenReturn(2.0);
    when(meter.getFiveMinuteRate()).thenReturn(3.0);
    when(meter.getFifteenMinuteRate()).thenReturn(4.0);
    when(meter.getMeanRate()).thenReturn(5.0);
    // Only the meters map is populated; all other metric kinds are empty.
    reporter.report(
        map(),
        map(),
        map(),
        map("api.rest.requests", meter),
        map());
    // Each nextValues(...) call reads one packet from the fake receiver.
    assertThat(nextValues(receiver)).containsExactly(1d);
    assertThat(nextValues(receiver)).containsExactly(2d);
    assertThat(nextValues(receiver)).containsExactly(3d);
    assertThat(nextValues(receiver)).containsExactly(4d);
    assertThat(nextValues(receiver)).containsExactly(5d);
}
|
@Override
// Returns a page of brokerage records matching the request's filters;
// pagination and filtering are handled entirely by the mapper.
public PageResult<BrokerageRecordDO> getBrokerageRecordPage(BrokerageRecordPageReqVO pageReqVO) {
    return brokerageRecordMapper.selectPage(pageReqVO);
}
|
@Test
@Disabled // TODO replace the null values with real ones, then remove the @Disabled annotation
public void testGetBrokerageRecordPage() {
    // mock data
    BrokerageRecordDO dbBrokerageRecord = randomPojo(BrokerageRecordDO.class, o -> { // this row should be found by the query
        o.setUserId(null);
        o.setBizType(null);
        o.setStatus(null);
        o.setCreateTime(null);
    });
    brokerageRecordMapper.insert(dbBrokerageRecord);
    // row with a non-matching userId
    brokerageRecordMapper.insert(cloneIgnoreId(dbBrokerageRecord, o -> o.setUserId(null)));
    // row with a non-matching bizType
    brokerageRecordMapper.insert(cloneIgnoreId(dbBrokerageRecord, o -> o.setBizType(null)));
    // row with a non-matching status
    brokerageRecordMapper.insert(cloneIgnoreId(dbBrokerageRecord, o -> o.setStatus(null)));
    // row with a non-matching createTime
    brokerageRecordMapper.insert(cloneIgnoreId(dbBrokerageRecord, o -> o.setCreateTime(null)));
    // prepare query parameters
    BrokerageRecordPageReqVO reqVO = new BrokerageRecordPageReqVO();
    reqVO.setUserId(null);
    reqVO.setBizType(null);
    reqVO.setStatus(null);
    reqVO.setCreateTime(buildBetweenTime(2023, 2, 1, 2023, 2, 28));
    // invoke
    PageResult<BrokerageRecordDO> pageResult = brokerageRecordService.getBrokerageRecordPage(reqVO);
    // assert: exactly the first row matches
    assertEquals(1, pageResult.getTotal());
    assertEquals(1, pageResult.getList().size());
    assertPojoEquals(dbBrokerageRecord, pageResult.getList().get(0));
}
|
/**
 * CLI entry point: parses the command line, prints help when requested (or when
 * no arguments are given), otherwise builds an executor and runs the pipeline.
 */
public static void main(String[] args) throws Exception {
    Options options = CliFrontendOptions.initializeOptions();
    CommandLine commandLine = new DefaultParser().parse(options, args);

    // Show usage and exit when invoked with no arguments or an explicit --help.
    boolean helpRequested = args.length == 0 || commandLine.hasOption(CliFrontendOptions.HELP);
    if (helpRequested) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.setLeftPadding(4);
        formatter.setWidth(80);
        formatter.printHelp(" ", options);
        return;
    }

    // Build the executor from the parsed options, run the pipeline, and report.
    PipelineExecution.ExecutionInfo executionInfo = createExecutor(commandLine).run();
    printExecutionInfo(executionInfo);
}
|
// Running the CLI with --help must print exactly the expected help text to
// stdout and write nothing to stderr.
@Test
void testGeneratingHelpMessage() throws Exception {
    CliFrontend.main(new String[] {"--help"});
    assertThat(out.toString()).isEqualTo(HELP_MESSAGE);
    assertThat(err.toString()).isEmpty();
}
|
@Override
// Computes table statistics from a sample of partition statistics.
// Returns empty statistics when stats are disabled, zero statistics when there
// are no partitions, and tolerates corrupted column statistics (returning
// empty) only when the session opts in via ignore-corrupted-statistics.
public TableStatistics getTableStatistics(
        ConnectorSession session,
        SchemaTableName table,
        Map<String, ColumnHandle> columns,
        Map<String, Type> columnTypes,
        List<HivePartition> partitions)
{
    if (!isStatisticsEnabled(session)) {
        return TableStatistics.empty();
    }
    if (partitions.isEmpty()) {
        return createZeroStatistics(columns, columnTypes);
    }
    // Fetch stats for only a bounded sample of partitions, then extrapolate
    // over the full partition list in getTableStatistics(...) below.
    int sampleSize = getPartitionStatisticsSampleSize(session);
    List<HivePartition> partitionsSample = getPartitionsSample(partitions, sampleSize);
    try {
        Map<String, PartitionStatistics> statisticsSample = statisticsProvider.getPartitionsStatistics(session, table, partitionsSample);
        validatePartitionStatistics(table, statisticsSample);
        return getTableStatistics(columns, columnTypes, partitions, statisticsSample);
    }
    catch (PrestoException e) {
        // Only corrupted-statistics errors may be suppressed, and only when the
        // session explicitly allows it; everything else propagates.
        if (e.getErrorCode().equals(HIVE_CORRUPTED_COLUMN_STATISTICS.toErrorCode()) && isIgnoreCorruptedStatistics(session)) {
            log.error(e);
            return TableStatistics.empty();
        }
        throw e;
    }
}
|
// A partition whose statistics are empty must produce empty table statistics
// (no columns requested, defaults session configuration).
@Test
public void testGetTableStatisticsEmpty()
{
    String partitionName = "p1=string1/p2=1234";
    // Provider stub always returns empty statistics for the single partition.
    MetastoreHiveStatisticsProvider statisticsProvider = new MetastoreHiveStatisticsProvider((session, table, hivePartitions) -> ImmutableMap.of(partitionName, PartitionStatistics.empty()), quickStatsProvider);
    TestingConnectorSession session = new TestingConnectorSession(new HiveSessionProperties(
            new HiveClientConfig(),
            new OrcFileWriterConfig(),
            new ParquetFileWriterConfig(),
            new CacheConfig()).getSessionProperties());
    assertEquals(
            statisticsProvider.getTableStatistics(
                    session,
                    TABLE,
                    ImmutableMap.of(),
                    ImmutableMap.of(),
                    ImmutableList.of(partition(partitionName))),
            TableStatistics.empty());
}
|
@Override
// Dispatches authorization checks by statement type. Statement kinds not
// listed here (e.g. DDL without a source) require no topic-level checks and
// pass through silently.
public void checkAuthorization(
    final KsqlSecurityContext securityContext,
    final MetaStore metaStore,
    final Statement statement
) {
    if (statement instanceof Query) {
        validateQuery(securityContext, metaStore, (Query)statement);
    } else if (statement instanceof InsertInto) {
        validateInsertInto(securityContext, metaStore, (InsertInto)statement);
    } else if (statement instanceof CreateAsSelect) {
        validateCreateAsSelect(securityContext, metaStore, (CreateAsSelect)statement);
    } else if (statement instanceof PrintTopic) {
        validatePrintTopic(securityContext, (PrintTopic)statement);
    } else if (statement instanceof CreateSource) {
        validateCreateSource(securityContext, (CreateSource)statement);
    }
}
|
// A join over two streams must be authorized without throwing when the
// security context has read permission on both underlying topics.
@Test
public void shouldJoinSelectWithReadPermissionsAllowed() {
    // Given:
    final Statement statement = givenStatement(String.format(
        "SELECT * FROM %s A JOIN %s B ON A.F1 = B.F1;", KAFKA_STREAM_TOPIC, AVRO_STREAM_TOPIC)
    );

    // When/Then: no exception expected.
    authorizationValidator.checkAuthorization(securityContext, metaStore, statement);
}
|
/**
 * Deletes a file or directory (recursively emptying a directory first).
 * A {@code null} argument is a no-op, as is a path that does not exist.
 *
 * @param fileOrDir the file or directory to delete; may be {@code null}
 * @throws IOException if the file or directory exists but could not be deleted
 */
public static void delete(File fileOrDir) throws IOException {
    if (fileOrDir == null) {
        return;
    }
    if (fileOrDir.isDirectory()) {
        // Empty the directory first; File.delete() fails on non-empty dirs.
        cleanDirectory(fileOrDir);
    }
    // Previously the boolean result of delete() was silently discarded; now a
    // failed deletion of an existing path is surfaced. A missing path (delete()
    // returns false but exists() is false) stays a no-op for compatibility.
    if (!fileOrDir.delete() && fileOrDir.exists()) {
        throw new IOException("Unable to delete " + fileOrDir.getAbsolutePath());
    }
}
|
// Populates a directory with ten files, then verifies delete(...) removes the
// directory and all of its contents.
@Test
public void testDelete() throws Exception {
    for (int i = 0; i < 10; i++) {
        IOTinyUtils.writeStringToFile(new File(testRootDir, "testDelete" + i), "testCleanDirectory", StandardCharsets.UTF_8.name());
    }
    File dir = new File(testRootDir);
    assertTrue(dir.exists() && dir.isDirectory());
    assertTrue(dir.listFiles().length > 0);
    IOTinyUtils.delete(new File(testRootDir));
    // The directory itself must be gone, not merely emptied.
    assertTrue(!dir.exists());
}
|
/**
 * Builds a signed SAML ArtifactResponse for the given ArtifactResolve request.
 * The response carries a Success status, echoes the resolve request's ID in
 * InResponseTo, and embeds the actual response message built by buildResponse.
 *
 * @param artifactResolveRequest the incoming resolve request being answered
 * @param entityId issuer entity ID placed on the response (also passed to buildResponse)
 * @param signType which signing configuration to apply
 * @throws InstantiationException if a SAML object cannot be instantiated
 * @throws ValidationException if the built message fails validation
 * @throws ArtifactBuildException if assembling the response fails
 * @throws BvdException on BVD-related failures in buildResponse
 */
public ArtifactResponse buildArtifactResponse(ArtifactResolveRequest artifactResolveRequest, String entityId, SignType signType) throws InstantiationException, ValidationException, ArtifactBuildException, BvdException {
    final var artifactResponse = OpenSAMLUtils.buildSAMLObject(ArtifactResponse.class);
    final var status = OpenSAMLUtils.buildSAMLObject(Status.class);
    final var statusCode = OpenSAMLUtils.buildSAMLObject(StatusCode.class);
    final var issuer = OpenSAMLUtils.buildSAMLObject(Issuer.class);

    return ArtifactResponseBuilder
        .newInstance(artifactResponse)
        .addID()
        .addIssueInstant()
        // Correlate this response with the resolve request that triggered it.
        .addInResponseTo(artifactResolveRequest.getArtifactResolve().getID())
        .addStatus(StatusBuilder
            .newInstance(status)
            .addStatusCode(statusCode, StatusCode.SUCCESS)
            .build())
        .addIssuer(issuer, entityId)
        .addMessage(buildResponse(artifactResolveRequest, entityId, signType))
        // Signature must be added last so it covers the assembled content.
        .addSignature(signatureService, signType)
        .build();
}
|
// A successful pseudonym-encrypted resolve request must yield a response whose
// embedded SAML Response carries the standard Success status code.
@Test
void parseArtifactResolveSuccessPseudonym() throws ValidationException, SamlParseException, ArtifactBuildException, BvdException, InstantiationException {
    ArtifactResponse artifactResponse = artifactResponseService.buildArtifactResponse(getArtifactResolveRequest("success", true, false,SAML_COMBICONNECT, EncryptionType.PSEUDONIEM, ENTRANCE_ENTITY_ID), ENTRANCE_ENTITY_ID, TD);
    assertEquals("urn:oasis:names:tc:SAML:2.0:status:Success", ((Response) artifactResponse.getMessage()).getStatus().getStatusCode().getValue());
}
|
/**
 * Returns the minimum remoting version this build supports.
 * Never {@code null}; backed by the MINIMUM_SUPPORTED_VERSION constant.
 */
@NonNull
public static VersionNumber getMinimumSupportedVersion() {
    return MINIMUM_SUPPORTED_VERSION;
}
|
// The minimum supported remoting version must always be resolvable (non-null)
// without any extra configuration.
@Test
public void shouldLoadMinimumSupportedVersionByDefault() {
    assertThat("Remoting Minimum supported version is not defined",
            RemotingVersionInfo.getMinimumSupportedVersion(), notNullValue());
}
|
// Runs stream-time and system-time punctuation for every active task that is
// currently allowed to punctuate, and returns how many punctuations fired.
// Exception handling mirrors task processing: TaskMigratedException is
// rethrown as-is (triggers rebalance), StreamsException is tagged with the
// task id, and any other KafkaException is wrapped in a StreamsException.
int punctuate() {
    int punctuated = 0;

    for (final Task task : tasks.activeTasks()) {
        try {
            if (executionMetadata.canPunctuateTask(task)) {
                // Stream-time and system-time punctuations are independent;
                // each may fire (and be counted) in the same pass.
                if (task.maybePunctuateStreamTime()) {
                    punctuated++;
                }
                if (task.maybePunctuateSystemTime()) {
                    punctuated++;
                }
            }
        } catch (final TaskMigratedException e) {
            log.info("Failed to punctuate stream task {} since it got migrated to another thread already. " +
                    "Will trigger a new rebalance and close all tasks as zombies together.", task.id());
            throw e;
        } catch (final StreamsException e) {
            log.error("Failed to punctuate stream task {} due to the following error:", task.id(), e);
            e.setTaskId(task.id());
            throw e;
        } catch (final KafkaException e) {
            log.error("Failed to punctuate stream task {} due to the following error:", task.id(), e);
            throw new StreamsException(e, task.id());
        }
    }

    return punctuated;
}
|
// punctuate() must consult the active-task list even when no task is allowed
// to punctuate (metadata mock returns false for canPunctuateTask by default).
@Test
public void testPunctuateWithPause() {
    final Tasks tasks = mock(Tasks.class);
    final TaskManager taskManager = mock(TaskManager.class);
    final TaskExecutionMetadata metadata = mock(TaskExecutionMetadata.class);
    final TaskExecutor taskExecutor = new TaskExecutor(tasks, taskManager, metadata, new LogContext());
    taskExecutor.punctuate();
    verify(tasks).activeTasks();
}
|
// Returns the Beam schema derived from the proto descriptor, as held by the
// conversion context.
public Schema getSchema() {
    return context.getSchema();
}
|
// A proto message with non-contiguous oneof field numbers must still map to
// the expected Beam schema.
@Test
public void testNonContiguousOneOfSchema() {
    ProtoDynamicMessageSchema schemaProvider =
            schemaFromDescriptor(NonContiguousOneOf.getDescriptor());
    Schema schema = schemaProvider.getSchema();
    assertEquals(NONCONTIGUOUS_ONEOF_SCHEMA, schema);
}
|
/**
 * Checks a single parameter value against a param-flow rule.
 * QPS-graded rules delegate to the throttle or default local check depending
 * on the control behavior; thread-graded rules compare the value's current
 * thread count (plus this acquisition) against the per-item or rule-wide
 * threshold. Unknown grades always pass.
 */
static boolean passSingleValueCheck(ResourceWrapper resourceWrapper, ParamFlowRule rule, int acquireCount,
                                    Object value) {
    int grade = rule.getGrade();

    if (grade == RuleConstant.FLOW_GRADE_QPS) {
        // QPS mode: rate-limiter behavior throttles, anything else uses the
        // default token-based local check.
        boolean rateLimiter = rule.getControlBehavior() == RuleConstant.CONTROL_BEHAVIOR_RATE_LIMITER;
        return rateLimiter
            ? passThrottleLocalCheck(resourceWrapper, rule, acquireCount, value)
            : passDefaultLocalCheck(resourceWrapper, rule, acquireCount, value);
    }

    if (grade == RuleConstant.FLOW_GRADE_THREAD) {
        long currentThreadCount = getParameterMetric(resourceWrapper).getThreadCount(rule.getParamIdx(), value);
        // A per-item ("hot item") threshold overrides the rule-wide count.
        Set<Object> hotKeys = rule.getParsedHotItems().keySet();
        long threshold = hotKeys.contains(value)
            ? rule.getParsedHotItems().get(value)
            : (long) rule.getCount();
        // Admit only if this acquisition would not push the count past the threshold.
        return currentThreadCount + 1 <= threshold;
    }

    // Unrecognized grade: no constraint applies.
    return true;
}
|
// Rate-limiter QPS check with per-item exception thresholds: valueA falls back
// to the global threshold (passes), while valueB has an explicit threshold of
// 0 and must be rejected.
@Test
public void testSingleValueCheckQpsWithExceptionItems() throws InterruptedException {
    final String resourceName = "testSingleValueCheckQpsWithExceptionItems";
    final ResourceWrapper resourceWrapper = new StringResourceWrapper(resourceName, EntryType.IN);
    TimeUtil.currentTimeMillis();
    int paramIdx = 0;

    long globalThreshold = 5L;
    int thresholdB = 0;
    int thresholdD = 7;

    ParamFlowRule rule = new ParamFlowRule();
    rule.setResource(resourceName);
    rule.setCount(globalThreshold);
    rule.setParamIdx(paramIdx);
    rule.setControlBehavior(RuleConstant.CONTROL_BEHAVIOR_RATE_LIMITER);

    String valueA = "valueA";
    String valueB = "valueB";
    String valueC = "valueC";
    String valueD = "valueD";
    // Directly set parsed map for test.
    Map<Object, Integer> map = new HashMap<Object, Integer>();
    map.put(valueB, thresholdB);
    map.put(valueD, thresholdD);
    rule.setParsedHotItems(map);

    ParameterMetric metric = new ParameterMetric();
    ParameterMetricStorage.getMetricsMap().put(resourceWrapper.getName(), metric);
    metric.getRuleTimeCounterMap().put(rule, new ConcurrentLinkedHashMapWrapper<Object, AtomicLong>(4000));

    assertTrue(ParamFlowChecker.passSingleValueCheck(resourceWrapper, rule, 1, valueA));
    assertFalse(ParamFlowChecker.passSingleValueCheck(resourceWrapper, rule, 1, valueB));
    // NOTE(review): the 3s sleep presumably lets the rate-limiter window
    // reset before the next test — confirm whether it is still required; it
    // slows the suite considerably.
    TimeUnit.SECONDS.sleep(3);
}
|
/**
 * Fetches all committed offsets for a group as of the given committed offset
 * in the timeline. A {@code lastCommittedOffset} of {@code Long.MAX_VALUE}
 * doubles as the "require stable" signal: partitions with pending
 * transactional offsets are then reported with UNSTABLE_OFFSET_COMMIT instead
 * of a value. An unknown group yields an empty (non-error) response.
 *
 * @throws ApiException if the request fails offset-fetch validation for a
 *         reason other than the group not existing
 */
public OffsetFetchResponseData.OffsetFetchResponseGroup fetchAllOffsets(
    OffsetFetchRequestData.OffsetFetchRequestGroup request,
    long lastCommittedOffset
) throws ApiException {
    final boolean requireStable = lastCommittedOffset == Long.MAX_VALUE;

    try {
        validateOffsetFetch(request, lastCommittedOffset);
    } catch (GroupIdNotFoundException ex) {
        // Unknown group: respond with no topics rather than an error.
        return new OffsetFetchResponseData.OffsetFetchResponseGroup()
            .setGroupId(request.groupId())
            .setTopics(Collections.emptyList());
    }

    final List<OffsetFetchResponseData.OffsetFetchResponseTopics> topicResponses = new ArrayList<>();
    // Read the group's offsets at the requested point in the timeline.
    final TimelineHashMap<String, TimelineHashMap<Integer, OffsetAndMetadata>> groupOffsets =
        offsets.offsetsByGroup.get(request.groupId(), lastCommittedOffset);

    if (groupOffsets != null) {
        groupOffsets.entrySet(lastCommittedOffset).forEach(topicEntry -> {
            final String topic = topicEntry.getKey();
            final TimelineHashMap<Integer, OffsetAndMetadata> topicOffsets = topicEntry.getValue();
            final OffsetFetchResponseData.OffsetFetchResponseTopics topicResponse =
                new OffsetFetchResponseData.OffsetFetchResponseTopics().setName(topic);
            topicResponses.add(topicResponse);

            topicOffsets.entrySet(lastCommittedOffset).forEach(partitionEntry -> {
                final int partition = partitionEntry.getKey();
                final OffsetAndMetadata offsetAndMetadata = partitionEntry.getValue();

                if (requireStable && hasPendingTransactionalOffsets(request.groupId(), topic, partition)) {
                    // Pending transaction: report the partition as unstable
                    // with a sentinel offset rather than a possibly-aborted value.
                    topicResponse.partitions().add(new OffsetFetchResponseData.OffsetFetchResponsePartitions()
                        .setPartitionIndex(partition)
                        .setErrorCode(Errors.UNSTABLE_OFFSET_COMMIT.code())
                        .setCommittedOffset(INVALID_OFFSET)
                        .setCommittedLeaderEpoch(-1)
                        .setMetadata(""));
                } else {
                    topicResponse.partitions().add(new OffsetFetchResponseData.OffsetFetchResponsePartitions()
                        .setPartitionIndex(partition)
                        .setCommittedOffset(offsetAndMetadata.committedOffset)
                        .setCommittedLeaderEpoch(offsetAndMetadata.leaderEpoch.orElse(-1))
                        .setMetadata(offsetAndMetadata.metadata));
                }
            });
        });
    }

    return new OffsetFetchResponseData.OffsetFetchResponseGroup()
        .setGroupId(request.groupId())
        .setTopics(topicResponses);
}
|
// Fetching all offsets for a group in the DEAD state must return an empty
// topic list rather than an error.
@Test
public void testGenericGroupFetchAllOffsetsWithDeadGroup() {
    OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();

    // Create a dead group.
    ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(
        "group",
        true
    );
    group.transitionTo(ClassicGroupState.DEAD);

    assertEquals(Collections.emptyList(), context.fetchAllOffsets("group", Long.MAX_VALUE));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.