| focal_method (string, 13–60.9k chars) | test_case (string, 25–109k chars) |
|---|---|
@JsonIgnore
public Map<String, StepTransition> getDag() {
return steps.stream().collect(MapHelper.toListMap(Step::getId, Step::getTransition));
}
|
@Test
public void testGetDag() throws Exception {
Workflow wf =
loadObject(
"fixtures/workflows/definition/sample-active-wf-with-props.json",
WorkflowDefinition.class)
.getWorkflow();
assertEquals(
threeItemMap(
"job.1", wf.getSteps().get(0).getTransition(),
"job.2", wf.getSteps().get(1).getTransition(),
"job.3", wf.getSteps().get(2).getTransition()),
wf.getDag());
}
|
public Attributes readDataset() throws IOException {
return readDataset(o -> false);
}
|
@Test(expected = EOFException.class)
public void testNoOutOfMemoryErrorOnInvalidLength() throws IOException {
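// The trailing "evil length" bytes stand in for a bogus declared element length; parsing should
// fail fast with EOFException rather than allocate a huge buffer (hence the test name).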
byte[] b = { 8, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 'e', 'v', 'i', 'l', 'l', 'e', 'n', 'g', 'h' };
try ( DicomInputStream in = new DicomInputStream(new ByteArrayInputStream(b))) {
in.readDataset();
}
}
|
public Marshaller createMarshaller(Class<?> clazz) throws JAXBException {
Marshaller marshaller = getContext(clazz).createMarshaller();
setMarshallerProperties(marshaller);
if (marshallerEventHandler != null) {
marshaller.setEventHandler(marshallerEventHandler);
}
marshaller.setSchema(marshallerSchema);
return marshaller;
}
|
@Test
void buildsMarshallerWithNoNamespaceSchemaLocationProperty() throws Exception {
JAXBContextFactory factory =
new JAXBContextFactory.Builder()
.withMarshallerNoNamespaceSchemaLocation("http://apihost/schema.xsd").build();
Marshaller marshaller = factory.createMarshaller(Object.class);
assertThat(marshaller.getProperty(Marshaller.JAXB_NO_NAMESPACE_SCHEMA_LOCATION))
.isEqualTo("http://apihost/schema.xsd");
}
|
@Override
public void upgrade() {
if (configService.get(MigrationCompleted.class) != null) {
LOG.debug("Migration already completed.");
return;
}
var previousMigration = Optional.ofNullable(configService.get(V20191219090834_AddSourcesPage.MigrationCompleted.class));
var previousInstallation = previousMigration.flatMap(this::previousInstallation);
var notPreviouslyInstalled = previousInstallation.isEmpty();
final ContentPack contentPack = readContentPack();
var contentPackShouldBeUninstalled = previousInstallation
.filter(this::userHasNotModifiedSourcesPage);
var notLocallyModified = contentPackShouldBeUninstalled.isPresent();
var previousDashboard = contentPackShouldBeUninstalled.flatMap(this::dashboardFromInstallation);
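// If this content-pack revision already exists, mark the migration completed (nothing installed) before aborting.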
var pack = insertContentPack(contentPack)
.orElseThrow(() -> {
configService.write(MigrationCompleted.create(contentPack.id().toString(), false, false));
return new ContentPackException("Content pack " + contentPack.id() + " with this revision " + contentPack.revision() + " already found!");
});
contentPackShouldBeUninstalled.ifPresent(this::uninstallContentPack);
if (notPreviouslyInstalled || notLocallyModified) {
var newInstallation = installContentPack(pack);
assert (newInstallation != null);
previousDashboard.ifPresent(dashboard -> fixupNewDashboardId(dashboard, newInstallation));
} else {
notificationService.publishIfFirst(notificationService.buildNow()
.addType(Notification.Type.GENERIC)
.addSeverity(Notification.Severity.NORMAL)
.addDetail("title", "Updating Sources Dashboard")
.addDetail("description", """
While updating the Sources Dashboard, it was detected that the previous version was modified locally. To save these modifications from getting lost,
a new version of the content pack containing the Sources Dashboard was uploaded, but not installed.
If you want to use the new version of the dashboard, you can go to "System" -> "Content Packs" -> "Sources Page Dashboard" and install version 2.
In addition, you can either keep your current "Sources" dashboard (having two "Sources" dashboards) or uninstall version 1 of the content pack to remove it.
"""));
}
configService.write(MigrationCompleted.create(pack.id().toString(), notPreviouslyInstalled || notLocallyModified, contentPackShouldBeUninstalled.isPresent()));
}
private void fixupNewDashboardId(Document previousDashboard, ContentPackInstallation newInstallation) {
var newDashboard = dashboardFromInstallation(newInstallation);
var previousDashboardId = previousDashboard.getObjectId("_id");
newDashboard.ifPresent(dashboard -> {
var newDashboardId = dashboard.getObjectId("_id");
dashboard.append("_id", previousDashboardId);
views.deleteOne(Filters.eq("_id", newDashboardId));
views.insertOne(dashboard);
contentPackInstallations.updateOne(Filters.eq("_id", newInstallation.id()), Updates.set("entities.0.id", previousDashboardId.toHexString()));
});
}
private Optional<ContentPackInstallation> previousInstallation(V20191219090834_AddSourcesPage.MigrationCompleted previousMigration) {
return Optional.ofNullable(previousMigration.contentPackId())
.map(id -> contentPackInstallationPersistenceService.findByContentPackId(ModelId.of(id)))
.flatMap(installations -> installations.stream()
.filter(installation -> installation.contentPackRevision() == 1
&& installation.createdBy().equals("admin")
&& installation.comment().equals("Add Sources Page"))
.findFirst());
}
private Optional<Document> dashboardFromInstallation(ContentPackInstallation installation) {
return Optional.ofNullable(installation.entities())
.flatMap(entities -> entities.stream().findFirst())
.map(Identified::id)
.map(ModelId::id)
.flatMap(dashboardId -> Optional.ofNullable(views.find(Filters.eq("_id", new ObjectId(dashboardId))).first()));
}
private boolean userHasNotModifiedSourcesPage(ContentPackInstallation previousInstallation) {
var previousDashboard = dashboardFromInstallation(previousInstallation)
.flatMap(dashboard -> Optional.ofNullable(dashboard.getString("search_id")))
.flatMap(searchId -> Optional.ofNullable(searches.find(Filters.eq("_id", new ObjectId(searchId))).first()));
var userHasModifiedSourcesPage = previousDashboard
.map(dashboard -> dashboard.getDate("created_at"))
.map(createdAt -> !createdAt.equals(UNMODIFIED_SOURCES_SEARCH_DATE))
.orElse(false);
return !userHasModifiedSourcesPage;
}
private ContentPackInstallation installContentPack(ContentPack contentPack) {
return contentPackService.installContentPack(contentPack, Collections.emptyMap(), "Add Sources Page V2", "admin");
}
private Optional<ContentPack> insertContentPack(ContentPack contentPack) {
return this.contentPackPersistenceService.insert(contentPack);
}
private void uninstallContentPack(ContentPackInstallation contentPackInstallation) {
contentPackPersistenceService.findByIdAndRevision(contentPackInstallation.contentPackId(), contentPackInstallation.contentPackRevision())
.ifPresent(contentPack -> contentPackService.uninstallContentPack(contentPack, contentPackInstallation));
}
private ContentPack readContentPack() {
try {
final URL contentPackURL = V20230601104500_AddSourcesPageV2.class.getResource("V20230601104500_AddSourcesPage_V2_Content_Pack.json");
return this.objectMapper.readValue(contentPackURL, ContentPack.class);
} catch (IOException e) {
throw new RuntimeException("Unable to read content pack source in migration: ", e);
}
}
@JsonAutoDetect
@AutoValue
@WithBeanGetter
public static abstract class MigrationCompleted {
@JsonProperty("content_pack_id")
public abstract String contentPackId();
@JsonProperty("installed_content_pack")
public abstract boolean installedContentPack();
@JsonProperty("uninstalled_previous_revision")
public abstract boolean uninstalledPreviousRevision();
@JsonCreator
public static MigrationCompleted create(@JsonProperty("content_pack_id") final String contentPackId,
@JsonProperty("installed_content_pack") boolean installedContentPack,
@JsonProperty("uninstalled_previous_revision") boolean uninstalledPreviousRevision) {
return new AutoValue_V20230601104500_AddSourcesPageV2_MigrationCompleted(contentPackId, installedContentPack, uninstalledPreviousRevision);
}
}
}
|
@Test
void alreadyMigrated() {
thisMigrationHasRun();
this.migration.upgrade();
verify(clusterConfigService, never()).get(V20191219090834_AddSourcesPage.MigrationCompleted.class);
verify(clusterConfigService, never()).write(any());
}
|
@Override
public void reportFailedCheckpoint(FailedCheckpointStats failed) {
statsReadWriteLock.lock();
try {
counts.incrementFailedCheckpoints();
history.replacePendingCheckpointById(failed);
dirty = true;
logCheckpointStatistics(failed);
if (checkpointStatsListener != null) {
checkpointStatsListener.onFailedCheckpoint();
}
} finally {
statsReadWriteLock.unlock();
}
}
|
@Test
void testCheckpointStatsListenerOnFailedCheckpoint() {
testCheckpointStatsListener(
(checkpointStatsTracker, pendingCheckpointStats) ->
checkpointStatsTracker.reportFailedCheckpoint(
pendingCheckpointStats.toFailedCheckpoint(
System.currentTimeMillis(), null)),
0,
1);
}
|
@Override
public void deleteCategory(Long id) {
// Validate that the category exists
validateProductCategoryExists(id);
// Validate that there are no child categories
if (productCategoryMapper.selectCountByParentId(id) > 0) {
throw exception(CATEGORY_EXISTS_CHILDREN);
}
// Validate that the category is not bound to any SPU
Long spuCount = productSpuService.getSpuCountByCategoryId(id);
if (spuCount > 0) {
throw exception(CATEGORY_HAVE_BIND_SPU);
}
// Delete
productCategoryMapper.deleteById(id);
}
|
@Test
public void testDeleteCategory_notExists() {
// Prepare the parameter
Long id = randomLongId();
// Invoke and assert the expected exception
assertServiceException(() -> productCategoryService.deleteCategory(id), CATEGORY_NOT_EXISTS);
}
|
public FEELFnResult<List<Object>> invoke(@ParameterName( "list" ) Object list) {
if ( list == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
}
// spec requires us to return a new list
final List<Object> result = new ArrayList<>();
if ( list instanceof Collection ) {
for (Object o : (Collection) list) {
if ( !result.contains( o ) ) {
result.add(o);
}
}
} else {
result.add( list );
}
return FEELFnResult.ofResult( result );
}
|
@Test
void invokeEmptyList() {
FunctionTestUtil.assertResultList(distinctValuesFunction.invoke(Collections.emptyList()),
Collections.emptyList());
}
|
@Description("Returns the cardinality of the geometry collection")
@ScalarFunction("ST_NumGeometries")
@SqlType(INTEGER)
public static long stNumGeometries(@SqlType(GEOMETRY_TYPE_NAME) Slice input)
{
Geometry geometry = deserialize(input);
if (geometry.isEmpty()) {
return 0;
}
return geometry.getNumGeometries();
}
|
@Test
public void testSTNumGeometries()
{
assertSTNumGeometries("POINT EMPTY", 0);
assertSTNumGeometries("LINESTRING EMPTY", 0);
assertSTNumGeometries("POLYGON EMPTY", 0);
assertSTNumGeometries("MULTIPOINT EMPTY", 0);
assertSTNumGeometries("MULTILINESTRING EMPTY", 0);
assertSTNumGeometries("MULTIPOLYGON EMPTY", 0);
assertSTNumGeometries("GEOMETRYCOLLECTION EMPTY", 0);
assertSTNumGeometries("POINT (1 2)", 1);
assertSTNumGeometries("LINESTRING(77.29 29.07,77.42 29.26,77.27 29.31,77.29 29.07)", 1);
assertSTNumGeometries("POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))", 1);
assertSTNumGeometries("MULTIPOINT (1 2, 2 4, 3 6, 4 8)", 4);
assertSTNumGeometries("MULTILINESTRING ((1 1, 5 1), (2 4, 4 4))", 2);
assertSTNumGeometries("MULTIPOLYGON (((1 1, 1 3, 3 3, 3 1, 1 1)), ((2 4, 2 6, 6 6, 6 4, 2 4)))", 2);
assertSTNumGeometries("GEOMETRYCOLLECTION(POINT(2 3), LINESTRING (2 3, 3 4))", 2);
}
|
@Override
public boolean supportsANSI92IntermediateSQL() {
return false;
}
|
@Test
void assertSupportsANSI92IntermediateSQL() {
assertFalse(metaData.supportsANSI92IntermediateSQL());
}
|
@SafeVarargs
public static Optional<Predicate<Throwable>> createNegatedExceptionsPredicate(
Class<? extends Throwable>... ignoreExceptions) {
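// Negating the class-match predicate yields one that is true only for throwables matching none of the ignored classes.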
return exceptionPredicate(ignoreExceptions)
.map(Predicate::negate);
}
|
@Test
public void buildIgnoreExceptionsPredicate() {
Predicate<Throwable> predicate = PredicateCreator
.createNegatedExceptionsPredicate(RuntimeException.class, BusinessException.class)
.orElseThrow();
then(predicate.test(new RuntimeException())).isFalse();
then(predicate.test(new IllegalArgumentException())).isFalse();
then(predicate.test(new Throwable())).isTrue();
then(predicate.test(new Exception())).isTrue();
then(predicate.test(new IOException())).isTrue();
then(predicate.test(new BusinessException())).isFalse();
}
|
@Override
public SinkWriter<WindowedValue<IsmRecord<V>>> writer() throws IOException {
return new IsmSinkWriter(FileSystems.create(resourceId, MimeTypes.BINARY));
}
|
@Test
public void testWriteOutOfOrderKeysWithSameShardKeyIsError() throws Throwable {
IsmSink<byte[]> sink =
new IsmSink<>(
FileSystems.matchNewResource(tmpFolder.newFile().getPath(), false),
CODER,
BLOOM_FILTER_SIZE_LIMIT);
SinkWriter<WindowedValue<IsmRecord<byte[]>>> sinkWriter = sink.writer();
sinkWriter.add(
new ValueInEmptyWindows<>(IsmRecord.of(ImmutableList.of(EMPTY, new byte[] {0x01}), EMPTY)));
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("expects keys to be written in strictly increasing order");
sinkWriter.add(
new ValueInEmptyWindows<>(IsmRecord.of(ImmutableList.of(EMPTY, new byte[] {0x00}), EMPTY)));
}
|
public static String fromBytes(byte[] bytes) throws IOException {
DataInputBuffer dbuf = new DataInputBuffer();
dbuf.reset(bytes, 0, bytes.length);
StringBuilder buf = new StringBuilder(bytes.length);
readChars(dbuf, buf, bytes.length);
return buf.toString();
}
|
@Test
public void test5ByteUtf8Sequence() throws Exception {
byte[] invalid = new byte[] {
0x01, 0x02, (byte)0xf8, (byte)0x88, (byte)0x80,
(byte)0x80, (byte)0x80, 0x04, 0x05 };
try {
UTF8.fromBytes(invalid);
fail("did not throw an exception");
} catch (UTFDataFormatException utfde) {
GenericTestUtils.assertExceptionContains(
"Invalid UTF8 at f88880808004", utfde);
}
}
|
public AlterSourceCommand create(final AlterSource statement) {
final DataSource dataSource = metaStore.getSource(statement.getName());
final String dataSourceType = statement.getDataSourceType().getKsqlType();
if (dataSource != null && dataSource.isSource()) {
throw new KsqlException(
String.format("Cannot alter %s '%s': ALTER operations are not supported on source %s.",
dataSourceType.toLowerCase(),
statement.getName().text(),
dataSourceType.toLowerCase() + "s"));
}
final List<Column> newColumns = statement
.getAlterOptions()
.stream()
.map(
alterOption -> Column.of(
ColumnName.of(alterOption.getColumnName()),
alterOption.getType().getSqlType(),
Namespace.VALUE,
0))
.collect(Collectors.toList());
return new AlterSourceCommand(
statement.getName(),
dataSourceType,
newColumns
);
}
|
@Test
public void shouldCreateCommandForAlterTable() {
// Given:
final AlterSource alterSource = new AlterSource(TABLE_NAME, DataSourceType.KTABLE, NEW_COLUMNS);
// When:
final AlterSourceCommand result = alterSourceFactory.create(alterSource);
// Then:
assertEquals(result.getKsqlType(), DataSourceType.KTABLE.getKsqlType());
assertEquals(result.getSourceName(), TABLE_NAME);
assertEquals(result.getNewColumns().size(), 1);
}
|
public String toSnapshot(boolean hOption) {
return String.format(SNAPSHOT_FORMAT, formatSize(snapshotLength, hOption),
formatSize(snapshotFileCount, hOption),
formatSize(snapshotDirectoryCount, hOption),
formatSize(snapshotSpaceConsumed, hOption));
}
|
@Test
public void testToSnapshotNotHumanReadable() {
long snapshotLength = 1111;
long snapshotFileCount = 2222;
long snapshotDirectoryCount = 3333;
long snapshotSpaceConsumed = 4444;
ContentSummary contentSummary = new ContentSummary.Builder()
.snapshotLength(snapshotLength).snapshotFileCount(snapshotFileCount)
.snapshotDirectoryCount(snapshotDirectoryCount)
.snapshotSpaceConsumed(snapshotSpaceConsumed).build();
String expected =
" 1111 2222 3333 "
+ " 4444 ";
assertEquals(expected, contentSummary.toSnapshot(false));
}
|
@Override
public ProxyTopicRouteData getTopicRouteForProxy(ProxyContext ctx, List<Address> requestHostAndPortList,
String topicName) throws Exception {
TopicRouteData topicRouteData = getAllMessageQueueView(ctx, topicName).getTopicRouteData();
return new ProxyTopicRouteData(topicRouteData, requestHostAndPortList);
}
|
@Test
public void testGetTopicRouteForProxy() throws Throwable {
ProxyContext ctx = ProxyContext.create();
List<Address> addressList = Lists.newArrayList(new Address(Address.AddressScheme.IPv4, HostAndPort.fromParts("127.0.0.1", 8888)));
ProxyTopicRouteData proxyTopicRouteData = this.topicRouteService.getTopicRouteForProxy(ctx, addressList, TOPIC);
assertEquals(1, proxyTopicRouteData.getBrokerDatas().size());
assertEquals(addressList, proxyTopicRouteData.getBrokerDatas().get(0).getBrokerAddrs().get(MixAll.MASTER_ID));
}
|
@Override
public ByteBuf discardReadBytes() {
throw new ReadOnlyBufferException();
}
|
@Test
public void shouldRejectDiscardReadBytes() {
assertThrows(UnsupportedOperationException.class, new Executable() {
@Override
public void execute() {
unmodifiableBuffer(EMPTY_BUFFER).discardReadBytes();
}
});
}
|
CacheConfig<K, V> asCacheConfig() {
return this.copy(new CacheConfig<>(), false);
}
|
@Test
public void serializationSucceeds_whenKVTypes_setAsClassObjects() {
CacheConfig cacheConfig = newDefaultCacheConfig("test");
cacheConfig.setKeyType(Integer.class);
cacheConfig.setValueType(String.class);
PreJoinCacheConfig preJoinCacheConfig = new PreJoinCacheConfig(cacheConfig);
Data data = serializationService.toData(preJoinCacheConfig);
PreJoinCacheConfig deserialized = serializationService.toObject(data);
assertEquals(preJoinCacheConfig, deserialized);
assertEquals(cacheConfig, deserialized.asCacheConfig());
}
|
public static String prettyJSON(String json) {
return prettyJSON(json, TAB_SEPARATOR);
}
|
@Test
public void testRenderResultSimpleStructure() throws Exception {
assertEquals("{\n" + TAB + "\"Hello\": \"World\",\n" + TAB + "\"more\": [\n"
+ TAB + TAB + "\"Something\",\n" + TAB
+ TAB + "\"else\"\n" + TAB + "]\n}", prettyJSON("{\"Hello\": \"World\", \"more\": [\"Something\", \"else\", ]}"));
}
|
public Set<String> getDataStorageTypes() {
return dataStorageMap.keySet();
}
|
@Test
void testGetDataStorageTypes() {
componentHolder.getDataStorageTypes();
}
|
@VisibleForTesting
int getSignedEncodingLength(long n) {
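// For negative n, ~n needs the same number of significant bits as the two's-complement
// encoding of n, so a single bits-to-length table covers both signs.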
return BITS_TO_LENGTH[log2Floor(n < 0 ? ~n : n) + 1];
}
|
@Test
public void testGetSignedEncodingLength() {
OrderedCode orderedCode = new OrderedCode();
assertEquals(10, orderedCode.getSignedEncodingLength(Long.MIN_VALUE));
assertEquals(10, orderedCode.getSignedEncodingLength(~(1L << 62)));
assertEquals(9, orderedCode.getSignedEncodingLength(~(1L << 62) + 1));
assertEquals(3, orderedCode.getSignedEncodingLength(-8193));
assertEquals(2, orderedCode.getSignedEncodingLength(-8192));
assertEquals(2, orderedCode.getSignedEncodingLength(-65));
assertEquals(1, orderedCode.getSignedEncodingLength(-64));
assertEquals(1, orderedCode.getSignedEncodingLength(-2));
assertEquals(1, orderedCode.getSignedEncodingLength(-1));
assertEquals(1, orderedCode.getSignedEncodingLength(0));
assertEquals(1, orderedCode.getSignedEncodingLength(1));
assertEquals(1, orderedCode.getSignedEncodingLength(63));
assertEquals(2, orderedCode.getSignedEncodingLength(64));
assertEquals(2, orderedCode.getSignedEncodingLength(8191));
assertEquals(3, orderedCode.getSignedEncodingLength(8192));
assertEquals(9, orderedCode.getSignedEncodingLength((1L << 62) - 1));
assertEquals(10, orderedCode.getSignedEncodingLength(1L << 62));
assertEquals(10, orderedCode.getSignedEncodingLength(Long.MAX_VALUE));
}
|
@Override
public boolean addAll(Collection<? extends E> c) {
// will throw UnsupportedOperationException; delegate anyway for testability
return underlying().addAll(c);
}
|
@Test
public void testDelegationOfUnsupportedFunctionAddAll() {
new PCollectionsHashSetWrapperDelegationChecker<>()
.defineMockConfigurationForUnsupportedFunction(mock -> mock.addAll(eq(Collections.emptyList())))
.defineWrapperUnsupportedFunctionInvocation(wrapper -> wrapper.addAll(Collections.emptyList()))
.doUnsupportedFunctionDelegationCheck();
}
|
public double getLightHeight() {
return a;
}
|
@Test
public void examples() {
assertThat("neutral", example(0, 0), is(0));
assertThat("very much away from light", example(1000, -10000), is(-128));
assertThat("exactly pointing at light", example(1 / algo.getLightHeight(), 1 / algo.getLightHeight()), is(127));
}
|
public ResourceMethodDescriptor process(final ServerResourceContext context)
{
String path = context.getRequestURI().getRawPath();
if (path.length() < 2)
{
throw new RoutingException(HttpStatus.S_404_NOT_FOUND.getCode());
}
if (path.charAt(0) == '/')
{
path = path.substring(1);
}
Queue<String> remainingPath = new LinkedList<>(Arrays.asList(SLASH_PATTERN.split(path)));
String rootPath = "/" + remainingPath.poll();
ResourceModel currentResource;
try
{
currentResource =
_pathRootResourceMap.get(URLDecoder.decode(rootPath,
RestConstants.DEFAULT_CHARSET_NAME));
}
catch (UnsupportedEncodingException e)
{
throw new RestLiInternalException("UnsupportedEncodingException while trying to decode the root path",
e);
}
if (currentResource == null)
{
throw new RoutingException(String.format("No root resource defined for path '%s'",
rootPath),
HttpStatus.S_404_NOT_FOUND.getCode());
}
return processResourceTree(currentResource, context, remainingPath);
}
|
@Test
public void succeedsOnRootResourceGet() throws URISyntaxException
{
final TestSetup setup = new TestSetup();
setup.mockContextForRootResourceGetRequest(setup._rootPath + "/12345");
final RestLiRouter router = setup._router;
final ServerResourceContext context = setup._context;
final ResourceMethodDescriptor method = router.process(context);
Assert.assertNotNull(method);
}
|
@Override
public PollResult poll(long currentTimeMs) {
return pollInternal(
prepareFetchRequests(),
this::handleFetchSuccess,
this::handleFetchFailure
);
}
|
@Test
public void testPreferredReadReplica() {
buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(),
Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED, Duration.ofMinutes(5).toMillis());
subscriptions.assignFromUser(singleton(tp0));
client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4), tp -> validLeaderEpoch, topicIds, false));
subscriptions.seek(tp0, 0);
// Take note of the preferred replica before the first fetch response
Node selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
assertEquals(-1, selected.id());
assertEquals(1, sendFetches());
assertFalse(fetcher.hasCompletedFetches());
// Set preferred read replica to node=1
client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L,
FetchResponse.INVALID_LAST_STABLE_OFFSET, 0, Optional.of(1)));
networkClientDelegate.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchRecords();
assertTrue(partitionRecords.containsKey(tp0));
// Verify
selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
assertEquals(1, selected.id());
assertEquals(1, sendFetches());
assertFalse(fetcher.hasCompletedFetches());
// Set preferred read replica to node=2, which isn't in our metadata, should revert to leader
client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L,
FetchResponse.INVALID_LAST_STABLE_OFFSET, 0, Optional.of(2)));
networkClientDelegate.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
fetchRecords();
selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
assertEquals(-1, selected.id());
}
|
public static String getPartitionColumn(TableConfig tableConfig) {
// check InstanceAssignmentConfigMap is null or empty,
if (!MapUtils.isEmpty(tableConfig.getInstanceAssignmentConfigMap())) {
for (InstanceAssignmentConfig instanceAssignmentConfig : tableConfig.getInstanceAssignmentConfigMap().values()) {
//check InstanceAssignmentConfig has the InstanceReplicaGroupPartitionConfig with non-empty partitionColumn
if (StringUtils.isNotEmpty(instanceAssignmentConfig.getReplicaGroupPartitionConfig().getPartitionColumn())) {
return instanceAssignmentConfig.getReplicaGroupPartitionConfig().getPartitionColumn();
}
}
}
// for backward-compatibility, If partitionColumn value isn't there in InstanceReplicaGroupPartitionConfig
// check ReplicaGroupStrategyConfig for partitionColumn
ReplicaGroupStrategyConfig replicaGroupStrategyConfig =
tableConfig.getValidationConfig().getReplicaGroupStrategyConfig();
return replicaGroupStrategyConfig != null ? replicaGroupStrategyConfig.getPartitionColumn() : null;
}
|
@Test
public void testGetPartitionColumnWithoutAnyConfig() {
// without instanceAssignmentConfigMap
TableConfig tableConfig =
new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).build();
Assert.assertNull(TableConfigUtils.getPartitionColumn(tableConfig));
}
|
public final StringSubject hasMessageThat() {
StandardSubjectBuilder check = check("getMessage()");
if (actual instanceof ErrorWithFacts && ((ErrorWithFacts) actual).facts().size() > 1) {
check =
check.withMessage(
"(Note from Truth: When possible, instead of asserting on the full message, assert"
+ " about individual facts by using ExpectFailure.assertThat.)");
}
return check.that(checkNotNull(actual).getMessage());
}
|
@Test
public void hasMessageThat_MessageHasNullMessage_failure() {
expectFailureWhenTestingThat(new NullPointerException("message")).hasMessageThat().isNull();
}
|
public Order shipDate(OffsetDateTime shipDate) {
this.shipDate = shipDate;
return this;
}
|
@Test
public void shipDateTest() {
// TODO: test shipDate
}
|
@Override
public Expression createExpression(Expression source, String expression, Object[] properties) {
return doCreateJsonPathExpression(source, expression, properties, false);
}
|
@Test
public void testExpressionPojo() {
Exchange exchange = new DefaultExchange(context);
Map pojo = new HashMap();
pojo.put("kind", "full");
pojo.put("type", "customer");
exchange.getIn().setBody(pojo);
Language lan = context.resolveLanguage("jsonpath");
Expression exp = lan.createExpression("$.kind");
String kind = exp.evaluate(exchange, String.class);
assertNotNull(kind);
assertEquals("full", kind);
exp = lan.createExpression("$.type");
String type = exp.evaluate(exchange, String.class);
assertNotNull(type);
assertEquals("customer", type);
}
|
public Blob build() throws IOException {
UniqueTarArchiveEntries uniqueTarArchiveEntries = new UniqueTarArchiveEntries();
// Adds all the layer entries as tar entries.
for (FileEntry layerEntry : layerEntries) {
// Adds the entries to uniqueTarArchiveEntries, which makes sure all entries are unique and
// adds parent directories for each extraction path.
TarArchiveEntry entry =
new TarArchiveEntry(
layerEntry.getSourceFile(), layerEntry.getExtractionPath().toString());
// Sets the entry's permissions by masking out the permission bits from the entry's mode (the
// lowest 9 bits) then using a bitwise OR to set them to the layerEntry's permissions.
entry.setMode((entry.getMode() & ~0777) | layerEntry.getPermissions().getPermissionBits());
setUserAndGroup(entry, layerEntry);
clearTimeHeaders(entry, layerEntry.getModificationTime());
uniqueTarArchiveEntries.add(entry);
}
// Gets the entries sorted by extraction path.
List<TarArchiveEntry> sortedFilesystemEntries = uniqueTarArchiveEntries.getSortedEntries();
Set<String> names = new HashSet<>();
// Adds all the files to a tar stream.
TarStreamBuilder tarStreamBuilder = new TarStreamBuilder();
for (TarArchiveEntry entry : sortedFilesystemEntries) {
Verify.verify(!names.contains(entry.getName()));
names.add(entry.getName());
tarStreamBuilder.addTarArchiveEntry(entry);
}
return Blobs.from(tarStreamBuilder::writeAsTarArchiveTo, false);
}
|
@Test
public void testToBlob_reproducibility() throws IOException {
Path testRoot = temporaryFolder.getRoot().toPath();
Path root1 = Files.createDirectories(testRoot.resolve("files1"));
Path root2 = Files.createDirectories(testRoot.resolve("files2"));
// TODO: Currently this test only covers variation in order and modification time, even though
// TODO: the code is designed to clean up userid/groupid, this test does not check that yet.
String contentA = "abcabc";
Path fileA1 = createFile(root1, "fileA", contentA, 10000);
Path fileA2 = createFile(root2, "fileA", contentA, 20000);
String contentB = "yumyum";
Path fileB1 = createFile(root1, "fileB", contentB, 10000);
Path fileB2 = createFile(root2, "fileB", contentB, 20000);
// check if modification times are off
assertThat(Files.getLastModifiedTime(fileA2)).isNotEqualTo(Files.getLastModifiedTime(fileA1));
assertThat(Files.getLastModifiedTime(fileB2)).isNotEqualTo(Files.getLastModifiedTime(fileB1));
// create layers of exact same content but ordered differently and with different timestamps
Blob layer =
new ReproducibleLayerBuilder(
ImmutableList.of(
defaultLayerEntry(fileA1, AbsoluteUnixPath.get("/somewhere/fileA")),
defaultLayerEntry(fileB1, AbsoluteUnixPath.get("/somewhere/fileB"))))
.build();
Blob reproduced =
new ReproducibleLayerBuilder(
ImmutableList.of(
defaultLayerEntry(fileB2, AbsoluteUnixPath.get("/somewhere/fileB")),
defaultLayerEntry(fileA2, AbsoluteUnixPath.get("/somewhere/fileA"))))
.build();
byte[] layerContent = Blobs.writeToByteArray(layer);
byte[] reproducedLayerContent = Blobs.writeToByteArray(reproduced);
assertThat(layerContent).isEqualTo(reproducedLayerContent);
}
|
@Override
public T deserialize(final String topic, final byte[] bytes) {
try {
if (bytes == null) {
return null;
}
// don't use the JsonSchemaConverter to read this data because
// we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS,
// which is not currently available in the standard converters
final JsonNode value = isJsonSchema
? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class)
: MAPPER.readTree(bytes);
final Object coerced = enforceFieldType(
"$",
new JsonValueContext(value, schema)
);
if (LOG.isTraceEnabled()) {
LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced);
}
return SerdeUtils.castToTargetType(coerced, targetType);
} catch (final Exception e) {
// Clear location in order to avoid logging data, for security reasons
if (e instanceof JsonParseException) {
((JsonParseException) e).clearLocation();
}
throw new SerializationException(
"Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e);
}
}
|
@Test
public void shouldDeserializedJsonArray() {
// Given:
final KsqlJsonDeserializer<List> deserializer = givenDeserializerForSchema(
SchemaBuilder
.array(Schema.OPTIONAL_INT64_SCHEMA)
.build(),
List.class
);
final byte[] bytes = serializeJson(ImmutableList.of(42, 42.000, "42"));
// When:
final Object result = deserializer.deserialize(SOME_TOPIC, bytes);
// Then:
assertThat(result, is(ImmutableList.of(42L, 42L, 42L)));
}
|
@Override
public void sendHeartbeatInvokeMessage(int currentId) {
var nextInstance = this.findNextInstance(currentId);
var heartbeatInvokeMessage = new Message(MessageType.HEARTBEAT_INVOKE, "");
nextInstance.onMessage(heartbeatInvokeMessage);
}
|
@Test
void testSendHeartbeatInvokeMessage() {
try {
var instance1 = new RingInstance(null, 1, 1);
var instance2 = new RingInstance(null, 1, 2);
var instance3 = new RingInstance(null, 1, 3);
Map<Integer, Instance> instanceMap = Map.of(1, instance1, 2, instance2, 3, instance3);
var messageManager = new RingMessageManager(instanceMap);
messageManager.sendHeartbeatInvokeMessage(2);
var ringMessage = new Message(MessageType.HEARTBEAT_INVOKE, "");
var instanceClass = AbstractInstance.class;
var messageQueueField = instanceClass.getDeclaredField("messageQueue");
messageQueueField.setAccessible(true);
var ringMessageSent = ((Queue<Message>) messageQueueField.get(instance3)).poll();
assertEquals(ringMessageSent.getType(), ringMessage.getType());
assertEquals(ringMessageSent.getContent(), ringMessage.getContent());
} catch (NoSuchFieldException | IllegalAccessException e) {
fail("Error to access private field.");
}
}
|
private long parseTimeoutMs() {
long timeout = options.has(timeoutMsOpt) ? options.valueOf(timeoutMsOpt) : -1;
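// A missing or negative --timeout-ms means "wait forever" (see the test below).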
return timeout >= 0 ? timeout : Long.MAX_VALUE;
}
|
@Test
public void testParseTimeoutMs() throws Exception {
String[] withoutTimeoutMs = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--partition", "0"
};
assertEquals(Long.MAX_VALUE, new ConsoleConsumerOptions(withoutTimeoutMs).timeoutMs());
String[] negativeTimeoutMs = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--partition", "0",
"--timeout-ms", "-100"
};
assertEquals(Long.MAX_VALUE, new ConsoleConsumerOptions(negativeTimeoutMs).timeoutMs());
String[] validTimeoutMs = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--partition", "0",
"--timeout-ms", "100"
};
assertEquals(100, new ConsoleConsumerOptions(validTimeoutMs).timeoutMs());
}
|
@Override
public PageResult<MailAccountDO> getMailAccountPage(MailAccountPageReqVO pageReqVO) {
return mailAccountMapper.selectPage(pageReqVO);
}
|
@Test
public void testGetMailAccountPage() {
// Mock data
MailAccountDO dbMailAccount = randomPojo(MailAccountDO.class, o -> { // the row we expect to find
o.setMail("768@qq.com");
o.setUsername("yunai");
});
mailAccountMapper.insert(dbMailAccount);
// Insert a row whose mail does not match
mailAccountMapper.insert(cloneIgnoreId(dbMailAccount, o -> o.setMail("788@qq.com")));
// Insert a row whose username does not match
mailAccountMapper.insert(cloneIgnoreId(dbMailAccount, o -> o.setUsername("tudou")));
// Prepare the query
MailAccountPageReqVO reqVO = new MailAccountPageReqVO();
reqVO.setMail("768");
reqVO.setUsername("yu");
// Invoke
PageResult<MailAccountDO> pageResult = mailAccountService.getMailAccountPage(reqVO);
// Assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbMailAccount, pageResult.getList().get(0));
}
|
private static FeedbackDelayGenerator resolveDelayGenerator(
final Context ctx,
final UdpChannel channel,
final boolean isMulticastSemantics)
{
if (isMulticastSemantics)
{
return ctx.multicastFeedbackDelayGenerator();
}
final Long nakDelayNs = channel.nakDelayNs();
if (null != nakDelayNs)
{
final long retryDelayNs = nakDelayNs * ctx.nakUnicastRetryDelayRatio();
return new StaticDelayGenerator(nakDelayNs, retryDelayNs);
}
else
{
return ctx.unicastFeedbackDelayGenerator();
}
}
|
@Test
void shouldInferFeedbackGeneratorBasedOnMulticastAddress()
{
final MediaDriver.Context context = new MediaDriver.Context()
.multicastFeedbackDelayGenerator(new OptimalMulticastDelayGenerator(10, 10))
.unicastFeedbackDelayGenerator(new StaticDelayGenerator(10));
final UdpChannel udpChannel = UdpChannel.parse("aeron:udp?endpoint=224.20.30.39:24326");
final FeedbackDelayGenerator feedbackDelayGenerator = DriverConductor.resolveDelayGenerator(
context, udpChannel, InferableBoolean.INFER, (short)0);
assertSame(context.multicastFeedbackDelayGenerator(), feedbackDelayGenerator);
}
|
@Override
public String render(String text) {
if (StringUtils.isBlank(text)) {
return "";
}
if (regex.isEmpty() || link.isEmpty()) {
Comment comment = new Comment();
comment.escapeAndAdd(text);
return comment.render();
}
try {
Matcher matcher = Pattern.compile(regex).matcher(text);
int start = 0;
Comment comment = new Comment();
while (hasMatch(matcher)) {
comment.escapeAndAdd(text.substring(start, matcher.start()));
comment.add(dynamicLink(matcher));
start = matcher.end();
}
comment.escapeAndAdd(text.substring(start));
return comment.render();
} catch (PatternSyntaxException e) {
LOGGER.warn("Illegal regular expression: {} - {}", regex, e.getMessage());
}
return text;
}
|
@Test
public void shouldRenderStringWithSpecifiedRegexAndLink() throws Exception {
String link = "http://mingle05/projects/cce/cards/${ID}";
String regex = "(evo-\\d+)";
trackingTool = new DefaultCommentRenderer(link, regex);
String result = trackingTool.render("evo-111: checkin message");
assertThat(result,
is("<a href=\"" + "http://mingle05/projects/cce/cards/evo-111\" "
+ "target=\"story_tracker\">evo-111</a>: checkin message"));
}
|
public static SchemaAndValue parseString(String value) {
if (value == null) {
return NULL_SCHEMA_AND_VALUE;
}
if (value.isEmpty()) {
return new SchemaAndValue(Schema.STRING_SCHEMA, value);
}
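// Non-empty input goes through full type inference; the test below shows ISO-8601 strings
// inside an array being inferred as logical timestamps.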
ValueParser parser = new ValueParser(new Parser(value));
return parser.parse(false);
}
|
@Test
public void shouldParseMultipleTimestampStringAsTimestampInArray() throws Exception {
String tsStr1 = "2019-08-23T14:34:54.346Z";
String tsStr2 = "2019-01-23T15:12:34.567Z";
String tsStr3 = "2019-04-23T19:12:34.567Z";
String arrayStr = "[" + tsStr1 + "," + tsStr2 + ", " + tsStr3 + "]";
SchemaAndValue result = Values.parseString(arrayStr);
assertEquals(Type.ARRAY, result.schema().type());
Schema elementSchema = result.schema().valueSchema();
assertEquals(Type.INT64, elementSchema.type());
assertEquals(Timestamp.LOGICAL_NAME, elementSchema.name());
java.util.Date expected1 = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(tsStr1);
java.util.Date expected2 = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(tsStr2);
java.util.Date expected3 = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(tsStr3);
assertEquals(Arrays.asList(expected1, expected2, expected3), result.value());
}
|
@VisibleForTesting
static DeterminismEnvelope<ResourceID> getTaskManagerResourceID(
Configuration config, String rpcAddress, int rpcPort) {
final String metadata =
config.get(TaskManagerOptionsInternal.TASK_MANAGER_RESOURCE_ID_METADATA, "");
return config.getOptional(TaskManagerOptions.TASK_MANAGER_RESOURCE_ID)
.map(
value ->
DeterminismEnvelope.deterministicValue(
new ResourceID(value, metadata)))
.orElseGet(
FunctionUtils.uncheckedSupplier(
() -> {
final String hostName =
InetAddress.getLocalHost().getHostName();
final String value =
StringUtils.isNullOrWhitespaceOnly(rpcAddress)
? hostName
+ "-"
+ new AbstractID()
.toString()
.substring(0, 6)
: rpcAddress
+ ":"
+ rpcPort
+ "-"
+ new AbstractID()
.toString()
.substring(0, 6);
return DeterminismEnvelope.nondeterministicValue(
new ResourceID(value, metadata));
}));
}
|
@Test
void testGenerateTaskManagerResourceIDWithRemoteRpcService() throws Exception {
final Configuration configuration = createConfiguration();
final String rpcAddress = "flink";
final int rpcPort = 9090;
final ResourceID taskManagerResourceID =
TaskManagerRunner.getTaskManagerResourceID(configuration, rpcAddress, rpcPort)
.unwrap();
assertThat(taskManagerResourceID).isNotNull();
assertThat(taskManagerResourceID.getResourceIdString())
.contains(rpcAddress + ":" + rpcPort);
}
|
public static HeightLock ofBlockHeight(int blockHeight) {
if (blockHeight < 0)
throw new IllegalArgumentException("illegal negative block height: " + blockHeight);
if (blockHeight >= THRESHOLD)
throw new IllegalArgumentException("block height too high: " + blockHeight);
return new HeightLock(blockHeight);
}
|
@Test
public void blockHeightSubtype() {
LockTime blockHeight = LockTime.ofBlockHeight(100);
assertTrue(blockHeight instanceof HeightLock);
assertTrue(((HeightLock) blockHeight).blockHeight() > 0);
}
|
@Override
public boolean supports(String hashedPassword) {
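// Only hashes carrying both the algorithm prefix and the embedded salt marker are handled, as the test below exercises.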
return hashedPassword.startsWith(PREFIX) && hashedPassword.contains(SALT_PREFIX);
}
|
@Test
public void testSupports() throws Exception {
assertThat(bCryptPasswordAlgorithm.supports("foobar")).isFalse();
assertThat(bCryptPasswordAlgorithm.supports("{bcrypt}foobar")).isFalse();
assertThat(bCryptPasswordAlgorithm.supports("{bcrypt}foobar{salt}pepper")).isTrue();
assertThat(bCryptPasswordAlgorithm.supports("{foobar}foobar")).isFalse();
}
|
public static String[] subtraction(String[] arr1, String[] arr2) {
if (arr1 == null || arr1.length == 0 || arr2 == null || arr2.length == 0) {
return arr1;
}
List<String> list = new ArrayList<>(Arrays.asList(arr1));
list.removeAll(Arrays.asList(arr2));
return list.toArray(new String[0]);
}
|
@Test
void testArraySubtraction() {
assertNull(StringUtil.subtraction(null, arr("a", "test", "b", "a")));
assertArrayEquals(arr("a", "test", "b", "a"), StringUtil.subtraction(arr("a", "test", "b", "a"), null));
assertArrayEquals(arr("test"), StringUtil.subtraction(arr("a", "test", "b", "a"), arr("a", "b")));
assertArrayEquals(arr(), StringUtil.subtraction(arr(), arr("a", "b")));
assertArrayEquals(arr("a", "b"), StringUtil.subtraction(arr("a", "b"), arr()));
assertArrayEquals(arr(), StringUtil.subtraction(arr("a", "test", "b", "a"), arr("a", "b", "test")));
}
|
@Override
public Map<String, String> getAllVariables() {
Map<String, String> allVariables =
parentMetricGroup.getAllVariables(
this.settings.getReporterIndex(), this.settings.getExcludedVariables());
if (!this.settings.getAdditionalVariables().isEmpty()) {
allVariables = new CompositeMap(allVariables, this.settings.getAdditionalVariables());
}
return Collections.unmodifiableMap(allVariables);
}
|
@Test
void testGetAllVariablesWithAdditionalVariables() {
final FrontMetricGroup<?> frontMetricGroup =
new FrontMetricGroup<>(
new ReporterScopedSettings(
0,
'.',
MetricFilter.NO_OP_FILTER,
Collections.emptySet(),
ImmutableMap.of(ScopeFormat.asVariable("foo"), "bar")),
new ProcessMetricGroup(TestingMetricRegistry.builder().build(), "host"));
assertThat(frontMetricGroup.getAllVariables())
.containsEntry(ScopeFormat.asVariable("foo"), "bar");
}
|
public final void containsNoneIn(@Nullable Iterable<?> excluded) {
Collection<?> actual = iterableToCollection(checkNotNull(this.actual));
checkNotNull(excluded); // TODO(cpovirk): Produce a better exception message.
List<@Nullable Object> present = new ArrayList<>();
for (Object item : Sets.newLinkedHashSet(excluded)) {
if (actual.contains(item)) {
present.add(item);
}
}
if (!present.isEmpty()) {
failWithoutActual(
fact("expected not to contain any of", annotateEmptyStrings(excluded)),
fact("but contained", annotateEmptyStrings(present)),
fullContents());
}
}
|
@Test
public void iterableContainsNoneInArray() {
assertThat(asList(1, 2, 3)).containsNoneIn(new Integer[] {4, 5, 6});
expectFailureWhenTestingThat(asList(1, 2, 3)).containsNoneIn(new Integer[] {1, 2, 4});
}
|
void validateLogLevelConfigs(Collection<AlterableConfig> ops) {
ops.forEach(op -> {
String loggerName = op.name();
switch (OpType.forId(op.configOperation())) {
case SET:
validateLoggerNameExists(loggerName);
String logLevel = op.value();
if (!LogLevelConfig.VALID_LOG_LEVELS.contains(logLevel)) {
throw new InvalidConfigurationException("Cannot set the log level of " +
loggerName + " to " + logLevel + " as it is not a supported log level. " +
"Valid log levels are " + VALID_LOG_LEVELS_STRING);
}
break;
case DELETE:
validateLoggerNameExists(loggerName);
if (loggerName.equals(Log4jController.ROOT_LOGGER())) {
throw new InvalidRequestException("Removing the log level of the " +
Log4jController.ROOT_LOGGER() + " logger is not allowed");
}
break;
case APPEND:
throw new InvalidRequestException(OpType.APPEND +
" operation is not allowed for the " + BROKER_LOGGER + " resource");
case SUBTRACT:
throw new InvalidRequestException(OpType.SUBTRACT +
" operation is not allowed for the " + BROKER_LOGGER + " resource");
default:
throw new InvalidRequestException("Unknown operation type " +
(int) op.configOperation() + " is not allowed for the " +
BROKER_LOGGER + " resource");
}
});
}
|
@Test
public void testValidateSetLogLevelConfig() {
MANAGER.validateLogLevelConfigs(Arrays.asList(new AlterableConfig().
setName(LOG.getName()).
setConfigOperation(OpType.SET.id()).
setValue("TRACE")));
}
|
@Nonnull
@Override
public ScheduledFuture<?> schedule(@Nonnull Runnable command, long delay, @Nonnull TimeUnit unit) {
scheduledOnce.mark();
return delegate.schedule(new InstrumentedRunnable(command), delay, unit);
}
|
@Test
public void testScheduleCallable() throws Exception {
assertThat(submitted.getCount()).isZero();
assertThat(running.getCount()).isZero();
assertThat(completed.getCount()).isZero();
assertThat(duration.getCount()).isZero();
assertThat(scheduledOnce.getCount()).isZero();
assertThat(scheduledRepetitively.getCount()).isZero();
assertThat(scheduledOverrun.getCount()).isZero();
assertThat(percentOfPeriod.getCount()).isZero();
final Object obj = new Object();
ScheduledFuture<Object> theFuture = instrumentedScheduledExecutor.schedule(() -> {
assertThat(submitted.getCount()).isZero();
assertThat(running.getCount()).isEqualTo(1);
assertThat(completed.getCount()).isZero();
assertThat(duration.getCount()).isZero();
assertThat(scheduledOnce.getCount()).isEqualTo(1);
assertThat(scheduledRepetitively.getCount()).isZero();
assertThat(scheduledOverrun.getCount()).isZero();
assertThat(percentOfPeriod.getCount()).isZero();
return obj;
}, 10L, TimeUnit.MILLISECONDS);
assertThat(theFuture.get()).isEqualTo(obj);
assertThat(submitted.getCount()).isZero();
assertThat(running.getCount()).isZero();
assertThat(completed.getCount()).isEqualTo(1);
assertThat(duration.getCount()).isEqualTo(1);
assertThat(duration.getSnapshot().size()).isEqualTo(1);
assertThat(scheduledOnce.getCount()).isEqualTo(1);
assertThat(scheduledRepetitively.getCount()).isZero();
assertThat(scheduledOverrun.getCount()).isZero();
assertThat(percentOfPeriod.getCount()).isZero();
}
|
public boolean containsInt(final int value)
{
return -1 != indexOf(value);
}
|
@Test
void shouldContainCorrectValues()
{
final int count = 20;
IntStream.range(0, count).forEachOrdered(list::addInt);
for (int i = 0; i < count; i++)
{
assertTrue(list.containsInt(i));
}
assertFalse(list.containsInt(-1));
assertFalse(list.containsInt(20));
}
|
private static void sqlConfig(XmlGenerator gen, Config config) {
SqlConfig sqlConfig = config.getSqlConfig();
JavaSerializationFilterConfig filterConfig = sqlConfig.getJavaReflectionFilterConfig();
gen.open("sql")
.node("statement-timeout-millis", sqlConfig.getStatementTimeoutMillis())
.node("catalog-persistence-enabled", sqlConfig.isCatalogPersistenceEnabled());
if (filterConfig != null) {
gen.open("java-reflection-filter", "defaults-disabled", filterConfig.isDefaultsDisabled());
appendFilterList(gen, "blacklist", filterConfig.getBlacklist());
appendFilterList(gen, "whitelist", filterConfig.getWhitelist());
gen.close();
}
gen.close();
}
|
@Test
public void testSqlConfig() {
Config config = new Config();
config.getSqlConfig().setStatementTimeoutMillis(30L);
config.getSqlConfig().setCatalogPersistenceEnabled(true);
JavaSerializationFilterConfig filterConfig = new JavaSerializationFilterConfig();
filterConfig.getWhitelist().addClasses("com.foo.bar.MyClass");
filterConfig.getBlacklist().addPackages("magic.collection.of.code");
config.getSqlConfig().setJavaReflectionFilterConfig(filterConfig);
SqlConfig generatedConfig = getNewConfigViaXMLGenerator(config).getSqlConfig();
assertEquals(config.getSqlConfig().getStatementTimeoutMillis(), generatedConfig.getStatementTimeoutMillis());
assertEquals(config.getSqlConfig().isCatalogPersistenceEnabled(), generatedConfig.isCatalogPersistenceEnabled());
assertEquals(config.getSqlConfig().getJavaReflectionFilterConfig(), generatedConfig.getJavaReflectionFilterConfig());
}
|
@Override
protected boolean updateCacheIfNeed(final ConfigData<RuleData> result) {
return updateCacheIfNeed(result, ConfigGroupEnum.RULE);
}
|
@Test
public void testUpdateCacheIfNeed() {
final RuleDataRefresh ruleDataRefresh = mockRuleDataRefresh;
// update cache, then assert equals
ConfigData<RuleData> expect = new ConfigData<>();
expect.setLastModifyTime(System.currentTimeMillis());
ruleDataRefresh.updateCacheIfNeed(expect);
assertThat(ruleDataRefresh.cacheConfigData(), is(expect));
}
|
private Object getKey(WindowedValue<InT> elem) {
KV<?, ?> kv = (KV<?, ?>) elem.getValue();
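// Fold both a null KV and a null key into the NULL_KEY sentinel so the element can still be grouped.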
if (kv == null) {
return NULL_KEY;
} else {
Object key = kv.getKey();
return key == null ? NULL_KEY : key;
}
}
|
@Test
@Ignore("https://github.com/apache/beam/issues/23745")
public void testPipelineWithState() {
final List<KV<String, String>> input =
new ArrayList<>(
Arrays.asList(
KV.of("apple", "red"),
KV.of("banana", "yellow"),
KV.of("apple", "yellow"),
KV.of("grape", "purple"),
KV.of("banana", "yellow")));
final Map<String, Integer> expectedCount = ImmutableMap.of("apple", 2, "banana", 2, "grape", 1);
// TODO: remove after SAMZA-2761 fix
for (int i = 0; i < 20; i++) {
input.add(KV.of("*", "*"));
}
final DoFn<KV<String, String>, KV<String, Integer>> fn =
new DoFn<KV<String, String>, KV<String, Integer>>() {
@StateId("cc")
private final StateSpec<CombiningState<Integer, int[], Integer>> countState =
StateSpecs.combiningFromInputInternal(VarIntCoder.of(), Sum.ofIntegers());
@ProcessElement
public void processElement(
ProcessContext c, @StateId("cc") CombiningState<Integer, int[], Integer> countState) {
if (c.element().getKey().equals("*")) {
return;
}
countState.add(1);
String key = c.element().getKey();
int n = countState.read();
if (n >= expectedCount.get(key)) {
c.output(KV.of(key, n));
}
}
};
PCollection<KV<String, Integer>> counts = pipeline.apply(Create.of(input)).apply(ParDo.of(fn));
PAssert.that(counts)
.containsInAnyOrder(
expectedCount.entrySet().stream()
.map(entry -> KV.of(entry.getKey(), entry.getValue()))
.collect(Collectors.toList()));
pipeline.run();
}
|
public static HivePartitionStats fromCommonStats(long rowNums, long totalFileBytes) {
HiveCommonStats commonStats = new HiveCommonStats(rowNums, totalFileBytes);
return new HivePartitionStats(commonStats, ImmutableMap.of());
}
|
@Test
public void testFromCommonStats() {
long rowNums = 5;
long fileSize = 100;
HivePartitionStats hivePartitionStats = HivePartitionStats.fromCommonStats(rowNums, fileSize);
Assert.assertEquals(5, hivePartitionStats.getCommonStats().getRowNums());
Assert.assertEquals(100, hivePartitionStats.getCommonStats().getTotalFileBytes());
Assert.assertTrue(hivePartitionStats.getColumnStats().isEmpty());
}
|
@Override
public void importData(JsonReader reader) throws IOException {
logger.info("Reading configuration for 1.2");
// this *HAS* to start as an object
reader.beginObject();
while (reader.hasNext()) {
JsonToken tok = reader.peek();
switch (tok) {
case NAME:
String name = reader.nextName();
// find out which member it is
if (name.equals(CLIENTS)) {
readClients(reader);
} else if (name.equals(GRANTS)) {
readGrants(reader);
} else if (name.equals(WHITELISTEDSITES)) {
readWhitelistedSites(reader);
} else if (name.equals(BLACKLISTEDSITES)) {
readBlacklistedSites(reader);
} else if (name.equals(AUTHENTICATIONHOLDERS)) {
readAuthenticationHolders(reader);
} else if (name.equals(ACCESSTOKENS)) {
readAccessTokens(reader);
} else if (name.equals(REFRESHTOKENS)) {
readRefreshTokens(reader);
} else if (name.equals(SYSTEMSCOPES)) {
readSystemScopes(reader);
} else {
for (MITREidDataServiceExtension extension : extensions) {
if (extension.supportsVersion(THIS_VERSION)) {
extension.importExtensionData(name, reader);
break;
}
}
// unknown token, skip it
reader.skipValue();
}
break;
case END_OBJECT:
// the object ended, we're done here
reader.endObject();
continue;
default:
logger.debug("Found unexpected entry");
reader.skipValue();
continue;
}
}
fixObjectReferences();
for (MITREidDataServiceExtension extension : extensions) {
if (extension.supportsVersion(THIS_VERSION)) {
extension.fixExtensionObjectReferences(maps);
break;
}
}
maps.clearAll();
}
|
@Test
public void testImportWhitelistedSites() throws IOException {
WhitelistedSite site1 = new WhitelistedSite();
site1.setId(1L);
site1.setClientId("foo");
WhitelistedSite site2 = new WhitelistedSite();
site2.setId(2L);
site2.setClientId("bar");
WhitelistedSite site3 = new WhitelistedSite();
site3.setId(3L);
site3.setClientId("baz");
//site3.setAllowedScopes(null);
String configJson = "{" +
"\"" + MITREidDataService.CLIENTS + "\": [], " +
"\"" + MITREidDataService.ACCESSTOKENS + "\": [], " +
"\"" + MITREidDataService.REFRESHTOKENS + "\": [], " +
"\"" + MITREidDataService.GRANTS + "\": [], " +
"\"" + MITREidDataService.BLACKLISTEDSITES + "\": [], " +
"\"" + MITREidDataService.SYSTEMSCOPES + "\": [], " +
"\"" + MITREidDataService.AUTHENTICATIONHOLDERS + "\": [], " +
"\"" + MITREidDataService.WHITELISTEDSITES + "\": [" +
"{\"id\":1,\"clientId\":\"foo\"}," +
"{\"id\":2,\"clientId\":\"bar\"}," +
"{\"id\":3,\"clientId\":\"baz\"}" +
" ]" +
"}";
logger.debug(configJson);
JsonReader reader = new JsonReader(new StringReader(configJson));
final Map<Long, WhitelistedSite> fakeDb = new HashMap<>();
when(wlSiteRepository.save(isA(WhitelistedSite.class))).thenAnswer(new Answer<WhitelistedSite>() {
Long id = 333L;
@Override
public WhitelistedSite answer(InvocationOnMock invocation) throws Throwable {
WhitelistedSite _site = (WhitelistedSite) invocation.getArguments()[0];
if(_site.getId() == null) {
_site.setId(id++);
}
fakeDb.put(_site.getId(), _site);
return _site;
}
});
when(wlSiteRepository.getById(anyLong())).thenAnswer(new Answer<WhitelistedSite>() {
@Override
public WhitelistedSite answer(InvocationOnMock invocation) throws Throwable {
Long _id = (Long) invocation.getArguments()[0];
return fakeDb.get(_id);
}
});
dataService.importData(reader);
verify(wlSiteRepository, times(3)).save(capturedWhitelistedSites.capture());
List<WhitelistedSite> savedSites = capturedWhitelistedSites.getAllValues();
assertThat(savedSites.size(), is(3));
assertThat(savedSites.get(0).getClientId(), equalTo(site1.getClientId()));
assertThat(savedSites.get(1).getClientId(), equalTo(site2.getClientId()));
assertThat(savedSites.get(2).getClientId(), equalTo(site3.getClientId()));
}
|
public static boolean isVoid(@Nullable Type type) {
return type != null && type.getSort() == Type.VOID;
}
|
@Test
void testIsVoid() {
assertTrue(Types.isVoid(Type.getType("V")));
assertFalse(Types.isVoid(Type.getType("()V")));
assertFalse(Types.isVoid(Type.getType("[V")));
assertFalse(Types.isVoid(null));
}
|
public static DataPermission remove() {
DataPermission dataPermission = DATA_PERMISSIONS.get().removeLast();
// Clear the ThreadLocal once no elements remain
if (DATA_PERMISSIONS.get().isEmpty()) {
DATA_PERMISSIONS.remove();
}
return dataPermission;
}
|
@Test
public void testRemove() {
// Mock objects
DataPermission dataPermission01 = mock(DataPermission.class);
DataPermissionContextHolder.add(dataPermission01);
DataPermission dataPermission02 = mock(DataPermission.class);
DataPermissionContextHolder.add(dataPermission02);
// Invoke
DataPermission result = DataPermissionContextHolder.remove();
// Assert
assertSame(result, dataPermission02);
assertEquals(1, DataPermissionContextHolder.getAll().size());
}
|
@Override
public void deleteTenant(Long id) {
// Validate that the tenant exists
validateUpdateTenant(id);
// Delete
tenantMapper.deleteById(id);
}
|
@Test
public void testDeleteTenant_success() {
// Mock data
TenantDO dbTenant = randomPojo(TenantDO.class,
o -> o.setStatus(randomCommonStatus()));
tenantMapper.insert(dbTenant); // @Sql: insert an existing row first
// Prepare the parameter
Long id = dbTenant.getId();
// Invoke
tenantService.deleteTenant(id);
// Verify the row no longer exists
assertNull(tenantMapper.selectById(id));
}
|
boolean createDatabase(@NotNull String name) throws TException {
return createDatabase(name, null, null, null);
}
|
@Test
public void createExistingDatabase() {
Throwable exception = Assertions.assertThrows(AlreadyExistsException.class,
() -> client.createDatabase(TEST_DATABASE));
}
|
public String getDirectly(final String key) {
try {
byte[] ret = client.getData().forPath(key);
return Objects.isNull(ret) ? null : new String(ret, StandardCharsets.UTF_8);
} catch (Exception e) {
throw new ShenyuException(e);
}
}
|
@Test
void getDirectly() throws Exception {
assertThrows(ShenyuException.class, () -> client.getDirectly("/test"));
GetDataBuilder getDataBuilder = mock(GetDataBuilder.class);
when(curatorFramework.getData()).thenReturn(getDataBuilder);
when(getDataBuilder.forPath(anyString())).thenReturn("hello".getBytes());
String val = client.getDirectly("/test");
assertEquals("hello", val);
when(getDataBuilder.forPath(anyString())).thenReturn(null);
String val2 = client.getDirectly("/test");
assertNull(val2);
}
|
public int getSaveFederationQueuePolicyFailedRetrieved() {
return numSaveFederationQueuePolicyFailedRetrieved.value();
}
|
@Test
public void testSaveFederationQueuePolicyFailedRetrieved() {
long totalBadBefore = metrics.getSaveFederationQueuePolicyFailedRetrieved();
badSubCluster.getSaveFederationQueuePolicyFailedRetrieved();
Assert.assertEquals(totalBadBefore + 1, metrics.getSaveFederationQueuePolicyFailedRetrieved());
}
|
@Override
public List<PinotTaskConfig> generateTasks(List<TableConfig> tableConfigs) {
String taskType = MergeRollupTask.TASK_TYPE;
List<PinotTaskConfig> pinotTaskConfigs = new ArrayList<>();
for (TableConfig tableConfig : tableConfigs) {
if (!validate(tableConfig, taskType)) {
continue;
}
String tableNameWithType = tableConfig.getTableName();
LOGGER.info("Start generating task configs for table: {} for task: {}", tableNameWithType, taskType);
// Get all segment metadata
List<SegmentZKMetadata> allSegments = getSegmentsZKMetadataForTable(tableNameWithType);
// Filter segments based on status
List<SegmentZKMetadata> preSelectedSegmentsBasedOnStatus
= filterSegmentsBasedOnStatus(tableConfig.getTableType(), allSegments);
// Select current segment snapshot based on lineage, filter out empty segments
SegmentLineage segmentLineage = _clusterInfoAccessor.getSegmentLineage(tableNameWithType);
Set<String> preSelectedSegmentsBasedOnLineage = new HashSet<>();
for (SegmentZKMetadata segment : preSelectedSegmentsBasedOnStatus) {
preSelectedSegmentsBasedOnLineage.add(segment.getSegmentName());
}
SegmentLineageUtils.filterSegmentsBasedOnLineageInPlace(preSelectedSegmentsBasedOnLineage, segmentLineage);
List<SegmentZKMetadata> preSelectedSegments = new ArrayList<>();
for (SegmentZKMetadata segment : preSelectedSegmentsBasedOnStatus) {
if (preSelectedSegmentsBasedOnLineage.contains(segment.getSegmentName()) && segment.getTotalDocs() > 0
&& MergeTaskUtils.allowMerge(segment)) {
preSelectedSegments.add(segment);
}
}
if (preSelectedSegments.isEmpty()) {
// Reset the watermark time if no segment found. This covers the case where the table is newly created or
// all segments for the existing table got deleted.
resetDelayMetrics(tableNameWithType);
LOGGER.info("Skip generating task: {} for table: {}, no segment is found.", taskType, tableNameWithType);
continue;
}
// Sort segments based on startTimeMs, endTimeMs and segmentName in ascending order
preSelectedSegments.sort((a, b) -> {
long aStartTime = a.getStartTimeMs();
long bStartTime = b.getStartTimeMs();
if (aStartTime != bStartTime) {
return Long.compare(aStartTime, bStartTime);
}
long aEndTime = a.getEndTimeMs();
long bEndTime = b.getEndTimeMs();
return aEndTime != bEndTime ? Long.compare(aEndTime, bEndTime)
: a.getSegmentName().compareTo(b.getSegmentName());
});
// Sort merge levels based on bucket time period
Map<String, String> taskConfigs = tableConfig.getTaskConfig().getConfigsForTaskType(taskType);
Map<String, Map<String, String>> mergeLevelToConfigs = MergeRollupTaskUtils.getLevelToConfigMap(taskConfigs);
List<Map.Entry<String, Map<String, String>>> sortedMergeLevelConfigs =
new ArrayList<>(mergeLevelToConfigs.entrySet());
sortedMergeLevelConfigs.sort(Comparator.comparingLong(
e -> TimeUtils.convertPeriodToMillis(e.getValue().get(MinionConstants.MergeTask.BUCKET_TIME_PERIOD_KEY))));
// Get incomplete merge levels
Set<String> inCompleteMergeLevels = new HashSet<>();
for (Map.Entry<String, TaskState> entry : TaskGeneratorUtils.getIncompleteTasks(taskType, tableNameWithType,
_clusterInfoAccessor).entrySet()) {
for (PinotTaskConfig taskConfig : _clusterInfoAccessor.getTaskConfigs(entry.getKey())) {
inCompleteMergeLevels.add(taskConfig.getConfigs().get(MergeRollupTask.MERGE_LEVEL_KEY));
}
}
// Get scheduling mode which is "processFromWatermark" by default. If "processAll" mode is enabled, there will be
// no watermark, and each round we pick the buckets in chronological order which have unmerged segments.
boolean processAll = MergeTask.PROCESS_ALL_MODE.equalsIgnoreCase(taskConfigs.get(MergeTask.MODE));
ZNRecord mergeRollupTaskZNRecord = _clusterInfoAccessor
.getMinionTaskMetadataZNRecord(MinionConstants.MergeRollupTask.TASK_TYPE, tableNameWithType);
int expectedVersion = mergeRollupTaskZNRecord != null ? mergeRollupTaskZNRecord.getVersion() : -1;
MergeRollupTaskMetadata mergeRollupTaskMetadata =
mergeRollupTaskZNRecord != null ? MergeRollupTaskMetadata.fromZNRecord(mergeRollupTaskZNRecord)
: new MergeRollupTaskMetadata(tableNameWithType, new TreeMap<>());
List<PinotTaskConfig> pinotTaskConfigsForTable = new ArrayList<>();
// Schedule tasks from lowest to highest merge level (e.g. Hourly -> Daily -> Monthly -> Yearly)
String mergeLevel = null;
for (Map.Entry<String, Map<String, String>> mergeLevelConfig : sortedMergeLevelConfigs) {
String lowerMergeLevel = mergeLevel;
mergeLevel = mergeLevelConfig.getKey();
Map<String, String> mergeConfigs = mergeLevelConfig.getValue();
// Skip scheduling if there's incomplete task for current mergeLevel
if (inCompleteMergeLevels.contains(mergeLevel)) {
LOGGER.info("Found incomplete task of merge level: {} for the same table: {}, Skipping task generation: {}",
mergeLevel, tableNameWithType, taskType);
continue;
}
// Get the bucket size, buffer size and maximum number of parallel buckets (by default 1)
String bucketPeriod = mergeConfigs.get(MergeTask.BUCKET_TIME_PERIOD_KEY);
long bucketMs = TimeUtils.convertPeriodToMillis(bucketPeriod);
if (bucketMs <= 0) {
LOGGER.error("Bucket time period: {} (table : {}, mergeLevel : {}) must be larger than 0", bucketPeriod,
tableNameWithType, mergeLevel);
continue;
}
String bufferPeriod = mergeConfigs.get(MergeTask.BUFFER_TIME_PERIOD_KEY);
long bufferMs = TimeUtils.convertPeriodToMillis(bufferPeriod);
if (bufferMs < 0) {
LOGGER.error("Buffer time period: {} (table : {}, mergeLevel : {}) must be larger or equal to 0",
bufferPeriod, tableNameWithType, mergeLevel);
continue;
}
String maxNumParallelBucketsStr = mergeConfigs.get(MergeTask.MAX_NUM_PARALLEL_BUCKETS);
int maxNumParallelBuckets = maxNumParallelBucketsStr != null ? Integer.parseInt(maxNumParallelBucketsStr)
: DEFAULT_NUM_PARALLEL_BUCKETS;
if (maxNumParallelBuckets <= 0) {
LOGGER.error("Maximum number of parallel buckets: {} (table : {}, mergeLevel : {}) must be larger than 0",
maxNumParallelBuckets, tableNameWithType, mergeLevel);
continue;
}
// Get bucket start/end time
long preSelectedSegStartTimeMs = preSelectedSegments.get(0).getStartTimeMs();
long bucketStartMs = preSelectedSegStartTimeMs / bucketMs * bucketMs;
long watermarkMs = 0;
if (!processAll) {
// Get watermark from MergeRollupTaskMetadata ZNode
// bucketStartMs = watermarkMs
// bucketEndMs = bucketStartMs + bucketMs
watermarkMs = getWatermarkMs(preSelectedSegStartTimeMs, bucketMs, mergeLevel,
mergeRollupTaskMetadata);
bucketStartMs = watermarkMs;
}
long bucketEndMs = bucketStartMs + bucketMs;
if (lowerMergeLevel == null) {
long lowestLevelMaxValidBucketEndTimeMs = Long.MIN_VALUE;
for (SegmentZKMetadata preSelectedSegment : preSelectedSegments) {
// Compute lowestLevelMaxValidBucketEndTimeMs among segments that are ready for merge
long currentValidBucketEndTimeMs =
getValidBucketEndTimeMsForSegment(preSelectedSegment, bucketMs, bufferMs);
lowestLevelMaxValidBucketEndTimeMs =
Math.max(lowestLevelMaxValidBucketEndTimeMs, currentValidBucketEndTimeMs);
}
_tableLowestLevelMaxValidBucketEndTimeMs.put(tableNameWithType, lowestLevelMaxValidBucketEndTimeMs);
}
// Create metrics even if there's no task scheduled, this helps the case that the controller is restarted
// but the metrics are not available until the controller schedules a valid task
List<String> sortedMergeLevels =
sortedMergeLevelConfigs.stream().map(Map.Entry::getKey).collect(Collectors.toList());
if (processAll) {
createOrUpdateNumBucketsToProcessMetrics(tableNameWithType, mergeLevel, lowerMergeLevel, bufferMs, bucketMs,
preSelectedSegments, sortedMergeLevels);
} else {
createOrUpdateDelayMetrics(tableNameWithType, mergeLevel, null, watermarkMs, bufferMs, bucketMs);
}
if (!isValidBucketEndTime(bucketEndMs, bufferMs, lowerMergeLevel, mergeRollupTaskMetadata, processAll)) {
LOGGER.info("Bucket with start: {} and end: {} (table : {}, mergeLevel : {}, mode : {}) cannot be merged yet",
bucketStartMs, bucketEndMs, tableNameWithType, mergeLevel, processAll ? MergeTask.PROCESS_ALL_MODE
: MergeTask.PROCESS_FROM_WATERMARK_MODE);
continue;
}
// Find overlapping segments for each bucket, skip the buckets that have all segments merged
List<List<SegmentZKMetadata>> selectedSegmentsForAllBuckets = new ArrayList<>(maxNumParallelBuckets);
List<SegmentZKMetadata> selectedSegmentsForBucket = new ArrayList<>();
boolean hasUnmergedSegments = false;
boolean hasSpilledOverData = false;
boolean areAllSegmentsReadyToMerge = true;
// The for loop terminates in the following cases:
// 1. Found buckets with unmerged segments:
// For each bucket, find all segments overlapping with the target bucket, and skip the bucket if all
// overlapping segments are merged. Schedule at most k (numParallelBuckets) buckets, and stop at the first
// bucket that contains spilled over data.
// One may wonder how a segment with records spanning different buckets is handled. The short answer is that
// it will be cut into multiple segments, each for a separate bucket. This is achieved by setting bucket time
// period as PARTITION_BUCKET_TIME_PERIOD when generating PinotTaskConfigs
// 2. There's no bucket with unmerged segments, so skip scheduling
for (SegmentZKMetadata preSelectedSegment : preSelectedSegments) {
long startTimeMs = preSelectedSegment.getStartTimeMs();
if (startTimeMs < bucketEndMs) {
long endTimeMs = preSelectedSegment.getEndTimeMs();
if (endTimeMs >= bucketStartMs) {
// For segments overlapping with current bucket, add to the result list
if (!isMergedSegment(preSelectedSegment, mergeLevel, sortedMergeLevels)) {
hasUnmergedSegments = true;
}
if (!isMergedSegment(preSelectedSegment, lowerMergeLevel, sortedMergeLevels)) {
areAllSegmentsReadyToMerge = false;
}
if (hasSpilledOverData(preSelectedSegment, bucketMs)) {
hasSpilledOverData = true;
}
selectedSegmentsForBucket.add(preSelectedSegment);
}
// endTimeMs < bucketStartMs
// Haven't found the first overlapping segment yet; continue to the next segment
} else {
// Has gone through all overlapping segments for current bucket
if (hasUnmergedSegments && areAllSegmentsReadyToMerge) {
// Add the bucket if there are unmerged segments
selectedSegmentsForAllBuckets.add(selectedSegmentsForBucket);
}
if (selectedSegmentsForAllBuckets.size() == maxNumParallelBuckets || hasSpilledOverData) {
// If there are enough buckets or found spilled over data, schedule merge tasks
break;
} else {
// Start with a new bucket
// TODO: If there are many small merged segments, we should merge them again
selectedSegmentsForBucket = new ArrayList<>();
hasUnmergedSegments = false;
areAllSegmentsReadyToMerge = true;
bucketStartMs = (startTimeMs / bucketMs) * bucketMs;
bucketEndMs = bucketStartMs + bucketMs;
if (!isValidBucketEndTime(bucketEndMs, bufferMs, lowerMergeLevel, mergeRollupTaskMetadata, processAll)) {
break;
}
if (!isMergedSegment(preSelectedSegment, mergeLevel, sortedMergeLevels)) {
hasUnmergedSegments = true;
}
if (!isMergedSegment(preSelectedSegment, lowerMergeLevel, sortedMergeLevels)) {
areAllSegmentsReadyToMerge = false;
}
if (hasSpilledOverData(preSelectedSegment, bucketMs)) {
hasSpilledOverData = true;
}
selectedSegmentsForBucket.add(preSelectedSegment);
}
}
}
// Add the last bucket if it contains unmerged segments and was not added before
if (hasUnmergedSegments && areAllSegmentsReadyToMerge && (selectedSegmentsForAllBuckets.isEmpty() || (
selectedSegmentsForAllBuckets.get(selectedSegmentsForAllBuckets.size() - 1)
!= selectedSegmentsForBucket))) {
selectedSegmentsForAllBuckets.add(selectedSegmentsForBucket);
}
if (selectedSegmentsForAllBuckets.isEmpty()) {
LOGGER.info("No unmerged segment found for table: {}, mergeLevel: {}", tableNameWithType, mergeLevel);
continue;
}
// Bump up watermark to the earliest start time of selected segments truncated to the closest bucket boundary
long newWatermarkMs = selectedSegmentsForAllBuckets.get(0).get(0).getStartTimeMs() / bucketMs * bucketMs;
mergeRollupTaskMetadata.getWatermarkMap().put(mergeLevel, newWatermarkMs);
LOGGER.info("Update watermark for table: {}, mergeLevel: {} from: {} to: {}", tableNameWithType, mergeLevel,
watermarkMs, newWatermarkMs);
// Update the delay metrics
if (!processAll) {
createOrUpdateDelayMetrics(tableNameWithType, mergeLevel, lowerMergeLevel, newWatermarkMs, bufferMs,
bucketMs);
}
// Create task configs
int maxNumRecordsPerTask =
mergeConfigs.get(MergeRollupTask.MAX_NUM_RECORDS_PER_TASK_KEY) != null ? Integer.parseInt(
mergeConfigs.get(MergeRollupTask.MAX_NUM_RECORDS_PER_TASK_KEY)) : DEFAULT_MAX_NUM_RECORDS_PER_TASK;
SegmentPartitionConfig segmentPartitionConfig = tableConfig.getIndexingConfig().getSegmentPartitionConfig();
if (segmentPartitionConfig == null) {
for (List<SegmentZKMetadata> selectedSegmentsPerBucket : selectedSegmentsForAllBuckets) {
pinotTaskConfigsForTable.addAll(
createPinotTaskConfigs(selectedSegmentsPerBucket, tableConfig, maxNumRecordsPerTask, mergeLevel,
null, mergeConfigs, taskConfigs));
}
} else {
// For a partitioned table, schedule a separate task for each partitionId (the partitionId is constructed
// from the partitions of all partition columns. The partition columns of a segment must exactly match the
// partition columns of the table configuration, and each column must have exactly one partition in the
// segment metadata). Segments that do not meet these conditions are treated as outlier segments, and
// additional tasks are generated for them.
Map<String, ColumnPartitionConfig> columnPartitionMap = segmentPartitionConfig.getColumnPartitionMap();
List<String> partitionColumns = new ArrayList<>(columnPartitionMap.keySet());
for (List<SegmentZKMetadata> selectedSegmentsPerBucket : selectedSegmentsForAllBuckets) {
Map<List<Integer>, List<SegmentZKMetadata>> partitionToSegments = new HashMap<>();
List<SegmentZKMetadata> outlierSegments = new ArrayList<>();
for (SegmentZKMetadata selectedSegment : selectedSegmentsPerBucket) {
SegmentPartitionMetadata segmentPartitionMetadata = selectedSegment.getPartitionMetadata();
List<Integer> partitions = new ArrayList<>();
if (segmentPartitionMetadata != null && columnPartitionMap.keySet()
.equals(segmentPartitionMetadata.getColumnPartitionMap().keySet())) {
for (String partitionColumn : partitionColumns) {
if (segmentPartitionMetadata.getPartitions(partitionColumn).size() == 1) {
partitions.add(segmentPartitionMetadata.getPartitions(partitionColumn).iterator().next());
} else {
partitions.clear();
break;
}
}
}
if (partitions.isEmpty()) {
outlierSegments.add(selectedSegment);
} else {
partitionToSegments.computeIfAbsent(partitions, k -> new ArrayList<>()).add(selectedSegment);
}
}
for (Map.Entry<List<Integer>, List<SegmentZKMetadata>> entry : partitionToSegments.entrySet()) {
List<Integer> partition = entry.getKey();
List<SegmentZKMetadata> partitionedSegments = entry.getValue();
pinotTaskConfigsForTable.addAll(
createPinotTaskConfigs(partitionedSegments, tableConfig, maxNumRecordsPerTask, mergeLevel,
partition, mergeConfigs, taskConfigs));
}
if (!outlierSegments.isEmpty()) {
pinotTaskConfigsForTable.addAll(
createPinotTaskConfigs(outlierSegments, tableConfig, maxNumRecordsPerTask, mergeLevel,
null, mergeConfigs, taskConfigs));
}
}
}
}
// Write updated watermark map to zookeeper
if (!processAll) {
try {
_clusterInfoAccessor
.setMinionTaskMetadata(mergeRollupTaskMetadata, MinionConstants.MergeRollupTask.TASK_TYPE,
expectedVersion);
} catch (ZkException e) {
LOGGER.error(
"Version changed while updating merge/rollup task metadata for table: {}, skip scheduling. There are "
+ "multiple task schedulers for the same table, need to investigate!", tableNameWithType);
continue;
}
}
pinotTaskConfigs.addAll(pinotTaskConfigsForTable);
LOGGER.info("Finished generating task configs for table: {} for task: {}, numTasks: {}", tableNameWithType,
taskType, pinotTaskConfigsForTable.size());
}
// Clean up metrics
cleanUpDelayMetrics(tableConfigs);
return pinotTaskConfigs;
}
|
@Test
public void testBufferTime() {
Map<String, Map<String, String>> taskConfigsMap = new HashMap<>();
Map<String, String> tableTaskConfigs = new HashMap<>();
tableTaskConfigs.put("daily.mergeType", "concat");
tableTaskConfigs.put("daily.bufferTimePeriod", "1d");
tableTaskConfigs.put("daily.bucketTimePeriod", "1d");
tableTaskConfigs.put("daily.maxNumRecordsPerSegment", "1000000");
taskConfigsMap.put(MinionConstants.MergeRollupTask.TASK_TYPE, tableTaskConfigs);
TableConfig offlineTableConfig = getTableConfig(TableType.OFFLINE, taskConfigsMap);
ClusterInfoAccessor mockClusterInfoProvide = mock(ClusterInfoAccessor.class);
String segmentName1 = "testTable__1";
long currentTime = System.currentTimeMillis();
SegmentZKMetadata metadata1 =
getSegmentZKMetadata(segmentName1, currentTime - 500_000L, currentTime, TimeUnit.MILLISECONDS, null);
when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(Lists.newArrayList(metadata1));
when(mockClusterInfoProvide.getIdealState(OFFLINE_TABLE_NAME)).thenReturn(
getIdealState(OFFLINE_TABLE_NAME, Lists.newArrayList(segmentName1)));
MergeRollupTaskGenerator generator = new MergeRollupTaskGenerator();
generator.init(mockClusterInfoProvide);
List<PinotTaskConfig> pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
assertEquals(pinotTaskConfigs.size(), 0);
}
|
@Override
public Object handle(ProceedingJoinPoint proceedingJoinPoint, Retry retry, String methodName)
throws Throwable {
RetryTransformer<?> retryTransformer = RetryTransformer.of(retry);
Object returnValue = proceedingJoinPoint.proceed();
return executeRxJava3Aspect(retryTransformer, returnValue);
}
|
@Test
public void testRxJava3Types() throws Throwable {
Retry retry = Retry.ofDefaults("test");
when(proceedingJoinPoint.proceed()).thenReturn(Single.just("Test"));
assertThat(rxJava3RetryAspectExt.handle(proceedingJoinPoint, retry, "testMethod"))
.isNotNull();
when(proceedingJoinPoint.proceed()).thenReturn(Flowable.just("Test"));
assertThat(rxJava3RetryAspectExt.handle(proceedingJoinPoint, retry, "testMethod"))
.isNotNull();
}
|
public static String toAlias(String str) {
return toAlias(str, FitzpatrickAction.PARSE);
}
|
@Test
public void toAliasTest() {
String alias = EmojiUtil.toAlias("😄");
assertEquals(":smile:", alias);
}
|
public ArtifactResolveRequest startArtifactResolveProcess(HttpServletRequest httpServletRequest) throws SamlParseException {
try {
final var artifactResolveRequest = validateRequest(httpServletRequest);
final var samlSession = updateArtifactResolveRequestWithSamlSession(artifactResolveRequest);
validateArtifactResolve(artifactResolveRequest);
dcMetadataService.resolveDcMetadata(artifactResolveRequest);
signatureService.validateSamlRequest(artifactResolveRequest, artifactResolveRequest.getArtifactResolve().getSignature());
createAdAuthentication(samlSession, artifactResolveRequest);
samlSessionService.updateSamlSession(artifactResolveRequest);
return artifactResolveRequest;
} catch (MessageDecodingException e) {
throw new SamlParseException("ArtifactResolveRequest soap11 decode exception", e);
} catch (ComponentInitializationException e) {
throw new SamlParseException("ArtifactResolveRequest initialization exception", e);
} catch (SamlSessionException e) {
throw new SamlParseException("Failed to load saml session", e);
} catch (AdException e) {
throw new SamlParseException("Failed to create an authentication", e);
} catch (DienstencatalogusException e) {
throw new SamlParseException("Failed to retrieve metadata from DienstenCatalogus", e);
} catch (SamlValidationException e) {
throw new SamlParseException("ArtifactResolve not valid", e);
} catch (ValidationException e) {
throw new SamlParseException("Failed to validate", e);
} catch (SharedServiceClientException e) {
throw new SamlParseException("Failed to retrieve data from sharedServiceClient.getSSConfigLong", e);
}
}
|
@Test
void parseArtifactResolveInvalidVersion() throws SamlSessionException {
when(samlSessionServiceMock.loadSession(anyString())).thenReturn(samlSession);
SamlParseException exception = assertThrows(SamlParseException.class, () ->
artifactResolveService.startArtifactResolveProcess(prepareSoapRequest(artifactResolveRequestInvalidVersion)));
assertEquals("ArtifactResolve not valid", exception.getMessage());
}
|
@Override
public Path move(final Path file, final Path renamed, final TransferStatus status,
final Delete.Callback deleteCallback, final ConnectionCallback connectionCallback) throws BackgroundException {
try {
session.getClient().move(file.getAbsolute(), renamed.getAbsolute());
// Copy original file attributes
return renamed.withAttributes(file.attributes());
}
catch(MantaException e) {
throw new MantaExceptionMappingService().map("Cannot rename {0}", e, file);
}
catch(MantaClientHttpResponseException e) {
throw new MantaHttpExceptionMappingService().map("Cannot rename {0}", e, file);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map("Cannot rename {0}", e, file);
}
}
|
@Test
public void testRename() throws BackgroundException {
final Touch touch = new MantaTouchFeature(session);
final Move move = new MantaMoveFeature(session);
final Delete delete = new MantaDeleteFeature(session);
final AttributesFinder attributesFinder = new MantaAttributesFinderFeature(session);
final Path drive = new MantaDirectoryFeature(session).mkdir(randomDirectory(), new TransferStatus());
final Path file = new Path(drive, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
touch.touch(file, new TransferStatus().withMime("x-application/cyberduck"));
assertNotNull(attributesFinder.find(file));
Path rename = new Path(drive, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
assertTrue(move.isSupported(file, rename));
assertEquals(rename, move.move(file, rename, new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback()));
assertFalse(new MantaFindFeature(session).find(file));
assertTrue(new MantaFindFeature(session).find(rename));
assertNotNull(attributesFinder.find(rename));
delete.delete(Collections.singletonList(rename), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
public static <@NonNull E> CompletableSource resolveScopeFromLifecycle(
final LifecycleScopeProvider<E> provider) throws OutsideScopeException {
return resolveScopeFromLifecycle(provider, true);
}
|
@Test
public void resolveScopeFromLifecycle_normal_notComparable() {
PublishSubject<IntHolder> lifecycle = PublishSubject.create();
TestObserver<?> o = testSource(resolveScopeFromLifecycle(lifecycle, new IntHolder(3)));
lifecycle.onNext(new IntHolder(0));
o.assertNoErrors().assertNotComplete();
lifecycle.onNext(new IntHolder(1));
o.assertNoErrors().assertNotComplete();
lifecycle.onNext(new IntHolder(0));
o.assertNoErrors().assertNotComplete();
lifecycle.onNext(new IntHolder(2));
o.assertNoErrors().assertNotComplete();
// Now we end
lifecycle.onNext(new IntHolder(3));
o.assertComplete();
}
|
public RingbufferStoreConfig setStoreImplementation(@Nonnull RingbufferStore storeImplementation) {
this.storeImplementation = checkNotNull(storeImplementation, "Ringbuffer store cannot be null!");
this.className = null;
return this;
}
|
@Test
public void setStoreImplementation() {
SerializationService serializationService = new DefaultSerializationServiceBuilder().build();
RingbufferStore<Data> store = RingbufferStoreWrapper.create(
RingbufferService.getRingbufferNamespace("name"),
config, OBJECT, serializationService, null,
mock(NodeEngine.class), null);
config.setStoreImplementation(store);
assertEquals(store, config.getStoreImplementation());
}
|
static JobManagerProcessSpec processSpecFromConfig(Configuration config) {
return createMemoryProcessSpec(PROCESS_MEMORY_UTILS.memoryProcessSpecFromConfig(config));
}
|
@Test
void testOffHeapMemoryDerivedFromJvmHeapAndTotalFlinkMemory() {
MemorySize jvmHeap = MemorySize.ofMebiBytes(150);
MemorySize defaultOffHeap = JobManagerOptions.OFF_HEAP_MEMORY.defaultValue();
MemorySize expectedOffHeap = MemorySize.ofMebiBytes(100).add(defaultOffHeap);
MemorySize totalFlinkMemory = jvmHeap.add(expectedOffHeap);
Configuration conf = new Configuration();
conf.set(JobManagerOptions.TOTAL_FLINK_MEMORY, totalFlinkMemory);
conf.set(JobManagerOptions.JVM_HEAP_MEMORY, jvmHeap);
JobManagerProcessSpec jobManagerProcessSpec =
JobManagerProcessUtils.processSpecFromConfig(conf);
assertThat(jobManagerProcessSpec.getJvmDirectMemorySize()).isEqualTo(expectedOffHeap);
assertThat(testLoggerResource.getMessages())
.anyMatch(
str ->
str.contains(
String.format(
"The Off-Heap Memory size (%s) is derived the configured Total Flink Memory size (%s) minus "
+ "the configured JVM Heap Memory size (%s). The default Off-Heap Memory size (%s) is ignored.",
expectedOffHeap.toHumanReadableString(),
totalFlinkMemory.toHumanReadableString(),
jvmHeap.toHumanReadableString(),
defaultOffHeap.toHumanReadableString())));
}
|
@Override
public void validateDeptList(Collection<Long> ids) {
if (CollUtil.isEmpty(ids)) {
return;
}
// Fetch the department records
Map<Long, DeptDO> deptMap = getDeptMap(ids);
// Validate each department
ids.forEach(id -> {
DeptDO dept = deptMap.get(id);
if (dept == null) {
throw exception(DEPT_NOT_FOUND);
}
if (!CommonStatusEnum.ENABLE.getStatus().equals(dept.getStatus())) {
throw exception(DEPT_NOT_ENABLE, dept.getName());
}
});
}
|
@Test
public void testValidateDeptList_notEnable() {
// mock data
DeptDO deptDO = randomPojo(DeptDO.class).setStatus(CommonStatusEnum.DISABLE.getStatus());
deptMapper.insert(deptDO);
// prepare arguments
List<Long> ids = singletonList(deptDO.getId());
// invoke and assert the exception
assertServiceException(() -> deptService.validateDeptList(ids), DEPT_NOT_ENABLE, deptDO.getName());
}
|
public static <K,V> Map<K,Pair<List<V>,List<V>>> cogroup(List<Pair<K,V>> left,List<Pair<K,V>> right) {
Map<K,Pair<List<V>,List<V>>> ret = new HashMap<>();
//group by key first to consolidate values
Map<K,List<V>> leftMap = groupByKey(left);
Map<K,List<V>> rightMap = groupByKey(right);
/*
 * Iterate over each key in the left map,
 * appending its values to the left list of the pair.
 */
for(Map.Entry<K,List<V>> entry : leftMap.entrySet()) {
Pair<List<V>,List<V>> p = ret.computeIfAbsent(entry.getKey(),
k -> Pair.of(new ArrayList<>(), new ArrayList<>()));
p.getFirst().addAll(entry.getValue());
}
/*
 * Iterate over each key in the right map,
 * appending its values to the right list of the pair.
 */
for(Map.Entry<K,List<V>> entry : rightMap.entrySet()) {
Pair<List<V>,List<V>> p = ret.computeIfAbsent(entry.getKey(),
k -> Pair.of(new ArrayList<>(), new ArrayList<>()));
p.getSecond().addAll(entry.getValue());
}
return ret;
}
|
@Test
public void testCoGroup() {
List<Pair<String,String>> leftMap = new ArrayList<>();
List<Pair<String,String>> rightMap = new ArrayList<>();
leftMap.add(Pair.of("cat","adam"));
leftMap.add(Pair.of("dog","adam"));
rightMap.add(Pair.of("fish","alex"));
rightMap.add(Pair.of("cat","alice"));
rightMap.add(Pair.of("dog","steve"));
//[(fish,([],[alex])), (dog,([adam],[steve])), (cat,([adam],[alice]))]
Map<String,Pair<List<String>,List<String>>> assertion = new HashMap<>();
assertion.put("cat",Pair.of(Arrays.asList("adam"),Arrays.asList("alice")));
assertion.put("dog",Pair.of(Arrays.asList("adam"),Arrays.asList("steve")));
assertion.put("fish",Pair.of(Collections.<String>emptyList(),Arrays.asList("alex")));
Map<String, Pair<List<String>, List<String>>> cogroup = FunctionalUtils.cogroup(leftMap, rightMap);
assertEquals(assertion,cogroup);
}
|
public static long sizeOf(Path path) throws IOException {
SizeVisitor visitor = new SizeVisitor();
Files.walkFileTree(path, visitor);
return visitor.size;
}
|
@Test
public void sizeOf_is_zero_on_empty_files() throws IOException {
File file = temporaryFolder.newFile();
assertThat(FileUtils2.sizeOf(file.toPath())).isZero();
}
|
public static short translateBucketAcl(AccessControlList acl, String userId) {
short mode = (short) 0;
for (Grant grant : acl.getGrantsAsList()) {
Permission perm = grant.getPermission();
Grantee grantee = grant.getGrantee();
if (perm.equals(Permission.Read)) {
if (isUserIdInGrantee(grantee, userId)) {
// If the bucket is readable by the user, add r and x to the owner mode.
mode |= (short) 0500;
}
} else if (perm.equals(Permission.Write)) {
if (isUserIdInGrantee(grantee, userId)) {
// If the bucket is writable by the user, +w to the owner mode.
mode |= (short) 0200;
}
} else if (perm.equals(Permission.FullControl)) {
if (isUserIdInGrantee(grantee, userId)) {
// If the user has full control to the bucket, +rwx to the owner mode.
mode |= (short) 0700;
}
}
}
return mode;
}
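// A tiny sketch (not part of the utility) showing how the octal bits above compose:
// Read grants r-x (0500), Write grants -w- (0200), and together they equal the 0700
// that FullControl grants directly.
static void demoModeComposition() {
short mode = 0;
mode |= (short) 0500; // Permission.Read  -> r-x for the owner
mode |= (short) 0200; // Permission.Write -> -w- for the owner
assert mode == (short) 0700; // same as Permission.FullControl
}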
|
@Test
public void translateEveryoneReadPermission() {
GroupGrantee allUsersGrantee = GroupGrantee.AllUsers;
mAcl.grantPermission(allUsersGrantee, Permission.Read);
Assert.assertEquals((short) 0500, S3AUtils.translateBucketAcl(mAcl, ID));
Assert.assertEquals((short) 0500, S3AUtils.translateBucketAcl(mAcl, OTHER_ID));
}
|
void generate(MessageSpec message) throws Exception {
if (message.struct().versions().contains(Short.MAX_VALUE)) {
throw new RuntimeException("Message " + message.name() + " does " +
"not specify a maximum version.");
}
structRegistry.register(message);
schemaGenerator.generateSchemas(message);
messageFlexibleVersions = message.flexibleVersions();
generateClass(Optional.of(message),
message.dataClassName(),
message.struct(),
message.struct().versions());
headerGenerator.generate();
}
|
@Test
public void testInvalidNullDefaultForPotentiallyNonNullableStruct() throws Exception {
MessageSpec testMessageSpec = MessageGenerator.JSON_SERDE.readValue(String.join("", Arrays.asList(
"{",
" \"type\": \"request\",",
" \"name\": \"FooBar\",",
" \"validVersions\": \"0-1\",",
" \"flexibleVersions\": \"none\",",
" \"fields\": [",
" { \"name\": \"struct1\", \"type\": \"MyStruct\", \"versions\": \"0+\", \"nullableVersions\": \"1+\", ",
" \"default\": \"null\", \"fields\": [",
" { \"name\": \"field1\", \"type\": \"string\", \"versions\": \"0+\" }",
" ]",
" }",
" ]",
"}")), MessageSpec.class);
assertStringContains("not all versions of this field are nullable",
assertThrows(RuntimeException.class, () -> {
new MessageDataGenerator("org.apache.kafka.common.message").generate(testMessageSpec);
}).getMessage());
}
|
static <T> T getWildcardMappedObject(final Map<String, T> mapping, final String query) {
T value = mapping.get(query);
if (value == null) {
for (String key : mapping.keySet()) {
// Turn the search key into a regex, using all characters but the * as a literal.
String regex = Arrays.stream(key.split("\\*")) // split in parts that do not have a wildcard in them
.map(Pattern::quote) // each part should be used as a literal (not as a regex or partial regex)
.collect(Collectors.joining(".*")); // join all literal parts with a regex representation on the wildcard.
if (key.endsWith("*")) { // the 'split' will have removed any trailing wildcard characters. Correct for that.
regex += ".*";
}
if (query.matches(regex)) {
value = mapping.get(key);
break;
}
}
}
return value;
}
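// A minimal sketch of the wildcard-to-regex translation used above (helper name is
// illustrative): "myplugin/*.jsp".split("\\*") yields ["myplugin/", ".jsp"], each part
// is Pattern.quote()d as a literal, and the parts are re-joined with ".*", so the key
// matches queries such as "myplugin/foo.jsp".
static boolean wildcardMatches(String key, String query) {
String regex = Arrays.stream(key.split("\\*"))
.map(Pattern::quote)
.collect(Collectors.joining(".*"));
if (key.endsWith("*")) {
regex += ".*"; // split() drops a trailing wildcard, so restore it
}
return query.matches(regex);
}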
|
@Test
public void testWildcardExtension() throws Exception
{
// Setup test fixture.
final Map<String, Object> haystack = Map.of("myplugin/*.jsp", new Object());
// Execute system under test.
final Object result = PluginServlet.getWildcardMappedObject(haystack, "myplugin/foo.jsp");
// Verify results.
assertNotNull(result);
}
|
@Override
public String validateConfigurationRequestBody(Map<String, String> configuration) {
return NULLS_GSON.toJson(configuration);
}
|
@Test
public void validateConfigurationRequestBody_shouldSerializeConfigurationToJson() {
final ArtifactMessageConverterV2 converter = new ArtifactMessageConverterV2();
final String requestBody = converter.validateConfigurationRequestBody(Map.of("Foo", "Bar"));
assertThatJson("{\"Foo\":\"Bar\"}").isEqualTo(requestBody);
}
|
public MessageExt lookMessageByOffset(final long commitLogOffset) {
return this.store.lookMessageByOffset(commitLogOffset);
}
|
@Test
public void testLookMessageByOffset() {
when(messageStore.lookMessageByOffset(anyLong())).thenReturn(new MessageExt());
MessageExt messageExt = transactionBridge.lookMessageByOffset(123);
assertThat(messageExt).isNotNull();
}
|
@Override
public KeyValueIterator<Windowed<K>, V> backwardFetch(final K key) {
Objects.requireNonNull(key, "key cannot be null");
return new MeteredWindowedKeyValueIterator<>(
wrapped().backwardFetch(keyBytes(key)),
fetchSensor,
iteratorDurationSensor,
streamsMetrics,
serdes::keyFrom,
serdes::valueFrom,
time,
numOpenIterators,
openIterators
);
}
|
@Test
public void shouldThrowNullPointerOnBackwardFetchIfFromIsNull() {
setUpWithoutContext();
assertThrows(NullPointerException.class, () -> store.backwardFetch(null, "to"));
}
|
private Object getKey(WindowedValue<InT> elem) {
KV<?, ?> kv = (KV<?, ?>) elem.getValue();
if (kv == null) {
return NULL_KEY;
} else {
Object key = kv.getKey();
return key == null ? NULL_KEY : key;
}
}
|
@Test
@Ignore("https://github.com/apache/beam/issues/23745")
public void testPipelineWithAggregation() {
final List<KV<String, Long>> input =
new ArrayList<>(
Arrays.asList(
KV.of("apple", 2L),
KV.of("banana", 5L),
KV.of("apple", 8L),
KV.of("grape", 10L),
KV.of("banana", 5L)));
// TODO: remove after SAMZA-2761 fix
for (int i = 0; i < 50; i++) {
input.add(KV.of("*", 0L));
}
PCollection<KV<String, Long>> sums =
pipeline
.apply(Create.of(input))
.apply(Filter.by(x -> !x.getKey().equals("*")))
.apply(Sum.longsPerKey());
PAssert.that(sums)
.containsInAnyOrder(
Arrays.asList(KV.of("apple", 10L), KV.of("banana", 10L), KV.of("grape", 10L)));
pipeline.run();
}
|
@Transactional
public void updatePassword(String phone, String newPassword) {
User user = readUserOrThrow(phone);
user.updatePassword(passwordEncoderHelper.encodePassword(newPassword));
}
|
@DisplayName("존재하지 않는 사용자의 번호로 비밀번호 변경 요청이 올 경우 UserErrorException을 발생시킨다.")
@Test
void updatePasswordIfUserNotFound() {
// given
String phone = "010-1234-5678";
String newPassword = "newPassword123";
given(userService.readUserByPhone(phone)).willReturn(Optional.empty());
// when - then
assertThrows(UserErrorException.class, () -> authFindService.updatePassword(phone, newPassword));
}
|
public synchronized List<Page> getPages(
Long tableId,
int partNumber,
int totalParts,
List<Integer> columnIndexes,
long expectedRows)
{
if (!contains(tableId)) {
throw new PrestoException(MISSING_DATA, "Failed to find table on a worker.");
}
TableData tableData = tables.get(tableId);
if (tableData.getRows() < expectedRows) {
throw new PrestoException(MISSING_DATA,
format("Expected to find [%s] rows on a worker, but found [%s].", expectedRows, tableData.getRows()));
}
ImmutableList.Builder<Page> partitionedPages = ImmutableList.builder();
for (int i = partNumber; i < tableData.getPages().size(); i += totalParts) {
partitionedPages.add(getColumns(tableData.getPages().get(i), columnIndexes));
}
return partitionedPages.build();
}
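// A standalone sketch of the round-robin split performed by the loop above (helper is
// hypothetical): with totalParts = 2, part 0 reads pages 0, 2, 4, ... and part 1 reads
// pages 1, 3, 5, ..., so parallel readers cover every page exactly once.
static List<Integer> pageIndexesForPart(int partNumber, int totalParts, int pageCount) {
List<Integer> indexes = new ArrayList<>();
for (int i = partNumber; i < pageCount; i += totalParts) {
indexes.add(i);
}
return indexes;
}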
|
@Test
public void testInsertPageWithoutCreate()
{
insertToTable(0L, 0L);
assertEquals(pagesStore.getPages(0L, 0, 1, ImmutableList.of(0), POSITIONS_PER_PAGE).size(), 1);
}
|
@Override
public Prepare.PreparingTable getTable(List<String> names) {
Prepare.PreparingTable originRelOptTable = super.getTable(names);
if (originRelOptTable == null) {
return null;
} else {
// Wrap as FlinkPreparingTableBase to use in query optimization.
CatalogSchemaTable table = originRelOptTable.unwrap(CatalogSchemaTable.class);
if (table != null) {
return toPreparingTable(
originRelOptTable.getRelOptSchema(),
originRelOptTable.getQualifiedName(),
originRelOptTable.getRowType(),
table);
} else {
return originRelOptTable;
}
}
}
|
@Test
void testGetFlinkPreparingTableBase() {
// Mock CatalogSchemaTable.
final ObjectIdentifier objectIdentifier = ObjectIdentifier.of("a", "b", "c");
final ResolvedSchema schema =
new ResolvedSchema(Collections.emptyList(), Collections.emptyList(), null);
final CatalogTable catalogTable =
ConnectorCatalogTable.source(
new TestTableSource(true, TableSchema.fromResolvedSchema(schema)), true);
final ResolvedCatalogTable resolvedCatalogTable =
new ResolvedCatalogTable(catalogTable, schema);
CatalogSchemaTable mockTable =
new CatalogSchemaTable(
ContextResolvedTable.permanent(
objectIdentifier,
CatalogManagerMocks.createEmptyCatalog(),
resolvedCatalogTable),
FlinkStatistic.UNKNOWN(),
true);
rootSchemaPlus.add(tableMockName, mockTable);
Prepare.PreparingTable preparingTable =
catalogReader.getTable(Collections.singletonList(tableMockName));
assertThat(preparingTable).isInstanceOf(FlinkPreparingTableBase.class);
}
|
@Udf(description = "Returns the base 10 logarithm of an INT value.")
public Double log(
@UdfParameter(
value = "value",
description = "the value get the base 10 logarithm of."
) final Integer value
) {
return log(value == null ? null : value.doubleValue());
}
|
@Test
public void shouldHandleNegativeBase() {
assertThat(Double.isNaN(udf.log(-15, 13)), is(true));
assertThat(Double.isNaN(udf.log(-15L, 13L)), is(true));
assertThat(Double.isNaN(udf.log(-15.0, 13.0)), is(true));
}
|
public ArtifactResponse buildArtifactResponse(ArtifactResolveRequest artifactResolveRequest, String entityId, SignType signType) throws InstantiationException, ValidationException, ArtifactBuildException, BvdException {
final var artifactResponse = OpenSAMLUtils.buildSAMLObject(ArtifactResponse.class);
final var status = OpenSAMLUtils.buildSAMLObject(Status.class);
final var statusCode = OpenSAMLUtils.buildSAMLObject(StatusCode.class);
final var issuer = OpenSAMLUtils.buildSAMLObject(Issuer.class);
return ArtifactResponseBuilder
.newInstance(artifactResponse)
.addID()
.addIssueInstant()
.addInResponseTo(artifactResolveRequest.getArtifactResolve().getID())
.addStatus(StatusBuilder
.newInstance(status)
.addStatusCode(statusCode, StatusCode.SUCCESS)
.build())
.addIssuer(issuer, entityId)
.addMessage(buildResponse(artifactResolveRequest, entityId, signType))
.addSignature(signatureService, signType)
.build();
}
|
@Test
void validateAssertionIsNotPresent() throws ValidationException, SamlParseException, ArtifactBuildException, BvdException, InstantiationException {
ArtifactResponse artifactResponse = artifactResponseService.buildArtifactResponse(getArtifactResolveRequest("failed", true, false, SAML_COMBICONNECT, EncryptionType.BSN, ENTRANCE_ENTITY_ID), ENTRANCE_ENTITY_ID, TD);
Response response = (Response) artifactResponse.getMessage();
assertEquals(0, response.getAssertions().size());
}
|
public User getRequester() {
if (initiator.getType() == Initiator.Type.MANUAL) {
return ((ManualInitiator) initiator).getUser();
}
return null;
}
|
@Test
public void testGetRequester() {
RunRequest runRequest =
RunRequest.builder()
.currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
.requester(User.create("tester"))
.build();
Assert.assertEquals("tester", runRequest.getRequester().getName());
runRequest =
RunRequest.builder()
.initiator(UpstreamInitiator.withType(Initiator.Type.FOREACH))
.currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
.build();
Assert.assertNull(runRequest.getRequester());
}
|
@Override
public String toString() {
return "com.alibaba.csp.sentinel.spi.SpiLoader[" + service.getName() + "]";
}
|
@Test
public void testToString() {
SpiLoader spiLoader = SpiLoader.of(ProcessorSlot.class);
assertEquals("com.alibaba.csp.sentinel.spi.SpiLoader[com.alibaba.csp.sentinel.slotchain.ProcessorSlot]"
, spiLoader.toString());
}
|
@Override
protected double maintain() {
// Reboot candidates: Nodes in long-term states, where we know we can safely orchestrate a reboot
List<Node> nodesToReboot = nodeRepository().nodes().list(Node.State.active, Node.State.ready).stream()
.filter(node -> node.type().isHost())
.filter(this::shouldReboot)
.toList();
if (!nodesToReboot.isEmpty())
nodeRepository().nodes().reboot(NodeListFilter.from(nodesToReboot));
return 1.0;
}
|
@Test(timeout = 30_000) // Avoid looping forever if assertions don't hold
public void testRebootScheduledEvenWithSmallProbability() {
Duration rebootInterval = Duration.ofDays(30);
InMemoryFlagSource flagSource = new InMemoryFlagSource();
ProvisioningTester tester = createTester(rebootInterval, flagSource);
makeReadyHosts(2, tester);
NodeRepository nodeRepository = tester.nodeRepository();
NodeRebooter rebooter = new NodeRebooter(nodeRepository, flagSource, new TestMetric());
assertReadyHosts(2, nodeRepository, 0L);
// No reboots within 0x-1x reboot interval
tester.clock().advance(rebootInterval);
rebooter.maintain();
simulateReboot(nodeRepository);
assertReadyHosts(2, nodeRepository, 0L);
// Advancing just a little bit into the 1x-2x interval, there is a >0 probability of
// rebooting a host. Run until all have been scheduled.
tester.clock().advance(Duration.ofMinutes(25));
while (true) {
rebooter.maintain();
simulateReboot(nodeRepository);
NodeList nodes = nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host);
int count = withCurrentRebootGeneration(1L, nodes.asList()).size();
if (count == 2) {
break;
}
}
}
|
public static Boolean isMultiInstance() {
return isMultiInstance;
}
|
@Test
void testIsMultiInstance() throws InvocationTargetException, IllegalAccessException {
initMethod.invoke(JvmUtil.class);
Boolean multiInstance = JvmUtil.isMultiInstance();
assertFalse(multiInstance);
}
|
@Override
public void patchInstance(String namespaceId, String serviceName, InstancePatchObject patchObject)
throws NacosException {
Service service = getService(namespaceId, serviceName, true);
Instance instance = getInstance(namespaceId, serviceName, patchObject.getCluster(), patchObject.getIp(),
patchObject.getPort());
String metadataId = InstancePublishInfo.genMetadataId(instance.getIp(), instance.getPort(),
instance.getClusterName());
Optional<InstanceMetadata> instanceMetadata = metadataManager.getInstanceMetadata(service, metadataId);
InstanceMetadata newMetadata = instanceMetadata.map(this::cloneMetadata).orElseGet(InstanceMetadata::new);
mergeMetadata(newMetadata, patchObject);
metadataOperateService.updateInstanceMetadata(service, metadataId, newMetadata);
}
|
@Test
void testPatchInstance() throws NacosException {
Instance instance = new Instance();
instance.setIp("1.1.1.1");
instance.setPort(8848);
instance.setClusterName("C");
List<Instance> instances = Collections.singletonList(instance);
ServiceInfo serviceInfo = new ServiceInfo();
serviceInfo.setHosts(instances);
when(serviceStorage.getData(Mockito.any())).thenReturn(serviceInfo);
instanceOperatorClient.patchInstance("A", "B", new InstancePatchObject("C", "1.1.1.1", 8848));
Mockito.verify(metadataOperateService).updateInstanceMetadata(Mockito.any(), Mockito.anyString(), Mockito.any());
}
|
@Override
public boolean wasNull() throws SQLException {
return mergedResult.wasNull();
}
|
@Test
void assertWasNull() throws SQLException {
assertFalse(createDecoratedEncryptShowCreateTableMergedResult(mergedResult, mock(EncryptRule.class)).wasNull());
}
|
public ParamType getForeachParamType(
String foreachInlineWorkflowId, String stepId, String paramName) {
String ret =
withMetricLogError(
() ->
withRetryableQuery(
GET_PARAM_TYPE_FROM_FOREACH_TEMPLATE,
stmt -> {
int idx = 0;
stmt.setString(++idx, paramName);
stmt.setString(++idx, foreachInlineWorkflowId);
stmt.setString(++idx, stepId);
},
result -> {
if (result.next()) {
return result.getString(PAYLOAD_COLUMN);
}
return null;
}),
"getForeachParamType",
"Failed to get the param type for [{}] from foreach step [{}][{}]",
paramName,
foreachInlineWorkflowId,
stepId);
if (ret == null) {
throw new MaestroNotFoundException(
"Parameter [%s] for foreach step [%s][%s] not found (either not created or deleted)",
paramName, foreachInlineWorkflowId, stepId);
}
return ParamType.create(ret);
}
|
@Test
public void testGetForeachParamType() throws Exception {
si = loadObject("fixtures/instances/sample-step-instance-succeeded.json", StepInstance.class);
si.setStepAttemptId(10);
stepDao.insertOrUpsertStepInstance(si, false);
assertEquals(
ParamType.LONG, stepDao.getForeachParamType("sample-dag-test-3", "job1", "sleep_seconds"));
}
|
private String getEnv(String envName, InterpreterLaunchContext context) {
String env = context.getProperties().getProperty(envName);
if (StringUtils.isBlank(env)) {
env = System.getenv(envName);
}
if (StringUtils.isBlank(env)) {
LOGGER.warn("environment variable: {} is empty", envName);
}
return env;
}
|
@Test
void testLocalMode() throws IOException {
SparkInterpreterLauncher launcher = new SparkInterpreterLauncher(zConf, null);
Properties properties = new Properties();
properties.setProperty("SPARK_HOME", sparkHome);
properties.setProperty("ENV_1", "");
properties.setProperty("property_1", "value_1");
properties.setProperty("spark.master", "local[*]");
properties.setProperty("spark.files", "file_1");
properties.setProperty("spark.jars", "jar_1");
InterpreterOption option = new InterpreterOption();
InterpreterLaunchContext context = new InterpreterLaunchContext(properties, option, null, "user1", "intpGroupId", "groupId", "spark", "spark", 0, "host");
InterpreterClient client = launcher.launch(context);
assertTrue(client instanceof ExecRemoteInterpreterProcess);
try (ExecRemoteInterpreterProcess interpreterProcess = (ExecRemoteInterpreterProcess) client) {
assertEquals("spark", interpreterProcess.getInterpreterSettingName());
assertTrue(interpreterProcess.getInterpreterDir().endsWith("/interpreter/spark"));
assertTrue(interpreterProcess.getLocalRepoDir().endsWith("/local-repo/groupId"));
assertEquals(zConf.getInterpreterRemoteRunnerPath(), interpreterProcess.getInterpreterRunner());
assertTrue(interpreterProcess.getEnv().size() >= 2);
assertEquals(sparkHome, interpreterProcess.getEnv().get("SPARK_HOME"));
assertFalse(interpreterProcess.getEnv().containsKey("ENV_1"));
String expected = "--conf|spark.files=file_1" +
"|--conf|spark.jars=jar_1|--conf|spark.app.name=intpGroupId|--conf|spark.master=local[*]";
assertTrue(CollectionUtils.isEqualCollection(Arrays.asList(expected.split("\\|")),
Arrays.asList(interpreterProcess.getEnv().get("ZEPPELIN_SPARK_CONF").split("\\|"))));
}
}
|
int doWork(final long nowNs)
{
int workCount = 0;
switch (state)
{
case INIT:
workCount += init(nowNs);
break;
case CANVASS:
workCount += canvass(nowNs);
break;
case NOMINATE:
workCount += nominate(nowNs);
break;
case CANDIDATE_BALLOT:
workCount += candidateBallot(nowNs);
break;
case FOLLOWER_BALLOT:
workCount += followerBallot(nowNs);
break;
case LEADER_LOG_REPLICATION:
workCount += leaderLogReplication(nowNs);
break;
case LEADER_REPLAY:
workCount += leaderReplay(nowNs);
break;
case LEADER_INIT:
workCount += leaderInit(nowNs);
break;
case LEADER_READY:
workCount += leaderReady(nowNs);
break;
case FOLLOWER_LOG_REPLICATION:
workCount += followerLogReplication(nowNs);
break;
case FOLLOWER_REPLAY:
workCount += followerReplay(nowNs);
break;
case FOLLOWER_CATCHUP_INIT:
workCount += followerCatchupInit(nowNs);
break;
case FOLLOWER_CATCHUP_AWAIT:
workCount += followerCatchupAwait(nowNs);
break;
case FOLLOWER_CATCHUP:
workCount += followerCatchup(nowNs);
break;
case FOLLOWER_LOG_INIT:
workCount += followerLogInit(nowNs);
break;
case FOLLOWER_LOG_AWAIT:
workCount += followerLogAwait(nowNs);
break;
case FOLLOWER_READY:
workCount += followerReady(nowNs);
break;
case CLOSED:
break;
}
return workCount;
}
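// A minimal duty-cycle sketch showing how doWork is typically driven (assumes the
// election exposes a state() accessor, as the enclosing agent does; names here are
// illustrative, not a definitive API):
static void runElectionToCompletion(final Election election, final NanoClock clock, final IdleStrategy idleStrategy)
{
while (ElectionState.CLOSED != election.state())
{
// back off when an iteration makes no progress (workCount == 0)
idleStrategy.idle(election.doWork(clock.nanoTime()));
}
}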
|
@Test
void shouldElectSingleNodeClusterLeader()
{
final long leadershipTermId = NULL_VALUE;
final long logPosition = 0;
final ClusterMember[] clusterMembers = ClusterMember.parse(
"0,ingressEndpoint,consensusEndpoint,logEndpoint,catchupEndpoint,archiveEndpoint");
final ClusterMember thisMember = clusterMembers[0];
final Election election = newElection(leadershipTermId, logPosition, clusterMembers, thisMember);
final long newLeadershipTermId = leadershipTermId + 1;
when(recordingLog.isUnknown(newLeadershipTermId)).thenReturn(Boolean.TRUE);
clock.update(1, clock.timeUnit());
election.doWork(clock.nanoTime());
election.doWork(clock.nanoTime());
election.doWork(clock.nanoTime());
verify(consensusModuleAgent).joinLogAsLeader(eq(newLeadershipTermId), eq(logPosition), anyInt(), eq(true));
verify(electionStateCounter).setOrdered(ElectionState.LEADER_READY.code());
verify(electionCounter).incrementOrdered();
verify(recordingLog).ensureCoherent(
RECORDING_ID,
NULL_VALUE,
logPosition,
newLeadershipTermId,
logPosition,
NULL_VALUE,
clock.nanoTime(),
clock.nanoTime(),
ctx.fileSyncLevel());
}
|
public static Set<Class<?>> getInterfaces(Class<?> clazz) {
if (clazz.isInterface()) {
// note: an interface is returned as-is; the recursive call below short-circuits here,
// so the super-interfaces of an interface are not expanded
return Collections.singleton(clazz);
}
Set<Class<?>> interfaces = new LinkedHashSet<>();
while (clazz != null) {
Class<?>[] ifcs = clazz.getInterfaces();
for (Class<?> ifc : ifcs) {
interfaces.addAll(getInterfaces(ifc));
}
clazz = clazz.getSuperclass();
}
return interfaces;
}
|
@Test
public void testGetInterfaces() {
Assertions.assertArrayEquals(new Object[]{Serializable.class},
ReflectionUtil.getInterfaces(Serializable.class).toArray());
Assertions.assertArrayEquals(new Object[]{
Map.class, Cloneable.class, Serializable.class},
ReflectionUtil.getInterfaces(HashMap.class).toArray());
}
|
@PrivateApi
public static void waitWithDeadline(Collection<? extends Future> futures, long timeout, TimeUnit timeUnit) {
waitWithDeadline(futures, timeout, timeUnit, IGNORE_ALL_EXCEPT_LOG_MEMBER_LEFT);
}
|
@Test
public void test_waitWithDeadline_failing_second() {
AtomicBoolean waitLock = new AtomicBoolean(true);
List<Future> futures = new ArrayList<>();
for (int i = 0; i < 2; i++) {
futures.add(executorService.submit(new FailingCallable(waitLock)));
}
ExceptionCollector exceptionHandler = new ExceptionCollector();
waitWithDeadline(futures, 5, TimeUnit.SECONDS, exceptionHandler);
assertEquals(1, exceptionHandler.throwables.size());
Throwable throwable = exceptionHandler.throwables.iterator().next();
assertTrue(throwable instanceof ExecutionException);
assertTrue(throwable.getCause() instanceof SpecialRuntimeException);
}
|
@Description("ChiSquared cdf given the df parameter and value")
@ScalarFunction
@SqlType(StandardTypes.DOUBLE)
public static double chiSquaredCdf(
@SqlType(StandardTypes.DOUBLE) double df,
@SqlType(StandardTypes.DOUBLE) double value)
{
checkCondition(value >= 0, INVALID_FUNCTION_ARGUMENT, "chiSquaredCdf Function: value must be non-negative");
checkCondition(df > 0, INVALID_FUNCTION_ARGUMENT, "chiSquaredCdf Function: df must be greater than 0");
ChiSquaredDistribution distribution = new ChiSquaredDistribution(null, df, ChiSquaredDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
return distribution.cumulativeProbability(value);
}
|
@Test
public void testChiSquaredCdf()
{
assertFunction("chi_squared_cdf(3, 0.0)", DOUBLE, 0.0);
assertFunction("round(chi_squared_cdf(3, 1.0), 4)", DOUBLE, 0.1987);
assertFunction("round(chi_squared_cdf(3, 2.5), 2)", DOUBLE, 0.52);
assertFunction("round(chi_squared_cdf(3, 4), 2)", DOUBLE, 0.74);
assertInvalidFunction("chi_squared_cdf(-3, 0.3)", "chiSquaredCdf Function: df must be greater than 0");
assertInvalidFunction("chi_squared_cdf(3, -10)", "chiSquaredCdf Function: value must non-negative");
}
|
@Override
public Iterator<E> iterator() {
return new ElementIterator();
}
|
@Test
public void testIterator() {
final int elementCount = 10;
final BitSet foundElements = new BitSet(elementCount);
final OAHashSet<Integer> set = new OAHashSet<>(8);
populateSet(set, elementCount);
for (int stored : set) {
foundElements.set(stored);
}
for (int i = 0; i < elementCount; i++) {
assertTrue(foundElements.get(i));
}
}
|
public static String parsePath(String uri, Map<String, String> patterns) {
if (uri == null) {
return null;
} else if (StringUtils.isBlank(uri)) {
return String.valueOf(SLASH);
}
CharacterIterator ci = new StringCharacterIterator(uri);
StringBuilder pathBuffer = new StringBuilder();
char c = ci.first();
if (c == CharacterIterator.DONE) {
return String.valueOf(SLASH);
}
do {
if (c == OPEN) {
String regexBuffer = cutParameter(ci, patterns);
if (regexBuffer == null) {
LOGGER.warn("Operation path \"{}\" contains syntax error.", uri);
return null;
}
pathBuffer.append(regexBuffer);
} else {
int length = pathBuffer.length();
if (!(c == SLASH && (length != 0 && pathBuffer.charAt(length - 1) == SLASH))) {
pathBuffer.append(c);
}
}
} while ((c = ci.next()) != CharacterIterator.DONE);
return pathBuffer.toString();
}
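// A small usage sketch (illustrative path): the patterns map acts as an out-parameter
// collecting each parameter's regex, while the returned path keeps only the name.
static void demoParsePath() {
Map<String, String> patterns = new HashMap<>();
String path = PathUtils.parsePath("/items/{id:\\d+}", patterns);
// path     -> "/items/{id}"
// patterns -> {id=\d+}
}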
|
@Test(description = "parse path with two params in one part")
public void parsePathWithTwoParamsInOnePart() {
final Map<String, String> regexMap = new HashMap<String, String>();
final String path = PathUtils.parsePath("/{a:\\w+}-{b:\\w+}/c", regexMap);
assertEquals(path, "/{a}-{b}/c");
assertEquals(regexMap.get("a"), "\\w+");
assertEquals(regexMap.get("b"), "\\w+");
}
|
@Override
public ListenableFuture<?> execute(CreateMaterializedView statement, TransactionManager transactionManager, Metadata metadata, AccessControl accessControl, Session session, List<Expression> parameters, WarningCollector warningCollector)
{
QualifiedObjectName viewName = createQualifiedObjectName(session, statement, statement.getName());
Optional<TableHandle> viewHandle = metadata.getMetadataResolver(session).getTableHandle(viewName);
if (viewHandle.isPresent()) {
if (!statement.isNotExists()) {
throw new SemanticException(MATERIALIZED_VIEW_ALREADY_EXISTS, statement, "Materialized view '%s' already exists", viewName);
}
return immediateFuture(null);
}
accessControl.checkCanCreateTable(session.getRequiredTransactionId(), session.getIdentity(), session.getAccessControlContext(), viewName);
accessControl.checkCanCreateView(session.getRequiredTransactionId(), session.getIdentity(), session.getAccessControlContext(), viewName);
Map<NodeRef<Parameter>, Expression> parameterLookup = parameterExtractor(statement, parameters);
Analyzer analyzer = new Analyzer(session, metadata, sqlParser, accessControl, Optional.empty(), parameters, parameterLookup, warningCollector);
Analysis analysis = analyzer.analyze(statement);
ConnectorId connectorId = metadata.getCatalogHandle(session, viewName.getCatalogName())
.orElseThrow(() -> new PrestoException(NOT_FOUND, "Catalog does not exist: " + viewName.getCatalogName()));
List<ColumnMetadata> columnMetadata = analysis.getOutputDescriptor(statement.getQuery())
.getVisibleFields().stream()
.map(field -> new ColumnMetadata(field.getName().get(), field.getType()))
.collect(toImmutableList());
Map<String, Expression> sqlProperties = mapFromProperties(statement.getProperties());
Map<String, Object> properties = metadata.getTablePropertyManager().getProperties(
connectorId,
viewName.getCatalogName(),
sqlProperties,
session,
metadata,
parameterLookup);
ConnectorTableMetadata viewMetadata = new ConnectorTableMetadata(
toSchemaTableName(viewName),
columnMetadata,
properties,
statement.getComment());
String sql = getFormattedSql(statement.getQuery(), sqlParser, Optional.of(parameters));
List<SchemaTableName> baseTables = analysis.getTableNodes().stream()
.map(table -> {
QualifiedObjectName tableName = createQualifiedObjectName(session, table, table.getName());
if (!viewName.getCatalogName().equals(tableName.getCatalogName())) {
throw new SemanticException(
NOT_SUPPORTED,
statement,
"Materialized view %s created from a base table in a different catalog %s is not supported.",
viewName, tableName);
}
return toSchemaTableName(tableName);
})
.distinct()
.collect(toImmutableList());
MaterializedViewColumnMappingExtractor extractor = new MaterializedViewColumnMappingExtractor(analysis, session);
MaterializedViewDefinition viewDefinition = new MaterializedViewDefinition(
sql,
viewName.getSchemaName(),
viewName.getObjectName(),
baseTables,
Optional.of(session.getUser()),
extractor.getMaterializedViewColumnMappings(),
extractor.getMaterializedViewDirectColumnMappings(),
extractor.getBaseTablesOnOuterJoinSide(),
Optional.empty());
try {
metadata.createMaterializedView(session, viewName.getCatalogName(), viewMetadata, viewDefinition, statement.isNotExists());
}
catch (PrestoException e) {
// connectors are not required to handle the ignoreExisting flag
if (!e.getErrorCode().equals(ALREADY_EXISTS.toErrorCode()) || !statement.isNotExists()) {
throw e;
}
}
return immediateFuture(null);
}
|
@Test
public void testCreateMaterializedViewExistsFalse()
{
SqlParser parser = new SqlParser();
String sql = String.format("CREATE MATERIALIZED VIEW %s AS SELECT 2021 AS col_0 FROM %s", MATERIALIZED_VIEW_B, TABLE_A);
CreateMaterializedView statement = (CreateMaterializedView) parser.createStatement(sql, ParsingOptions.builder().build());
QueryStateMachine stateMachine = QueryStateMachine.begin(
sql,
Optional.empty(),
testSession,
URI.create("fake://uri"),
new ResourceGroupId("test"),
Optional.empty(),
false,
transactionManager,
accessControl,
executorService,
metadata,
WarningCollector.NOOP);
WarningCollector warningCollector = stateMachine.getWarningCollector();
try {
getFutureValue(new CreateMaterializedViewTask(parser).execute(statement, transactionManager, metadata, accessControl, testSession, emptyList(), warningCollector));
fail("expected exception");
}
catch (RuntimeException e) {
// Expected
assertTrue(e instanceof PrestoException);
PrestoException prestoException = (PrestoException) e;
assertEquals(prestoException.getErrorCode(), ALREADY_EXISTS.toErrorCode());
}
assertEquals(metadata.getCreateMaterializedViewCallCount(), 0);
}
|
public static String normalizeUri(String uri) throws URISyntaxException {
// try to parse using the simpler and faster Camel URI parser
String[] parts = CamelURIParser.fastParseUri(uri);
if (parts != null) {
// we optimized specially if an empty array is returned
if (parts == URI_ALREADY_NORMALIZED) {
return uri;
}
// use the faster and more simple normalizer
return doFastNormalizeUri(parts);
} else {
// use the legacy normalizer as the uri is complex and may have unsafe URL characters
return doComplexNormalizeUri(uri);
}
}
|
@Test
public void testNormalizeHttpEndpoint() throws Exception {
String out1 = URISupport.normalizeUri("http://www.google.com?q=Camel");
String out2 = URISupport.normalizeUri("http:www.google.com?q=Camel");
assertEquals(out1, out2);
    assertTrue(out1.startsWith("http://"), "Should start with http://");
    assertTrue(out2.startsWith("http://"), "Should start with http://");
}
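A further sketch of the same normalizer, assuming (as Camel's complex normalizer does) that query parameters are ordered lexically, so parameter order no longer matters:
@Test
public void testNormalizeUriSortsQueryParameters() throws Exception {
    String out1 = URISupport.normalizeUri("smtp://localhost?username=davsclaus&password=secret");
    String out2 = URISupport.normalizeUri("smtp://localhost?password=secret&username=davsclaus");
    // both forms should normalize to a single canonical uri
    assertEquals(out1, out2);
    assertEquals("smtp://localhost?password=secret&username=davsclaus", out1);
}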
|
@Override
protected void runTask() {
if (backgroundJobServer.isRunning() && reentrantLock.tryLock()) {
try {
            LOGGER.trace("Looking for enqueued jobs...");
final AmountRequest workPageRequest = workDistributionStrategy.getWorkPageRequest();
if (workPageRequest.getLimit() > 0) {
final List<Job> enqueuedJobs = storageProvider.getJobsToProcess(backgroundJobServer, workPageRequest);
enqueuedJobs.forEach(backgroundJobServer::processJob);
LOGGER.debug("Found {} enqueued jobs to process.", enqueuedJobs.size());
}
} finally {
reentrantLock.unlock();
}
}
}
|
@Test
void testTaskCanHappenAgainAfterException() {
Job enqueuedJob1 = anEnqueuedJob().build();
Job enqueuedJob2 = anEnqueuedJob().build();
    when(storageProvider.getJobsToProcess(eq(backgroundJobServer), any()))
            // first poll fails, later polls return the two jobs and then nothing
            .thenThrow(new StorageException("Some error occurred"))
            .thenReturn(asList(enqueuedJob1, enqueuedJob2), emptyJobList());
    // run the failing poll on a background thread so the StorageException
    // does not fail the test; the reentrant lock must still be released
    new Thread(() -> runTask(task)).start();
    SleepUtils.sleep(500);
    // the second run must be able to re-acquire the lock and process the jobs
    runTask(task);
verify(backgroundJobServer).processJob(enqueuedJob1);
verify(backgroundJobServer).processJob(enqueuedJob2);
}
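A companion sketch for the guard at the top of runTask(), assuming backgroundJobServer and storageProvider are Mockito mocks in this fixture:
@Test
void testTaskDoesNothingWhenBackgroundJobServerIsStopped() {
    // with the server reported as not running, the task must not poll storage at all
    when(backgroundJobServer.isRunning()).thenReturn(false);
    runTask(task);
    verify(storageProvider, never()).getJobsToProcess(any(), any());
}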
|
@SuppressWarnings("fallthrough")
@Override
public void authenticate() throws IOException {
if (saslState != SaslState.REAUTH_PROCESS_HANDSHAKE) {
if (netOutBuffer != null && !flushNetOutBufferAndUpdateInterestOps())
return;
if (saslServer != null && saslServer.isComplete()) {
setSaslState(SaslState.COMPLETE);
return;
}
// allocate on heap (as opposed to any socket server memory pool)
if (netInBuffer == null) netInBuffer = new NetworkReceive(saslAuthRequestMaxReceiveSize, connectionId);
try {
netInBuffer.readFrom(transportLayer);
} catch (InvalidReceiveException e) {
throw new SaslAuthenticationException("Failing SASL authentication due to invalid receive size", e);
}
if (!netInBuffer.complete())
return;
netInBuffer.payload().rewind();
}
byte[] clientToken = new byte[netInBuffer.payload().remaining()];
netInBuffer.payload().get(clientToken, 0, clientToken.length);
netInBuffer = null; // reset the networkReceive as we read all the data.
try {
switch (saslState) {
case REAUTH_PROCESS_HANDSHAKE:
case HANDSHAKE_OR_VERSIONS_REQUEST:
case HANDSHAKE_REQUEST:
handleKafkaRequest(clientToken);
break;
case REAUTH_BAD_MECHANISM:
throw new SaslAuthenticationException(reauthInfo.badMechanismErrorMessage);
case INITIAL_REQUEST:
if (handleKafkaRequest(clientToken))
break;
// For default GSSAPI, fall through to authenticate using the client token as the first GSSAPI packet.
// This is required for interoperability with 0.9.0.x clients which do not send handshake request
case AUTHENTICATE:
handleSaslToken(clientToken);
// When the authentication exchange is complete and no more tokens are expected from the client,
// update SASL state. Current SASL state will be updated when outgoing writes to the client complete.
if (saslServer.isComplete())
setSaslState(SaslState.COMPLETE);
break;
default:
break;
}
} catch (AuthenticationException e) {
// Exception will be propagated after response is sent to client
setSaslState(SaslState.FAILED, e);
} catch (Exception e) {
// In the case of IOExceptions and other unexpected exceptions, fail immediately
saslState = SaslState.FAILED;
LOG.debug("Failed during {}: {}", reauthInfo.authenticationOrReauthenticationText(), e.getMessage());
throw e;
}
}
|
@Test
public void testUnexpectedRequestType() throws IOException {
TransportLayer transportLayer = mock(TransportLayer.class);
Map<String, ?> configs = Collections.singletonMap(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG,
Collections.singletonList(SCRAM_SHA_256.mechanismName()));
SaslServerAuthenticator authenticator = setupAuthenticator(configs, transportLayer,
SCRAM_SHA_256.mechanismName(), new DefaultChannelMetadataRegistry());
RequestHeader header = new RequestHeader(ApiKeys.METADATA, (short) 0, clientId, 13243);
ByteBuffer headerBuffer = RequestTestUtils.serializeRequestHeader(header);
when(transportLayer.read(any(ByteBuffer.class))).then(invocation -> {
invocation.<ByteBuffer>getArgument(0).putInt(headerBuffer.remaining());
return 4;
}).then(invocation -> {
// serialize only the request header. the authenticator should not parse beyond this
invocation.<ByteBuffer>getArgument(0).put(headerBuffer.duplicate());
return headerBuffer.remaining();
});
try {
authenticator.authenticate();
fail("Expected authenticate() to raise an exception");
} catch (IllegalSaslStateException e) {
// expected exception
}
verify(transportLayer, times(2)).read(any(ByteBuffer.class));
}
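The two stubbed reads above emulate Kafka's size-prefixed framing; a standalone sketch of that framing in plain java.nio (no Kafka types involved):
// first read delivers a 4-byte big-endian length, second read the payload;
// the receive is only considered complete once both parts have arrived
ByteBuffer payload = ByteBuffer.wrap("request-bytes".getBytes(StandardCharsets.UTF_8));
ByteBuffer frame = ByteBuffer.allocate(4 + payload.remaining());
frame.putInt(payload.remaining()); // what the first mocked read() supplies
frame.put(payload.duplicate());    // what the second mocked read() supplies
frame.flip();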
|
@Override
public V load(K key) {
awaitSuccessfulInit();
try (SqlResult queryResult = sqlService.execute(queries.load(), key)) {
Iterator<SqlRow> it = queryResult.iterator();
V value = null;
if (it.hasNext()) {
SqlRow sqlRow = it.next();
if (it.hasNext()) {
throw new IllegalStateException("multiple matching rows for a key " + key);
}
            // exactly two columns means key + a single value column; with the
            // singleColumnAsValue option set, return that column directly
if (queryResult.getRowMetadata().getColumnCount() == 2 && genericMapStoreProperties.singleColumnAsValue) {
value = sqlRow.getObject(1);
} else {
//noinspection unchecked
value = (V) toGenericRecord(sqlRow, genericMapStoreProperties);
}
}
return value;
}
}
|
@Test
public void givenTableNameProperty_whenCreateMapLoader_thenUseTableNameWithCustomSchema() {
    assumeTrue(objectProvider instanceof JdbcDatabaseProvider);
    var jdbcDatabaseProvider = (JdbcDatabaseProvider) objectProvider;
String schemaName = "custom_schema";
jdbcDatabaseProvider.createSchema(schemaName);
String tableName = randomName() + "-with-hyphen";
    String fullTableName = schemaName + "." + jdbcDatabaseProvider.quote(tableName);
ObjectSpec spec = objectProvider.createObject(fullTableName, false);
objectProvider.insertItems(spec, 1);
Properties properties = new Properties();
properties.setProperty(DATA_CONNECTION_REF_PROPERTY, TEST_DATABASE_REF);
properties.setProperty(EXTERNAL_NAME_PROPERTY, schemaName + ".\"" + tableName + "\"");
mapLoader = createMapLoader(properties, hz);
GenericRecord genericRecord = mapLoader.load(0);
assertThat(genericRecord).isNotNull();
}
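One more sketch against the same fixture; the "name" column is an assumption about the default test table created by objectProvider:
@Test
public void givenRow_whenLoad_thenGenericRecordContainsValueColumns() {
    ObjectSpec spec = objectProvider.createObject(randomName(), false);
    objectProvider.insertItems(spec, 1);
    Properties properties = new Properties();
    properties.setProperty(DATA_CONNECTION_REF_PROPERTY, TEST_DATABASE_REF);
    mapLoader = createMapLoader(properties, hz);
    // more than two columns, so load() wraps the row in a GenericRecord
    GenericRecord genericRecord = mapLoader.load(0);
    assertThat(genericRecord.getString("name")).isNotNull();
}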
|
public static String camelToSplitName(String camelName, String split) {
if (isEmpty(camelName)) {
return camelName;
}
if (!isWord(camelName)) {
// convert Ab-Cd-Ef to ab-cd-ef
if (isSplitCase(camelName, split.charAt(0))) {
return camelName.toLowerCase();
}
// not camel case
return camelName;
}
StringBuilder buf = null;
for (int i = 0; i < camelName.length(); i++) {
char ch = camelName.charAt(i);
if (ch >= 'A' && ch <= 'Z') {
if (buf == null) {
buf = new StringBuilder();
if (i > 0) {
buf.append(camelName, 0, i);
}
}
if (i > 0) {
buf.append(split);
}
buf.append(Character.toLowerCase(ch));
} else if (buf != null) {
buf.append(ch);
}
}
return buf == null ? camelName.toLowerCase() : buf.toString().toLowerCase();
}
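camelToSplitName() leans on two helpers not shown here; a hypothetical reconstruction that is consistent with the tests below (the real Dubbo implementations may differ):
// hypothetical: true when every char is an ASCII letter or digit
private static boolean isWord(String s) {
    for (int i = 0; i < s.length(); i++) {
        char ch = s.charAt(i);
        boolean word = (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9');
        if (!word) {
            return false;
        }
    }
    return true;
}

// hypothetical: true when the name mixes only letters, digits and the split
// char, i.e. it is already in split case apart from casing (e.g. Ab-Cd-Ef)
private static boolean isSplitCase(String s, char split) {
    for (int i = 0; i < s.length(); i++) {
        char ch = s.charAt(i);
        boolean ok = ch == split || (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9');
        if (!ok) {
            return false;
        }
    }
    return true;
}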
|
@Test
void testCamelToSplitName() throws Exception {
assertEquals("ab-cd-ef", StringUtils.camelToSplitName("abCdEf", "-"));
assertEquals("ab-cd-ef", StringUtils.camelToSplitName("AbCdEf", "-"));
assertEquals("abcdef", StringUtils.camelToSplitName("abcdef", "-"));
// assertEquals("name", StringUtils.camelToSplitName("NAME", "-"));
assertEquals("ab-cd-ef", StringUtils.camelToSplitName("ab-cd-ef", "-"));
assertEquals("ab-cd-ef", StringUtils.camelToSplitName("Ab-Cd-Ef", "-"));
assertEquals("Ab_Cd_Ef", StringUtils.camelToSplitName("Ab_Cd_Ef", "-"));
assertEquals("AB_CD_EF", StringUtils.camelToSplitName("AB_CD_EF", "-"));
assertEquals("ab.cd.ef", StringUtils.camelToSplitName("AbCdEf", "."));
// assertEquals("ab.cd.ef", StringUtils.camelToSplitName("ab-cd-ef", "."));
}
|
public void run(OutputReceiver<PartitionRecord> receiver) throws InvalidProtocolBufferException {
// Erase any existing missing partitions.
metadataTableDao.writeDetectNewPartitionMissingPartitions(new HashMap<>());
List<PartitionRecord> partitions = metadataTableDao.readAllStreamPartitions();
for (PartitionRecord partitionRecord : partitions) {
if (partitionRecord.getUuid().isEmpty()) {
partitionRecord.setUuid(UniqueIdGenerator.getNextId());
}
if (endTime != null) {
partitionRecord.setEndTime(endTime);
}
LOG.info("DNP: Outputting existing partition: {}", partitionRecord);
metrics.incListPartitionsCount();
receiver.outputWithTimestamp(partitionRecord, Instant.EPOCH);
}
List<NewPartition> newPartitions = metadataTableDao.readNewPartitionsIncludingDeleted();
for (NewPartition newPartition : newPartitions) {
processNewPartitionsAction.processNewPartition(newPartition, receiver);
}
}
|
@Test
public void testResetMissingPartitions() throws InvalidProtocolBufferException {
HashMap<ByteStringRange, Instant> missingPartitionsDuration = new HashMap<>();
missingPartitionsDuration.put(ByteStringRange.create("A", "B"), Instant.now());
metadataTableDao.writeDetectNewPartitionMissingPartitions(missingPartitionsDuration);
assertFalse(metadataTableDao.readDetectNewPartitionMissingPartitions().isEmpty());
action.run(receiver);
assertTrue(metadataTableDao.readDetectNewPartitionMissingPartitions().isEmpty());
}
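A second sketch against the same fixture, assuming receiver is a Mockito mock and the metadata table starts without stream or new partitions:
@Test
public void testNoPartitionsProducesNoOutput() throws InvalidProtocolBufferException {
    // with nothing persisted, run() has no existing or new partitions to emit
    action.run(receiver);
    verify(receiver, never()).outputWithTimestamp(any(), any());
}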
|