Dataset schema: two string columns per row.
- `focal_method` — Java method under test (string, length 13 to 60.9k characters)
- `test_case` — corresponding Java test method (string, length 25 to 109k characters)

| focal_method | test_case |
|---|---|
@SuppressWarnings({"unchecked", "UnstableApiUsage"})
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement) {
// Only DROP ... DELETE TOPIC statements are rewritten; everything else passes through.
if (!(statement.getStatement() instanceof DropStatement)) {
return statement;
}
final DropStatement dropStatement = (DropStatement) statement.getStatement();
if (!dropStatement.isDeleteTopic()) {
return statement;
}
final SourceName sourceName = dropStatement.getName();
final DataSource source = metastore.getSource(sourceName);
if (source != null) {
// Read-only (CREATE SOURCE) streams/tables must keep their backing topic.
if (source.isSource()) {
throw new KsqlException("Cannot delete topic for read-only source: " + sourceName.text());
}
checkTopicRefs(source);
deleteTopic(source);
// Closer guarantees the value-subject deletion runs even if the key-subject
// deletion throws (closables are closed in reverse registration order).
final Closer closer = Closer.create();
closer.register(() -> deleteKeySubject(source));
closer.register(() -> deleteValueSubject(source));
try {
closer.close();
} catch (final KsqlException e) {
// Preserve KsqlExceptions unwrapped for callers that match on them.
throw e;
} catch (final Exception e) {
throw new KsqlException(e);
}
} else if (!dropStatement.getIfExists()) {
throw new KsqlException("Could not find source to delete topic for: " + statement);
}
// Strip the DELETE TOPIC clause so downstream execution sees a plain DROP.
final T withoutDelete = (T) dropStatement.withoutDeleteClause();
final String withoutDeleteText = SqlFormatter.formatSql(withoutDelete) + ";";
return statement.withStatement(withoutDeleteText, withoutDelete);
}
|
/**
 * Verifies that the value-schema subject is still deleted from Schema Registry
 * even when deleting the key-schema subject throws.
 */
@Test
public void shouldDeleteValueAvroSchemaInSrEvenIfKeyDeleteFails() throws IOException, RestClientException {
// Given:
when(topic.getKeyFormat()).thenReturn(KeyFormat.of(FormatInfo.of(FormatFactory.AVRO.name()), SerdeFeatures.of(), Optional.empty()));
when(topic.getValueFormat()).thenReturn(ValueFormat.of(FormatInfo.of(FormatFactory.AVRO.name()),
SerdeFeatures.of()));
// Key-subject deletion (isKey == true) is made to fail.
doThrow(new KsqlException("foo"))
.when(registryClient)
.deleteSubject(KsqlConstants.getSRSubject("something", true));
// When:
assertThrows(KsqlException.class, () -> deleteInjector.inject(DROP_WITH_DELETE_TOPIC));
// Then: the value subject (isKey == false) was still deleted.
verify(registryClient).deleteSubject(KsqlConstants.getSRSubject("something", false));
}
|
/**
 * Runs the tool: initializes state, processes the namespace, and maps failures
 * to exit statuses. The dispatcher is always shut down, even on exceptions.
 */
private ExitStatus run() {
try {
init();
return new Processor().processNamespace().getExitStatus();
} catch (IllegalArgumentException e) {
// Bad CLI arguments: report to stdout only.
System.out.println(e + ". Exiting ...");
return ExitStatus.ILLEGAL_ARGUMENTS;
} catch (IOException e) {
// I/O failures are reported to both stdout and the log.
System.out.println(e + ". Exiting ...");
LOG.error(e + ". Exiting ...");
return ExitStatus.IO_EXCEPTION;
} finally {
dispatcher.shutdownNow();
}
}
|
/**
 * Verifies that the Mover exits with IO_EXCEPTION when an external
 * StoragePolicySatisfier is already running (simulated by acquiring the
 * mover ID path via a NameNodeConnector before invoking the Mover CLI).
 */
@Test(timeout = 300000)
public void testMoveWhenStoragePolicySatisfierIsRunning() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
      StoragePolicySatisfierMode.EXTERNAL.toString());
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .storageTypes(
          new StorageType[][] {{StorageType.DISK}, {StorageType.DISK},
              {StorageType.DISK}}).build();
  try {
    cluster.waitActive();
    // Simulate External sps by creating #getNameNodeConnector instance.
    DFSTestUtil.getNameNodeConnector(conf, HdfsServerConstants.MOVER_ID_PATH,
        1, true);
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testMoveWhenStoragePolicySatisfierIsRunning";
    // write to DISK; try-with-resources guarantees the stream is closed
    // even if the write fails (the original leaked it on exception).
    try (FSDataOutputStream out = dfs.create(new Path(file))) {
      out.writeChars("testMoveWhenStoragePolicySatisfierIsRunning");
    }
    // move to ARCHIVE
    dfs.setStoragePolicy(new Path(file), "COLD");
    // `file` is already a String; the redundant toString() was removed.
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] {"-p", file});
    int exitcode = ExitStatus.IO_EXCEPTION.getExitCode();
    Assert.assertEquals("Exit code should be " + exitcode, exitcode, rc);
  } finally {
    cluster.shutdown();
  }
}
|
/**
 * Validates the topic name and additionally requires it to be in the
 * persistent domain.
 *
 * NOTE(review): validateTopicName appears to populate the {@code topicName}
 * field as a side effect, which this method then inspects — confirm against
 * the superclass before relying on call order.
 */
protected void validatePersistentTopicName(String property, String namespace, String encodedTopic) {
validateTopicName(property, namespace, encodedTopic);
if (topicName.getDomain() != TopicDomain.persistent) {
throw new RestException(Status.NOT_ACCEPTABLE, "Need to provide a persistent topic name");
}
}
|
/**
 * Verifies that validating a non-persistent topic name is rejected with
 * HTTP 406 (NOT_ACCEPTABLE).
 */
@Test
public void testValidatePersistentTopicNameInvalid() {
String tenant = "test-tenant";
String namespace = "test-namespace";
String topic = Codec.encode("test-topic");
AdminResource nPResource = mockNonPersistentResource();
try {
nPResource.validatePersistentTopicName(tenant, namespace, topic);
fail("Should fail validation on non-persistent topic");
} catch (RestException e) {
assertEquals(Status.NOT_ACCEPTABLE.getStatusCode(), e.getResponse().getStatus());
}
}
|
/**
 * Builds a MetricsSource from an annotated source object using the default
 * annotated-metrics factory.
 */
public static MetricsSource makeSource(Object source) {
  MetricsSourceBuilder builder =
      new MetricsSourceBuilder(source, DefaultMetricsFactory.getAnnotatedMetricsFactory());
  return builder.build();
}
|
/** Verifies that class-level metric annotations produce the expected record and context tag. */
@Test public void testClasses() {
MetricsRecordBuilder rb = getMetrics(
MetricsAnnotations.makeSource(new MyMetrics3()));
MetricsCollector collector = rb.parent();
verify(collector).addRecord(info("MyMetrics3", "My metrics"));
verify(rb).add(tag(MsInfo.Context, "foo"));
}
|
/**
 * Demo entry point: loads the Bitstamp trade series from CSV and prints a
 * short summary plus details of the first bar to stdout.
 */
public static void main(String[] args) {
  BarSeries series = CsvTradesLoader.loadBitstampSeries();
  String seriesLine = "Series: " + series.getName() + " (" + series.getSeriesPeriodDescription() + ")";
  String countLine = "Number of bars: " + series.getBarCount();
  String firstBarLine = "First bar: \n" + "\tVolume: " + series.getBar(0).getVolume() + "\n"
      + "\tNumber of trades: " + series.getBar(0).getTrades() + "\n"
      + "\tClose price: " + series.getBar(0).getClosePrice();
  System.out.println(seriesLine);
  System.out.println(countLine);
  System.out.println(firstBarLine);
}
|
/** Smoke test: the example main must run to completion without throwing. */
@Test
public void test() {
CsvTradesLoader.main(null);
}
|
/**
 * Writes a single byte into the internal buffer, flushing first when the
 * buffer has no remaining capacity.
 */
@Override
public void write(int b) throws IOException {
  // Flush before writing when the buffer is full.
  if (bufferIdx >= buffer.length) {
    flushInternalBuffer();
  }
  buffer[bufferIdx++] = (byte) b;
}
|
/** Verifies that single-byte writes surface a failure from the primary stream. */
@Test
void testPrimaryWriteFail() throws Exception {
DuplicatingCheckpointOutputStream duplicatingStream =
createDuplicatingStreamWithFailingPrimary();
testFailingPrimaryStream(
duplicatingStream,
() -> {
// 128 writes ensure the internal buffer is exercised past its boundary.
for (int i = 0; i < 128; i++) {
duplicatingStream.write(42);
}
});
}
|
/**
 * Sanitizes a key format for a new key schema.
 *
 * When format changes are allowed, the format is first adjusted for the
 * number of key columns and then for type compatibility; in all cases the
 * wrapping features are sanitized based on whether there is a single key
 * column.
 */
public static KeyFormat sanitizeKeyFormat(
    final KeyFormat keyFormat,
    final List<SqlType> newKeyColumnSqlTypes,
    final boolean allowKeyFormatChangeToSupportNewKeySchema
) {
  final boolean singleKeyColumn = newKeyColumnSqlTypes.size() == 1;
  if (!allowKeyFormatChangeToSupportNewKeySchema) {
    return sanitizeKeyFormatWrapping(keyFormat, singleKeyColumn);
  }
  final KeyFormat multiColumnSanitized =
      sanitizeKeyFormatForMultipleColumns(keyFormat, newKeyColumnSqlTypes.size());
  final KeyFormat typeSanitized =
      sanitizeKeyFormatForTypeCompatibility(multiColumnSanitized, newKeyColumnSqlTypes);
  return sanitizeKeyFormatWrapping(typeSanitized, singleKeyColumn);
}
|
/**
 * Verifies that with format changes disallowed, a multi-column key keeps the
 * KAFKA format and features unchanged.
 */
@Test
public void shouldNotConvertFormatForMulticolKeysWhenSanitizingIfDisallowed() {
// Given:
final KeyFormat format = KeyFormat.nonWindowed(
FormatInfo.of(KafkaFormat.NAME),
SerdeFeatures.of());
// When:
final KeyFormat sanitized = SerdeFeaturesFactory.sanitizeKeyFormat(format, MULTI_SQL_TYPES, false);
// Then:
assertThat(sanitized.getFormatInfo(), equalTo(FormatInfo.of(KafkaFormat.NAME)));
assertThat(sanitized.getFeatures(), equalTo(SerdeFeatures.of()));
}
|
/**
 * Resolves environment-variable references in the given properties using the
 * current process environment.
 */
public static Map<String, String> resolveAll(Map<String, String> properties) {
  final Map<String, String> environment = System.getenv();
  return resolveAll(environment, properties);
}
|
/**
 * Verifies that every value prefixed with "env:" is substituted from the
 * environment while plain values are left untouched.
 */
@Test
public void testMultipleEnvironmentSubstitutions() {
Map<String, String> result =
EnvironmentUtil.resolveAll(
ImmutableMap.of("USER", "u", "VAR", "value"),
ImmutableMap.of("user-test", "env:USER", "other", "left-alone", "var", "env:VAR"));
assertThat(result)
.as("Should resolve all values starting with env:")
.isEqualTo(ImmutableMap.of("user-test", "u", "other", "left-alone", "var", "value"));
}
|
/**
 * Returns an immutable view of the given list, avoiding copies where a cheap
 * immutable form already exists (empty, singleton, or already immutable).
 */
public static <E> List<E> ensureImmutable(List<E> list) {
  if (list.isEmpty()) {
    return Collections.emptyList();
  }
  if (list.size() == 1) {
    // Copying into a singleton is cheaper than type-checking for one.
    return Collections.singletonList(list.get(0));
  }
  if (isImmutable(list)) {
    return list;
  }
  return Collections.unmodifiableList(new ArrayList<E>(list));
}
|
/** Verifies that the result for an empty input rejects mutation. */
@Test void ensureImmutable_returnsImmutableEmptyList() {
assertThrows(UnsupportedOperationException.class, () -> {
Lists.ensureImmutable(new ArrayList<>()).add("foo");
});
}
|
/**
 * Returns true when every format specifier in the pattern matches the
 * allowlist regex, i.e. no '%' survives after stripping allowed specifiers.
 */
@VisibleForTesting
static boolean onlyContainsSpecifiersInAllowList(String pattern) {
  String stripped = SPECIFIER_ALLOW_LIST_REGEX.matcher(pattern).replaceAll("");
  // Any '%' that survives the strip belongs to a specifier outside the allowlist.
  boolean hasDisallowedSpecifier = stripped.contains("%");
  return !hasDisallowedSpecifier;
}
|
/**
 * Exhaustively checks the specifier allowlist: string/char/boolean/hash
 * specifiers (with flags, width, precision, and argument indexes) pass,
 * while uppercase, numeric, and date/time specifiers are rejected.
 */
@Test
public void testOnlyContainsSpecifiersInAllowList() {
assertTrue(onlyContainsSpecifiersInAllowList("%%%n%b%h%c%s"));
assertTrue(onlyContainsSpecifiersInAllowList("%1$s%<s%1s%1.2s%2$-3.4s%<-42s"));
// Implies a Formattable argument, so no need to match here
assertFalse(onlyContainsSpecifiersInAllowList("%#s"));
// Use locale-aware uppercase
assertFalse(onlyContainsSpecifiersInAllowList("%S"));
assertFalse(onlyContainsSpecifiersInAllowList("%B"));
assertFalse(onlyContainsSpecifiersInAllowList("%H"));
assertFalse(onlyContainsSpecifiersInAllowList("%C"));
// Use locale-aware formatting
assertFalse(onlyContainsSpecifiersInAllowList("%d"));
assertFalse(onlyContainsSpecifiersInAllowList("%o"));
assertFalse(onlyContainsSpecifiersInAllowList("%x"));
assertFalse(onlyContainsSpecifiersInAllowList("%X"));
assertFalse(onlyContainsSpecifiersInAllowList("%e"));
assertFalse(onlyContainsSpecifiersInAllowList("%E"));
assertFalse(onlyContainsSpecifiersInAllowList("%f"));
assertFalse(onlyContainsSpecifiersInAllowList("%g"));
assertFalse(onlyContainsSpecifiersInAllowList("%G"));
assertFalse(onlyContainsSpecifiersInAllowList("%a"));
assertFalse(onlyContainsSpecifiersInAllowList("%A"));
assertFalse(onlyContainsSpecifiersInAllowList("%tc"));
assertFalse(onlyContainsSpecifiersInAllowList("%Tc"));
}
|
/**
 * Resolves a path after translating it into the chroot-relative full path.
 */
@Override
public Path resolvePath(final Path p) throws IOException {
  final Path absolute = fullPath(p);
  return super.resolvePath(absolute);
}
|
/** Verifies that paths resolve relative to the chroot base directory. */
@Test
public void testResolvePath() throws IOException {
Assert.assertEquals(chrootedTo, fSys.resolvePath(new Path("/")));
fileSystemTestHelper.createFile(fSys, "/foo");
Assert.assertEquals(new Path(chrootedTo, "foo"),
fSys.resolvePath(new Path("/foo")));
}
|
/**
 * Schedules the given event definition: looks it up (throwing
 * IllegalArgumentException when absent) and creates the job definition and
 * trigger when it is of a scheduled type.
 */
public void schedule(String eventDefinitionId) {
  createJobDefinitionAndTriggerIfScheduledType(
      getEventDefinitionOrThrowIAE(eventDefinitionId));
}
|
/** Verifies that scheduling an unknown event definition fails with IAE. */
@Test
@MongoDBFixtures("event-processors-without-schedule.json")
public void scheduleWithMissingEventDefinition() {
final String id = "54e3deadbeefdeadbeef9999";
// The event definition should not exist so our test works
assertThat(eventDefinitionService.get(id)).isNotPresent();
assertThatThrownBy(() -> handler.schedule(id))
.hasMessageContaining("doesn't exist")
.isInstanceOf(IllegalArgumentException.class);
}
|
/**
 * Logs at WARN level, evaluating the supplier only when WARN is enabled so
 * callers avoid the cost of computing the argument otherwise.
 */
public static void warn(final Logger logger, final String format, final Supplier<Object> supplier) {
  if (!logger.isWarnEnabled()) {
    return;
  }
  logger.warn(format, supplier.get());
}
|
/** Verifies the supplier is never evaluated when WARN logging is disabled. */
@Test
public void testNeverWarn() {
when(logger.isWarnEnabled()).thenReturn(false);
LogUtils.warn(logger, supplier);
verify(supplier, never()).get();
}
|
/**
 * Creates a copy of this response with the given content, retaining
 * duplicates of the key and extras buffers (when present) so reference
 * counts stay balanced.
 */
@Override
public FullBinaryMemcacheResponse replace(ByteBuf content) {
  ByteBuf duplicatedKey = key();
  if (duplicatedKey != null) {
    duplicatedKey = duplicatedKey.retainedDuplicate();
  }
  ByteBuf duplicatedExtras = extras();
  if (duplicatedExtras != null) {
    duplicatedExtras = duplicatedExtras.retainedDuplicate();
  }
  return newInstance(duplicatedKey, duplicatedExtras, content);
}
|
/** Verifies replace() yields an equivalent response carrying the new content. */
@Test
public void fullReplace() {
ByteBuf newContent = Unpooled.copiedBuffer("new value", CharsetUtil.UTF_8);
FullBinaryMemcacheResponse newInstance = response.replace(newContent);
try {
assertResponseEquals(response, newContent, newInstance);
} finally {
// Release both messages to keep buffer reference counts balanced.
response.release();
newInstance.release();
}
}
|
/** Delegates the single-byte write to the underlying output stream {@code bos}. */
@Override
public void write(int value) {
bos.write(value);
}
|
/**
 * Verifies that writing the last 2 bytes of a zero-padded array serializes
 * only those trailing bytes.
 */
@Test
public void serializeByteArrayWithLengthExceedingWithZeros() throws IOException {
assertArrayEquals(new byte[] {1, 2}, write( (o) -> o.write(new byte[] { 0, 0, 1, 2}, 2)));
}
|
/**
 * Creates a reference that is already set to the given non-null value.
 *
 * @throws NullPointerException if {@code value} is null
 */
public static <V> SetOnceReference<V> of(final V value) {
  final V nonNullValue = Objects.requireNonNull(value);
  return new SetOnceReference<>(nonNullValue);
}
|
/** Verifies that a reference created via of() is set and immutable. */
@Test
public void testFromOfWithValue() {
final Sentinel sentinel = new Sentinel();
checkSetReferenceIsImmutable(SetOnceReference.of(sentinel), sentinel);
}
|
/**
 * Creates one active task per (taskId, partitions) entry: builds the
 * subtopology, its state manager, and processor context, then delegates to
 * createActiveTask.
 *
 * @param consumer the consumer the tasks will read from
 * @param tasksToBeCreated task ids mapped to their assigned partitions
 * @return the created tasks, in map iteration order
 */
public Collection<Task> createTasks(final Consumer<byte[], byte[]> consumer,
final Map<TaskId, Set<TopicPartition>> tasksToBeCreated) {
final List<Task> createdTasks = new ArrayList<>();
for (final Map.Entry<TaskId, Set<TopicPartition>> newTaskAndPartitions : tasksToBeCreated.entrySet()) {
final TaskId taskId = newTaskAndPartitions.getKey();
final LogContext logContext = getLogContext(taskId);
final Set<TopicPartition> partitions = newTaskAndPartitions.getValue();
final ProcessorTopology topology = topologyMetadata.buildSubtopology(taskId);
// State manager owns changelog restoration and store lifecycle for the task.
final ProcessorStateManager stateManager = new ProcessorStateManager(
taskId,
Task.TaskType.ACTIVE,
eosEnabled(applicationConfig),
logContext,
stateDirectory,
storeChangelogReader,
topology.storeToChangelogTopic(),
partitions,
stateUpdaterEnabled);
final InternalProcessorContext<Object, Object> context = new ProcessorContextImpl(
taskId,
applicationConfig,
stateManager,
streamsMetrics,
cache
);
createdTasks.add(
createActiveTask(
taskId,
partitions,
consumer,
logContext,
topology,
stateManager,
context
)
);
}
return createdTasks;
}
|
/**
 * Verifies that a producer close failure is wrapped in a StreamsException
 * with the original exception preserved as the cause.
 */
@Test
public void shouldThrowStreamsExceptionOnErrorCloseThreadProducerIfEosDisabled() {
createTasks();
mockClientSupplier.producers.get(0).closeException = new RuntimeException("KABOOM!");
final StreamsException thrown = assertThrows(
StreamsException.class,
activeTaskCreator::closeThreadProducerIfNeeded
);
assertThat(thrown.getMessage(), is("Thread producer encounter error trying to close."));
assertThat(thrown.getCause().getMessage(), is("KABOOM!"));
}
|
/**
 * Collects every subquery segment (including nested ones) reachable from the
 * given SELECT statement.
 */
public static Collection<SubquerySegment> getSubquerySegments(final SelectStatement selectStatement) {
  final List<SubquerySegment> segments = new LinkedList<>();
  extractSubquerySegments(segments, selectStatement);
  return segments;
}
|
/** Verifies that a FROM-clause subquery nesting another subquery yields two segments. */
@Test
void assertGetSubquerySegmentsWithMultiNestedSubquery() {
SelectStatement selectStatement = mock(SelectStatement.class);
SubquerySegment subquerySelect = createSubquerySegmentForFrom();
when(selectStatement.getFrom()).thenReturn(Optional.of(new SubqueryTableSegment(0, 0, subquerySelect)));
Collection<SubquerySegment> actual = SubqueryExtractUtils.getSubquerySegments(selectStatement);
assertThat(actual.size(), is(2));
}
|
/** Looks up the value address for a key; delegates to get0 with a 0 default/flag. */
@Override public long get(long key) {
return super.get0(key, 0);
}
|
/** Verifies get() returns the same value address that insert() produced. */
@Test
public void testGet() {
final long key = random.nextLong();
final long valueAddress = insert(key).address();
final long valueAddress2 = hsa.get(key);
assertEquals(valueAddress, valueAddress2);
}
|
/** Accepts a declared type iff it is one of the supported simple types. */
@Override
public boolean accept(ProcessingEnvironment processingEnv, DeclaredType type) {
return isSimpleType(type);
}
|
/**
 * Verifies that all boxed primitives, String, BigDecimal, BigInteger, and
 * date/time fields are accepted, and a non-simple type is rejected.
 */
@Test
void testAccept() {
assertTrue(builder.accept(processingEnv, vField.asType()));
assertTrue(builder.accept(processingEnv, zField.asType()));
assertTrue(builder.accept(processingEnv, cField.asType()));
assertTrue(builder.accept(processingEnv, bField.asType()));
assertTrue(builder.accept(processingEnv, sField.asType()));
assertTrue(builder.accept(processingEnv, iField.asType()));
assertTrue(builder.accept(processingEnv, lField.asType()));
assertTrue(builder.accept(processingEnv, fField.asType()));
assertTrue(builder.accept(processingEnv, dField.asType()));
assertTrue(builder.accept(processingEnv, strField.asType()));
assertTrue(builder.accept(processingEnv, bdField.asType()));
assertTrue(builder.accept(processingEnv, biField.asType()));
assertTrue(builder.accept(processingEnv, dtField.asType()));
// false condition
assertFalse(builder.accept(processingEnv, invalidField.asType()));
}
|
/**
 * Scans the pattern into tokens using a small state machine
 * (LITERAL / START / DEFAULT_VAL states), then flushes any trailing
 * buffered text at end-of-string.
 *
 * Fix: the DEFAULT_VAL_STATE case previously fell through into
 * {@code default}. The default case is empty today so behavior was
 * unchanged, but the missing {@code break} was fragile — any future
 * default-case logic would have run after every DEFAULT_VAL_STATE
 * character. An explicit break is added.
 */
List<Token> tokenize() throws ScanException {
  List<Token> tokenList = new ArrayList<Token>();
  StringBuilder buf = new StringBuilder();
  while (pointer < patternLength) {
    char c = pattern.charAt(pointer);
    pointer++;
    switch (state) {
    case LITERAL_STATE:
      handleLiteralState(c, tokenList, buf);
      break;
    case START_STATE:
      handleStartState(c, tokenList, buf);
      break;
    case DEFAULT_VAL_STATE:
      handleDefaultValueState(c, tokenList, buf);
      break;
    default:
    }
  }
  // EOS: flush whatever the final state left buffered.
  switch (state) {
  case LITERAL_STATE:
    addLiteralToken(tokenList, buf);
    break;
  case DEFAULT_VAL_STATE:
    // trailing colon. see also LOGBACK-1140
    buf.append(CoreConstants.COLON_CHAR);
    addLiteralToken(tokenList, buf);
    break;
  case START_STATE:
    // trailing $. see also LOGBACK-1149
    buf.append(CoreConstants.DOLLAR);
    addLiteralToken(tokenList, buf);
    break;
  }
  return tokenList;
}
|
/** Regression test for LOGBACK-1149: a trailing '$' is emitted as a literal token. */
@Test
public void literalEndingWithDollar_LOGBACK_1149() throws ScanException {
String input = "a$";
Tokenizer tokenizer = new Tokenizer(input);
List<Token> tokenList = tokenizer.tokenize();
witnessList.add(new Token(Token.Type.LITERAL, "a"));
witnessList.add(new Token(Token.Type.LITERAL, "$"));
assertEquals(witnessList, tokenList);
}
|
/**
 * Configures the global sampler from a count expression. Blank input is
 * ignored; an unparseable expression deliberately falls back to
 * always-on sampling rather than failing.
 */
public static void setGlobalSampler(final String sampler) {
  if (StringUtils.isBlank(sampler)) {
    return;
  }
  try {
    globalSampler = CountSampler.create(sampler);
  } catch (Exception e) {
    // Best-effort: an invalid expression degrades to sampling everything.
    globalSampler = Sampler.ALWAYS_SAMPLE;
  }
}
|
/**
 * Verifies that the count expression "1" resolves to always-on sampling.
 *
 * Fix: the static field was read via {@code field.get("const")}, passing an
 * arbitrary String as the receiver. For static fields the receiver is
 * ignored, but the idiomatic (and non-misleading) form is
 * {@code field.get(null)}.
 */
@Test
public void testSetGlobalSampler() throws NoSuchFieldException, IllegalAccessException {
  LogCollectConfigUtils.setGlobalSampler("1");
  Field field = LogCollectConfigUtils.class.getDeclaredField("globalSampler");
  field.setAccessible(true);
  // null receiver: globalSampler is static, so no instance is needed.
  assertEquals(field.get(null), Sampler.ALWAYS_SAMPLE);
}
|
/**
 * Deserializes an AwsCredentialsProvider from its serialized string form;
 * delegates to the generic deserialize helper.
 */
public static AwsCredentialsProvider deserializeAwsCredentialsProvider(
String serializedCredentialsProvider) {
return deserialize(serializedCredentialsProvider, AwsCredentialsProvider.class);
}
|
/** Verifies that malformed input is rejected with IllegalArgumentException. */
@Test(expected = IllegalArgumentException.class)
public void testFailOnAwsCredentialsProviderDeserialization() {
AwsSerializableUtils.deserializeAwsCredentialsProvider("invalid string");
}
|
/**
 * Replaces this meta's contents with repository definitions parsed from the
 * given XML input stream.
 *
 * @param is XML source; parse failures (including a null stream) are wrapped
 *           in a KettleException with a localized message
 */
public void readDataFromInputStream( InputStream is ) throws KettleException {
// Clear the information
//
clear();
if ( log.isBasic() ) {
log.logBasic( BaseMessages.getString( PKG, "RepositoryMeta.Log.ReadingXMLFile", "FromInputStream" ) );
}
try {
// Check and open XML document
// Secure factory: external entities/DTDs disabled to prevent XXE.
DocumentBuilderFactory dbf = XMLParserFactoryProducer.createSecureDocBuilderFactory();
DocumentBuilder db = dbf.newDocumentBuilder();
Document doc = db.parse( is );
parseRepositoriesDoc( doc );
} catch ( Exception e ) {
throw new KettleException( BaseMessages.getString( PKG, "RepositoryMeta.Error.ReadingInfo" ), e );
}
}
|
/**
 * Verifies that parsing a null input stream (missing resource) produces the
 * expected localized error message.
 *
 * Fix: the original try-block had no guard after the call, so if no
 * exception were thrown the test would silently pass. An AssertionError is
 * now thrown when the expected KettleException does not occur (AssertionError
 * is not caught by the KettleException handler, so it propagates).
 */
@Test
public void testErrorReadingInputStream() throws Exception {
  try {
    repoMeta.readDataFromInputStream( getClass().getResourceAsStream( "filedoesnotexist.xml" ) );
    throw new AssertionError( "Expected a KettleException for a missing input stream" );
  } catch ( KettleException e ) {
    assertEquals( Const.CR
        + "Error reading information from file:" + Const.CR
        + "InputStream cannot be null" + Const.CR, e.getMessage() );
  }
}
|
/**
 * Iterates over the backing map's entries (a live view, not a copy).
 */
@Override
public Iterator<Map.Entry<String, Object>> iterator() {
  final Set<Map.Entry<String, Object>> entries = map.entrySet();
  return entries.iterator();
}
|
/** Verifies the adapter over an empty header map yields an empty iterator. */
@Test
public void noProperties() {
CamelMessagingHeadersExtractAdapter adapter = new CamelMessagingHeadersExtractAdapter(map, true);
Iterator<Map.Entry<String, Object>> iterator = adapter.iterator();
assertFalse(iterator.hasNext());
}
|
/**
 * Records metrics for incoming resource events: one counter keyed by
 * resource class + action, and — when the resource is namespaced — a second
 * counter additionally keyed by namespace.
 */
@Override
public void receivedEvent(Event event, Map<String, Object> metadata) {
log.debug("received event {}, metadata {}", event, metadata);
if (event instanceof ResourceEvent) {
final ResourceAction action = ((ResourceEvent) event).getAction();
final Optional<Class<? extends BaseResource<?, ?, ?, ?, ?>>> resource =
getResourceClass(metadata);
final Optional<String> namespaceOptional = event.getRelatedCustomResourceID().getNamespace();
// Counter without namespace, e.g. "<resource>.<action>.resource.event".
resource.ifPresent(
aClass -> getCounter(aClass, action.name().toLowerCase(), RESOURCE, EVENT).inc());
// Namespaced counter, e.g. "<resource>.<ns>.<action>.resource.event".
if (resource.isPresent() && namespaceOptional.isPresent()) {
getCounter(
resource.get(),
namespaceOptional.get(),
action.name().toLowerCase(),
RESOURCE,
EVENT)
.inc();
}
}
}
|
/**
 * Verifies that a namespaced ADDED event registers both the plain and the
 * namespace-qualified counters.
 */
@Test
void testReceivedEvent() {
Event event = new ResourceEvent(ResourceAction.ADDED, resourceId, buildNamespacedResource());
operatorMetrics.receivedEvent(event, metadata);
Map<String, Metric> metrics = operatorMetrics.metricRegistry().getMetrics();
assertEquals(2, metrics.size());
assertTrue(metrics.containsKey("sparkapplication.added.resource.event"));
assertTrue(metrics.containsKey("sparkapplication.testns.added.resource.event"));
}
|
/**
 * Publishes a RESUMED timeline event for a container, attaching the resume
 * diagnostics. The event is dropped silently when NM container events are
 * disabled or the container is no longer tracked in the context.
 */
@SuppressWarnings("unchecked")
private void publishContainerResumedEvent(
ContainerEvent event) {
if (publishNMContainerEvents) {
ContainerResumeEvent resumeEvent = (ContainerResumeEvent) event;
ContainerId containerId = resumeEvent.getContainerID();
ContainerEntity entity = createContainerEntity(containerId);
Map<String, Object> entityInfo = new HashMap<String, Object>();
entityInfo.put(ContainerMetricsConstants.DIAGNOSTICS_INFO,
resumeEvent.getDiagnostic());
entity.setInfo(entityInfo);
// Only publish for containers still known to this NM.
Container container = context.getContainers().get(containerId);
if (container != null) {
TimelineEvent tEvent = new TimelineEvent();
tEvent.setId(ContainerMetricsConstants.RESUMED_EVENT_TYPE);
tEvent.setTimestamp(event.getTimestamp());
entity.addEvent(tEvent);
dispatcher.getEventHandler().handle(new TimelinePublishEvent(entity,
containerId.getApplicationAttemptId().getApplicationId()));
}
}
}
|
/**
 * End-to-end check that a container resume event is published to the
 * timeline service with the RESUMED event type and its diagnostics info.
 */
@Test
public void testPublishContainerResumedEvent() {
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
ContainerId cId = ContainerId.newContainerId(appAttemptId, 1);
ContainerEvent containerEvent =
new ContainerResumeEvent(cId, "test resume");
publisher.createTimelineClient(appId);
publisher.publishContainerEvent(containerEvent);
publisher.stopTimelineClient(appId);
// Drain the async dispatcher before asserting on published entities.
dispatcher.await();
ContainerEntity cEntity = new ContainerEntity();
cEntity.setId(cId.toString());
TimelineEntity[] lastPublishedEntities =
timelineClient.getLastPublishedEntities();
Assert.assertNotNull(lastPublishedEntities);
Assert.assertEquals(1, lastPublishedEntities.length);
TimelineEntity entity = lastPublishedEntities[0];
Assert.assertEquals(cEntity, entity);
NavigableSet<TimelineEvent> events = entity.getEvents();
Assert.assertEquals(1, events.size());
Assert.assertEquals(ContainerMetricsConstants.RESUMED_EVENT_TYPE,
events.iterator().next().getId());
Map<String, Object> info = entity.getInfo();
Assert.assertTrue(
info.containsKey(ContainerMetricsConstants.DIAGNOSTICS_INFO));
Assert.assertEquals("test resume",
info.get(ContainerMetricsConstants.DIAGNOSTICS_INFO));
}
|
/**
 * Attaches a fire-and-forget side-effect task to the parent task.
 *
 * @deprecated use {@code parent.withSideEffect(t -> sideEffect)} directly;
 *             this is a thin delegating wrapper kept for compatibility.
 */
@Deprecated
public static <T> Task<T> withSideEffect(final Task<T> parent, final Task<?> sideEffect) {
return parent.withSideEffect(t -> sideEffect);
}
|
/**
 * Verifies that a task with a side effect completes as soon as the main task
 * does, without waiting for the (never-completing) side-effect task.
 */
@Test
public void testSideEffectPartialCompletion() throws InterruptedException {
// ensure that the whole can finish before the individual side effect task finishes.
Task<String> fastTask = new BaseTask<String>() {
@Override
protected Promise<? extends String> run(Context context) throws Exception {
return Promises.value("fast");
}
};
// this task will not complete.
Task<String> settableTask = new BaseTask<String>() {
@Override
protected Promise<? extends String> run(Context context) throws Exception {
return Promises.settable();
}
};
Task<String> withSideEffect = fastTask.withSideEffect(x -> settableTask);
runAndWait("TestTasks.testSideEffectPartialCompletion", withSideEffect);
assertTrue(withSideEffect.isDone());
assertTrue(fastTask.isDone());
// The side effect is still pending — completion must not depend on it.
assertFalse(settableTask.isDone());
}
|
/**
 * Converts an Avro schema string to a Flink DataType; delegates to the
 * two-argument overload with the flag defaulted to {@code true}.
 */
public static DataType convertToDataType(String avroSchemaString) {
return convertToDataType(avroSchemaString, true);
}
|
/** Verifies the generated User Avro schema converts to the expected DataType. */
@Test
void testConvertAvroSchemaToDataType() {
final String schema = User.getClassSchema().toString(true);
validateUserSchema(AvroSchemaConverter.convertToDataType(schema));
}
|
/** Parses a reader configuration from a DataX Configuration; delegates to HbaseSQLReaderConfig. */
public static HbaseSQLReaderConfig parseConfig(Configuration cfg) {
return HbaseSQLReaderConfig.parse(cfg);
}
|
/**
 * Verifies that the table name and ZooKeeper URL are parsed from the JSON
 * configuration.
 *
 * Fix: removed the leftover debug {@code System.out.println} (with its
 * "tablenae" typo) — the assertions below already state the expectations.
 */
@Test
public void testParseConfig() {
  Configuration config = Configuration.from(jsonStr);
  HbaseSQLReaderConfig readerConfig = HbaseSQLHelper.parseConfig(config);
  assertEquals("TABLE1", readerConfig.getTableName());
  assertEquals("hb-proxy-pub-xxx-001.hbase.rds.aliyuncs.com,hb-proxy-pub-xxx-002.hbase.rds.aliyuncs.com,hb-proxy-pub-xxx-003.hbase.rds.aliyuncs.com:2181", readerConfig.getZkUrl());
}
|
/**
 * Asynchronously applies the batched put/delete requests to ForStDB in a
 * single WriteBatch, then completes each request's state future. On any
 * failure, every request in the batch is failed and the returned future
 * completes exceptionally.
 */
@Override
public CompletableFuture<Void> process() {
return CompletableFuture.runAsync(
() -> {
// Pre-size the batch by a per-record byte estimate; try-with-resources
// releases the native WriteBatch handle.
try (WriteBatch writeBatch =
new WriteBatch(batchRequest.size() * PER_RECORD_ESTIMATE_BYTES)) {
for (ForStDBPutRequest<?, ?, ?> request : batchRequest) {
if (request.valueIsNull()) {
// put(key, null) == delete(key)
writeBatch.delete(
request.getColumnFamilyHandle(),
request.buildSerializedKey());
} else {
writeBatch.put(
request.getColumnFamilyHandle(),
request.buildSerializedKey(),
request.buildSerializedValue());
}
}
db.write(writeOptions, writeBatch);
// Only complete futures after the whole batch is durably written.
for (ForStDBPutRequest<?, ?, ?> request : batchRequest) {
request.completeStateFuture();
}
} catch (Exception e) {
String msg = "Error while write batch data to ForStDB.";
for (ForStDBPutRequest<?, ?, ?> request : batchRequest) {
// fail every state request in this batch
request.completeStateFutureExceptionally(msg, e);
}
// fail the whole batch operation
throw new CompletionException(msg, e);
}
},
executor);
}
|
/**
 * Writes 100 keys across two value states via a single batch and verifies
 * each serialized value landed in the correct column family.
 *
 * Fixes: (1) assertArrayEquals arguments were swapped — the expected value
 * is the request's serialized value, the actual is what the DB returned
 * (only affects failure messages, but misleading ones); (2) the
 * ExecutorService was never shut down, leaking threads across tests.
 */
@Test
public void testValueStateWriteBatch() throws Exception {
  ForStValueState<Integer, VoidNamespace, String> valueState1 =
      buildForStValueState("test-write-batch-1");
  ForStValueState<Integer, VoidNamespace, String> valueState2 =
      buildForStValueState("test-write-batch-2");
  List<ForStDBPutRequest<?, ?, ?>> batchPutRequest = new ArrayList<>();
  int keyNum = 100;
  for (int i = 0; i < keyNum; i++) {
    batchPutRequest.add(
        ForStDBPutRequest.of(
            buildContextKey(i),
            String.valueOf(i),
            ((i % 2 == 0) ? valueState1 : valueState2),
            new TestStateFuture<>()));
  }
  ExecutorService executor = Executors.newFixedThreadPool(2);
  try {
    ForStWriteBatchOperation writeBatchOperation =
        new ForStWriteBatchOperation(db, batchPutRequest, new WriteOptions(), executor);
    writeBatchOperation.process().get();
    // check data correctness
    for (ForStDBPutRequest<?, ?, ?> request : batchPutRequest) {
      byte[] keyBytes = request.buildSerializedKey();
      byte[] valueBytes = db.get(request.getColumnFamilyHandle(), keyBytes);
      assertArrayEquals(request.buildSerializedValue(), valueBytes);
    }
  } finally {
    executor.shutdownNow();
  }
}
|
/**
 * Resolves the SCM provider at startup: an explicit setting wins; otherwise
 * autodetection runs, then the legacy SCM URL property, and finally a
 * warning is emitted if nothing matched. Disabled SCM short-circuits
 * everything.
 *
 * NOTE(review): the autodetection-failure warning is only reached when the
 * provider key is absent — an explicitly configured but unsupported provider
 * takes the first branch. Confirm that is intended.
 */
@Override
public void start() {
if (isDisabled()) {
LOG.debug(MESSAGE_SCM_STEP_IS_DISABLED_BY_CONFIGURATION);
return;
}
if (settings.hasKey(SCM_PROVIDER_KEY)) {
settings.get(SCM_PROVIDER_KEY).ifPresent(this::setProviderIfSupported);
} else {
autodetection();
if (this.provider == null) {
// Fall back to the legacy scm:<provider>:... links property.
considerOldScmUrl();
}
if (this.provider == null) {
String message = "SCM provider autodetection failed. Please use \"" + SCM_PROVIDER_KEY + "\" to define SCM of " +
"your project, or disable the SCM Sensor in the project settings.";
LOG.warn(message);
analysisWarnings.addUnique(message);
}
}
if (isExclusionDisabled()) {
LOG.info(MESSAGE_SCM_EXCLUSIONS_IS_DISABLED_BY_CONFIGURATION);
}
}
|
/** Verifies that a malformed scm: link makes provider resolution fail with IAE. */
@Test
void fail_when_considerOldScmUrl_finds_invalid_provider_in_link() {
when(settings.get(ScannerProperties.LINKS_SOURCES_DEV)).thenReturn(Optional.of("scm:invalid"));
assertThatThrownBy(() -> underTest.start())
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("no SCM provider found for this key");
}
|
/**
 * Adds a field/value pair to the metadata map subject to inclusion filters
 * and size budgets. Tracks per-field and overall estimated sizes, truncating
 * values (and flagging the metadata as truncated) when limits are exceeded.
 *
 * The order of checks matters: include-filter, always-set/always-add
 * bypasses, first-value fast path, per-field value-count cap, then
 * byte-budget accounting.
 */
@Override
public void add(String field, String value, Map<String, String[]> data) {
if (! include(field, value)) {
return;
}
if (ALWAYS_SET_FIELDS.contains(field)) {
setAlwaysInclude(field, value, data);
return;
} else if (ALWAYS_ADD_FIELDS.contains(field)) {
addAlwaysInclude(field, value, data);
return;
}
StringSizePair filterKey = filterKey(field, value, data);
// First value for this key: no appending or size math needed yet.
if (! data.containsKey(filterKey.string)) {
setFilterKey(filterKey, value, data);
return;
}
String[] vals = data.get(filterKey.string);
if (vals != null && vals.length >= maxValuesPerField) {
setTruncated(data);
return;
}
Integer fieldSizeInteger = fieldSizes.get(filterKey.string);
int fieldSize = fieldSizeInteger == null ? 0 : fieldSizeInteger;
int maxAllowed = maxAllowedToAdd(filterKey);
if (maxAllowed <= 0) {
setTruncated(data);
return;
}
int valueLength = estimateSize(value);
String toAdd = value;
if (valueLength > maxAllowed) {
// Truncate to fit; a zero-length remainder means nothing to add.
toAdd = truncate(value, maxAllowed, data);
valueLength = estimateSize(toAdd);
if (valueLength == 0) {
return;
}
}
int addedOverall = valueLength;
if (fieldSizeInteger == null) {
//if there was no value before, we're adding
//a key. If there was a value before, do not
//add the key length.
addedOverall += filterKey.size;
}
estimatedSize += addedOverall;
fieldSizes.put(filterKey.string, valueLength + fieldSize);
data.put(filterKey.string, appendValue(data.get(filterKey.string), toAdd ));
}
|
/**
 * End-to-end check of the standard write filter configured via
 * TIKA-3695-fields.xml: non-allowlisted fields are dropped, repeated
 * creators are capped and truncated, and the metadata is flagged truncated.
 *
 * Fix: the mock document was assembled with repeated String concatenation in
 * a loop (accidental O(n^2)); it now uses a StringBuilder.
 */
@Test
public void testMetadataFactoryFieldsConfig() throws Exception {
  TikaConfig tikaConfig =
      new TikaConfig(TikaConfigTest.class.getResourceAsStream("TIKA-3695-fields.xml"));
  AutoDetectParserConfig config = tikaConfig.getAutoDetectParserConfig();
  MetadataWriteFilterFactory factory = config.getMetadataWriteFilterFactory();
  assertEquals(241, ((StandardWriteFilterFactory) factory).getMaxTotalEstimatedBytes());
  assertEquals(999, ((StandardWriteFilterFactory) factory).getMaxKeySize());
  assertEquals(10001, ((StandardWriteFilterFactory) factory).getMaxFieldSize());
  AutoDetectParser parser = new AutoDetectParser(tikaConfig);
  StringBuilder mock = new StringBuilder("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>" +
      "<mock>");
  mock.append("<metadata action=\"add\" name=\"dc:subject\">this is not a title</metadata>");
  mock.append("<metadata action=\"add\" name=\"dc:title\">this is a title</metadata>");
  for (int i = 0; i < 20; i++) {
    mock.append("<metadata action=\"add\" name=\"dc:creator\">01234567890123456789</metadata>");
  }
  mock.append("<write element=\"p\" times=\"30\"> hello </write>\n");
  mock.append("</mock>");
  Metadata metadata = new Metadata();
  metadata.add("dc:creator", "abcdefghijabcdefghij");
  metadata.add("not-allowed", "not-allowed");
  List<Metadata> metadataList =
      getRecursiveMetadata(
          new ByteArrayInputStream(mock.toString().getBytes(StandardCharsets.UTF_8)),
          parser, metadata, new ParseContext(), true);
  assertEquals(1, metadataList.size());
  metadata = metadataList.get(0);
  //test that this was removed during the filter existing stage
  assertNull(metadata.get("not-allowed"));
  //test that this was not allowed because it isn't in the "include" list
  assertNull(metadata.get("dc:subject"));
  String[] creators = metadata.getValues("dc:creator");
  assertEquals("abcdefghijabcdefghij", creators[0]);
  //this gets more than the other test because this is filtering out some fields
  assertEquals(3, creators.length);
  assertEquals("012345678901234", creators[2]);
  assertContainsCount(" hello ", metadata.get(TikaCoreProperties.TIKA_CONTENT), 30);
  assertTruncated(metadata);
}
|
/**
 * FEEL max(list): returns the maximum element, or an invalid-parameters
 * error when the list is null/empty or its items are not mutually
 * comparable.
 */
public FEELFnResult<Object> invoke(@ParameterName("list") List list) {
    if ( list == null || list.isEmpty() ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null or empty"));
    }
    try {
        Object maximum = Collections.max(list, new InterceptNotComparableComparator());
        return FEELFnResult.ofResult(maximum);
    } catch (ClassCastException e) {
        // The comparator surfaces non-comparable items as ClassCastException.
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "contains items that are not comparable"));
    }
}
|
/** Verifies that mixing non-comparable types yields an invalid-parameters error. */
@Test
void invokeArrayWithHeterogenousTypes() {
FunctionTestUtil.assertResultError(maxFunction.invoke(new Object[]{1, "test", BigDecimal.valueOf(10.2)}),
InvalidParametersEvent.class);
}
|
/**
 * Records a split count in the fixed-size history ring buffer, overwriting
 * the oldest entry once the buffer is full.
 */
synchronized void add(int splitCount) {
  // Ring-buffer write: position wraps around the array length.
  history[count % history.length] = splitCount;
  count++;
}
|
/**
 * Verifies that after five adds into a capacity-3 history only the newest
 * three values remain, in order.
 */
@Test
public void testTwoMoreThanFullHistory() {
EnumerationHistory history = new EnumerationHistory(3);
history.add(1);
history.add(2);
history.add(3);
history.add(4);
history.add(5);
int[] expectedHistorySnapshot = {3, 4, 5};
testHistory(history, expectedHistorySnapshot);
}
|
/** Builds default create options using the configured permission umask. */
public static CreateOptions defaults(AlluxioConfiguration conf) {
return new CreateOptions(conf.getString(PropertyKey.SECURITY_AUTHORIZATION_PERMISSION_UMASK));
}
|
/** Verifies the default options: no parent creation, non-atomic, no owner/group, umask-derived mode. */
@Test
public void defaults() throws IOException {
CreateOptions options = CreateOptions.defaults(mConfiguration);
assertFalse(options.getCreateParent());
assertFalse(options.isEnsureAtomic());
assertNull(options.getOwner());
assertNull(options.getGroup());
String umask = mConfiguration.getString(PropertyKey.SECURITY_AUTHORIZATION_PERMISSION_UMASK);
assertEquals(ModeUtils.applyFileUMask(Mode.defaults(), umask), options.getMode());
}
|
@Override
public final short readShort() throws EOFException {
    // Read at the current cursor, then advance it past the consumed bytes.
    final short value = readShort(pos);
    pos += SHORT_SIZE_IN_BYTES;
    return value;
}
|
@Test
public void testReadShortByteOrder() throws Exception {
    // Little-endian decode of the raw fixture bytes gives the expected value.
    short expected = Bits.readShortL(INIT_DATA, 0);
    short actual = in.readShort(LITTLE_ENDIAN);
    assertEquals(expected, actual);
}
|
@Override
public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
    // Initialize the encoder, decoder, flow controllers, and internal state.
    // This handler acts as the lifecycle manager for both codec halves so
    // close/shutdown events funnel through it.
    encoder.lifecycleManager(this);
    decoder.lifecycleManager(this);
    // Flow controllers need the channel context before any frame is processed.
    encoder.flowController().channelHandlerContext(ctx);
    decoder.flowController().channelHandlerContext(ctx);
    // (Re)create the preface decoder so the HTTP/2 connection preface is
    // expected again even if this handler instance is re-added to a pipeline.
    byteDecoder = new PrefaceDecoder(ctx);
}
|
@Test
public void verifyChannelHandlerCanBeReusedInPipeline() throws Exception {
    when(connection.isServer()).thenReturn(true);
    handler = newHandler();
    // Only read the connection preface...after preface is read internal state of Http2ConnectionHandler
    // is expected to change relative to the pipeline.
    ByteBuf preface = connectionPrefaceBuf();
    handler.channelRead(ctx, preface);
    // The bare preface contains no frames, so no frame decoding may occur yet.
    verify(decoder, never()).decodeFrame(any(ChannelHandlerContext.class),
        any(ByteBuf.class), ArgumentMatchers.<List<Object>>any());
    // Now remove and add the handler...this is setting up the test condition.
    handler.handlerRemoved(ctx);
    handler.handlerAdded(ctx);
    // Now verify we can continue as normal, reading connection preface plus more.
    ByteBuf prefacePlusSome = addSettingsHeader(Unpooled.buffer().writeBytes(connectionPrefaceBuf()));
    handler.channelRead(ctx, prefacePlusSome);
    // After re-adding, the handler must accept a fresh preface and decode frames.
    verify(decoder, atLeastOnce()).decodeFrame(eq(ctx), any(ByteBuf.class), ArgumentMatchers.<List<Object>>any());
}
|
@Override
public Collection<String> getJdbcUrlPrefixes() {
    // Use locale-independent lower-casing: under a Turkish default locale,
    // "ORACLE".toLowerCase() would produce a dotless-i variant and the prefix
    // would never match real JDBC URLs.
    return Collections.singletonList(String.format("jdbc:%s:", getType().toLowerCase(java.util.Locale.ROOT)));
}
|
@Test
void assertGetJdbcUrlPrefixes() {
    // The Oracle dialect must expose exactly the "jdbc:oracle:" prefix.
    DatabaseType databaseType = TypedSPILoader.getService(DatabaseType.class, "Oracle");
    assertThat(databaseType.getJdbcUrlPrefixes(), is(Collections.singletonList("jdbc:oracle:")));
}
|
@Override
public void onNext(final T next) {
    // Intentional no-op: this observer ignores emitted items and only reacts
    // to terminal signals handled elsewhere.
}
|
@Test
public void onNext() {
    // onNext is a deliberate no-op; this just verifies it accepts a value
    // without throwing.
    completeObserver.onNext(new Object());
}
|
@Override
public AbstractByteBuf encode(Object object, Map<String, String> context) {
    // Type-specific custom serializers take precedence over generic Hessian.
    CustomHessianSerializer customSerializer = getCustomSerializer(object);
    if (customSerializer != null) {
        return customSerializer.encodeObject(object, context);
    }
    // Fall back to Hessian2 serialization into an in-memory byte stream.
    UnsafeByteArrayOutputStream bos = new UnsafeByteArrayOutputStream();
    Hessian2Output hessianOutput = new Hessian2Output(bos);
    try {
        hessianOutput.setSerializerFactory(serializerFactory);
        hessianOutput.writeObject(object);
        hessianOutput.close(); // flushes the Hessian buffer into bos
        return new ByteStreamWrapperByteBuf(bos);
    } catch (Exception e) {
        throw buildSerializeError(e.getMessage(), e);
    }
}
|
@Test
public void encode() {
    AbstractByteBuf data = serializer.encode("xxx", null);
    // Round-trip: decoding with the right target type restores the value.
    String dst = (String) serializer.decode(data, String.class, null);
    Assert.assertEquals("xxx", dst);
    // Decoding with unusable targets must fail. Each call is kept inside its
    // own lambda so the original overload resolution is preserved exactly.
    assertDecodeFails(() -> serializer.decode(data, "", null));
    assertDecodeFails(() -> serializer.decode(data, null, null));
    assertDecodeFails(() -> serializer.decode(data, (Object) null, null));
}

/** Asserts that the given decode invocation throws some exception. */
private void assertDecodeFails(Runnable decodeCall) {
    boolean error = false;
    try {
        decodeCall.run();
    } catch (Exception e) {
        error = true;
    }
    Assert.assertTrue(error);
}
|
public Node getLeader(String clusterName) {
    // Lazily create the per-cluster map of raft-group -> leader node.
    Map<String/*raft-group*/, Node> map = leaders.computeIfAbsent(clusterName, k -> new ConcurrentHashMap<>());
    // Snapshot the current leaders; the backing map may change concurrently.
    List<Node> nodes = new ArrayList<>(map.values());
    if (nodes.isEmpty()) {
        return null;
    }
    // Pick a random group's leader to spread load across raft groups.
    return nodes.get(ThreadLocalRandom.current().nextInt(nodes.size()));
}
|
@Test
public void testGetLeader() {
    // No leader registered yet for this cluster.
    Assertions.assertNull(metadata.getLeader("leader"));
    // Register one leader node and expect it to be returned.
    Node node = new Node();
    node.setGroup("group");
    metadata.setLeaderNode("leader", node);
    Assertions.assertNotNull(metadata.getLeader("leader"));
}
|
// Returns the configured identity-provider certificate; the certificate is
// mandatory, so an absent value fails fast with IllegalArgumentException.
String getCertificate() {
    return configuration.get(CERTIFICATE).orElseThrow(() -> new IllegalArgumentException("Identity provider certificate is missing"));
}
|
@Test
public void fail_to_get_certificate_when_null() {
    // A missing certificate setting must surface as IllegalArgumentException
    // with the exact message shown to operators.
    assertThatThrownBy(() -> underTest.getCertificate())
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Identity provider certificate is missing");
}
|
/**
 * Builds the repartition-by-key stream step. Thin convenience overload that
 * delegates to the full build with the standard partition-by params factory.
 */
public static <K> KStreamHolder<K> build(
    final KStreamHolder<K> stream,
    final StreamSelectKey<K> selectKey,
    final RuntimeBuildContext buildContext
) {
    return build(stream, selectKey, buildContext, PartitionByParamsFactory::build);
}
|
@Test
public void shouldOnlyMap() {
    // When:
    StreamSelectKeyBuilder
        .build(stream, selectKey, buildContext, paramBuilder);
    // Then: key selection must be implemented as a single map() call and
    // nothing else on either the source or rekeyed stream.
    verify(kstream).map(any(), any());
    verifyNoMoreInteractions(kstream, rekeyedKstream);
}
|
/**
 * Returns everything after the first occurrence of {@code splitter} in
 * {@code s}, or {@code null} when the splitter does not occur.
 */
public static String substringAfter(String s, String splitter) {
    final int pos = s.indexOf(splitter);
    if (pos < 0) {
        return null;
    }
    return s.substring(pos + splitter.length());
}
|
@Test
void testSubstringAfterSplitterSingleChar() {
    // No splitter present -> null; present -> suffix after first occurrence.
    assertThat(substringAfter("15", "-")).isNull();
    assertThat(substringAfter("15-ea", "-")).isEqualTo("ea");
}
|
@Override
public Map<String, ScannerPlugin> installRequiredPlugins() {
    LOG.info("Loading required plugins");
    // A plugin is "required" when it declares no language restriction at all.
    InstallResult installResult = installPlugins(
        plugin -> plugin.getRequiredForLanguages() == null || plugin.getRequiredForLanguages().isEmpty());
    LOG.debug("Plugins not loaded because they are optional: {}", installResult.skippedPlugins);
    return installResult.installedPluginsByKey;
}
|
@Test
public void reload_list_if_plugin_uninstalled_during_blue_green_switch() throws IOException {
    // First call returns the "blue" plugin list, second the "green" one,
    // simulating a blue/green deployment switching mid-install.
    WsTestUtil.mockReader(wsClient, "api/plugins/installed",
        new InputStreamReader(getClass().getResourceAsStream("ScannerPluginInstallerTest/blue-installed.json")),
        new InputStreamReader(getClass().getResourceAsStream("ScannerPluginInstallerTest/green-installed.json")));
    // scmgit disappears (404) during the switch; the installer must re-fetch
    // the list and proceed with the remaining plugins.
    enqueueNotFoundDownload("scmgit", "abc");
    enqueueDownload("java", "def");
    enqueueDownload("cobol", "ghi");
    Map<String, ScannerPlugin> result = underTest.installRequiredPlugins();
    assertThat(result.keySet()).containsExactlyInAnyOrder("java", "cobol");
}
|
// Builds a fresh PipelineOptions proxy instance with all properties left at
// their defaults.
public static PipelineOptions create() {
    return new Builder().as(PipelineOptions.class);
}
|
@Test
public void testOptionsIdIsSet() throws Exception {
    ObjectMapper mapper =
        new ObjectMapper()
            .registerModules(ObjectMapper.findModules(ReflectHelpers.findClassLoader()));
    PipelineOptions options = PipelineOptionsFactory.create();
    // We purposely serialize/deserialize to get another instance. This allows to test if the
    // default has been set or not.
    PipelineOptions clone =
        mapper.readValue(mapper.writeValueAsString(options), PipelineOptions.class);
    // It is important that we don't call getOptionsId() before we have created the clone.
    // Both instances must report the same id, proving the id was assigned at
    // creation (and serialized), not lazily generated per getter call.
    assertEquals(options.getOptionsId(), clone.getOptionsId());
}
|
// Starts a native (raw SQL) query spec; the trailing boolean disables the
// "nested" mode for this entry point. Args are bound positionally to the SQL.
@Override
public NativeQuerySpec<Record> select(String sql, Object... args) {
    return new NativeQuerySpecImpl<>(this, sql, args, DefaultRecord::new, false);
}
|
@Test
public void testNative() {
    // Seed one row in each table sharing the same id so the join matches.
    database.dml()
        .insert("s_test_event")
        .value("id", "helper_testNative")
        .value("name", "Ename2")
        .execute()
        .sync();
    database.dml()
        .insert("s_test")
        .value("id", "helper_testNative")
        .value("name", "main2")
        .value("age", 20)
        .execute()
        .sync();
    DefaultQueryHelper helper = new DefaultQueryHelper(database);
    // Query params use table-alias-prefixed columns (e. / t.) plus sorts.
    QueryParamEntity param = QueryParamEntity
        .newQuery()
        .is("e.id", "helper_testNative")
        .is("t.age", "20")
        .orderByAsc("t.age")
        .getParam();
    {
        Sort sortByValue = new Sort();
        sortByValue.setName("t.id");
        sortByValue.setValue("1");
        param.getSorts().add(sortByValue);
    }
    {
        // Second sort on the same column with a different pinned value.
        Sort sortByValue = new Sort();
        sortByValue.setName("t.id");
        sortByValue.setValue("2");
        param.getSorts().add(sortByValue);
    }
    // Native join query: alias-prefixed filters/sorts must be applied on top
    // of the raw SQL, and the paged fetch must return the single seeded row.
    helper.select("select t.*,e.*,e.name ename,e.id `x.id` from s_test t " +
            "left join s_test_event e on e.id = t.id " +
            "where t.age = ?", 20)
        .logger(LoggerFactory.getLogger("org.hswebframework.test.native"))
        .where(param)
        .fetchPaged()
        .doOnNext(v -> System.out.println(JSON.toJSONString(v, SerializerFeature.PrettyFormat)))
        .as(StepVerifier::create)
        .expectNextCount(1)
        .verifyComplete();
    // Union query: the DSL filter/sort must wrap the whole union result.
    helper.select("select id,name from s_test t " +
            "union all select id,name from s_test_event")
        .where(dsl -> dsl
            .is("id", "helper_testNative")
            .orderByAsc("name"))
        .fetchPaged()
        .doOnNext(v -> System.out.println(JSON.toJSONString(v, SerializerFeature.PrettyFormat)))
        .as(StepVerifier::create)
        .expectNextCount(1)
        .verifyComplete();
}
|
@Override
public String grantAccessRequestBody(List<SecurityAuthConfig> authConfigs, Map<String, String> authSessionContext) {
    // Assemble the JSON payload the authorization plugin expects.
    Map<String, Object> body = new HashMap<>();
    body.put("auth_configs", getAuthConfigs(authConfigs));
    body.put("auth_session", authSessionContext);
    return GSON.toJson(body);
}
|
@Test
void grantAccessRequestBodyTests() {
    // One auth config plus session context must serialize into the documented
    // plugin request shape (ids, flattened configuration, session map).
    String json = converter.grantAccessRequestBody(List.of(new SecurityAuthConfig("github", "cd.go.github", create("url", false, "some-url"))), Map.of("foo", "bar"));
    String expectedRequestBody = """
        {
          "auth_configs": [
            {
              "id": "github",
              "configuration": {
                "url": "some-url"
              }
            }
          ],
          "auth_session": {
            "foo": "bar"
          }
        }""";
    assertThatJson(json).isEqualTo(expectedRequestBody);
}
|
// True when this sensor's own recording level is enabled under the level
// currently configured for the metrics registry.
public boolean shouldRecord() {
    return this.recordingLevel.shouldRecord(config.recordLevel().id);
}
|
@Test
public void testShouldRecordForDebugLevelSensor() {
    // A DEBUG-level sensor is suppressed under an INFO-configured registry...
    Sensor debugSensor = new Sensor(null, "debugSensor", null, INFO_CONFIG, Time.SYSTEM,
        0, Sensor.RecordingLevel.DEBUG);
    assertFalse(debugSensor.shouldRecord());
    // ...but records under DEBUG...
    debugSensor = new Sensor(null, "debugSensor", null, DEBUG_CONFIG, Time.SYSTEM,
        0, Sensor.RecordingLevel.DEBUG);
    assertTrue(debugSensor.shouldRecord());
    // ...and under the even more verbose TRACE configuration.
    debugSensor = new Sensor(null, "debugSensor", null, TRACE_CONFIG, Time.SYSTEM,
        0, Sensor.RecordingLevel.DEBUG);
    assertTrue(debugSensor.shouldRecord());
}
|
@Override
public void delete(String propertyKey) {
    // Reject blank/invalid keys before touching the database.
    checkPropertyKey(propertyKey);
    // Short-lived non-batch session; the commit makes the removal durable.
    try (DbSession dbSession = dbClient.openSession(false)) {
        dbClient.internalPropertiesDao().delete(dbSession, propertyKey);
        dbSession.commit();
    }
}
|
@Test
public void delete_shouldCallDaoAndDeleteProperty() {
    underTest.delete(SOME_KEY);
    // The DAO must be invoked with the same session that is then committed.
    verify(internalPropertiesDao).delete(dbSession, SOME_KEY);
    verify(dbSession).commit();
}
|
/**
 * Fetches the windowed values for {@code key} between {@code lower} and
 * {@code upper}, bypassing any caching layer: the first underlying store
 * that yields a non-empty iterator wins.
 */
public static WindowStoreIterator<ValueAndTimestamp<GenericRow>> fetch(
    final ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>> store,
    final GenericKey key,
    final Instant lower,
    final Instant upper
) {
    Objects.requireNonNull(key, "key can't be null");
    // Unwrap the composite store into its individual underlying stores.
    final List<ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>>> stores
        = getStores(store);
    // Fetch directly from the wrapped store, skipping the caching layer.
    final Function<ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>>,
        WindowStoreIterator<ValueAndTimestamp<GenericRow>>> fetchFunc = windowStore ->
        fetchUncached(windowStore, key, lower, upper);
    return findFirstNonEmptyIterator(stores, fetchFunc);
}
|
@Test
public void shouldThrowException_InvalidStateStoreException() throws InterruptedException, IllegalAccessException {
    // Wire a mock store chain whose inner fetch throws InvalidStateStoreException.
    when(provider.stores(any(), any())).thenReturn(ImmutableList.of(meteredWindowStore));
    SERDES_FIELD.set(meteredWindowStore, serdes);
    when(serdes.rawKey(any())).thenReturn(BYTES);
    when(meteredWindowStore.wrapped()).thenReturn(windowStore);
    when(windowStore.fetch(any(), any(), any())).thenThrow(
        new InvalidStateStoreException("Invalid"));
    final Exception e = assertThrows(
        InvalidStateStoreException.class,
        () -> WindowStoreCacheBypass.fetch(store, SOME_KEY,
            Instant.ofEpochMilli(100), Instant.ofEpochMilli(200))
    );
    // The raw store error must be rewrapped with the migration-friendly message.
    assertThat(e.getMessage(), containsString("State store is not "
        + "available anymore and may have been migrated to another instance"));
}
|
/**
 * Executes LIST TYPES: collects every registered custom type from the
 * metastore into a name -> schema map and wraps it in a TypeList entity.
 */
public static StatementExecutorResponse execute(
    final ConfiguredStatement<ListTypes> configuredStatement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
    final ImmutableMap.Builder<String, SchemaInfo> typesByName = ImmutableMap.builder();
    executionContext.getMetaStore().types().forEachRemaining(
        customType -> typesByName.put(customType.getName(), EntityUtil.schemaInfo(customType.getType())));
    return StatementExecutorResponse.handled(Optional.of(
        new TypeList(configuredStatement.getMaskedStatementText(), typesByName.build())));
}
|
@Test
public void shouldListTypes() {
    // When:
    final Optional<KsqlEntity> entity = ListTypesExecutor.execute(
        ConfiguredStatement.of(PreparedStatement.of("statement", new ListTypes(Optional.empty())),
            SessionConfig.of(KSQL_CONFIG, ImmutableMap.of())),
        mock(SessionProperties.class),
        context,
        null
    ).getEntity();
    // Then: the single registered type "foo" must be listed with its schema.
    assertThat("expected a response", entity.isPresent());
    assertThat(((TypeList) entity.get()).getTypes(), is(ImmutableMap.of(
        "foo", EntityUtil.schemaInfo(SqlPrimitiveType.of(SqlBaseType.STRING))
    )));
}
|
// True only when every device on the account advertises the delete-sync
// capability; a single incapable device disables the feature account-wide.
public boolean isDeleteSyncSupported() {
    return allDevicesHaveCapability(DeviceCapabilities::deleteSync);
}
|
@Test
void isDeleteSyncSupported() {
    // All devices capable -> supported.
    assertTrue(AccountsHelper.generateTestAccount("+18005551234", UUID.randomUUID(), UUID.randomUUID(),
        List.of(deleteSyncCapableDevice),
        "1234".getBytes(StandardCharsets.UTF_8)).isDeleteSyncSupported());
    // One incapable device disables the capability for the whole account.
    assertFalse(AccountsHelper.generateTestAccount("+18005551234", UUID.randomUUID(), UUID.randomUUID(),
        List.of(deleteSyncIncapableDevice, deleteSyncCapableDevice),
        "1234".getBytes(StandardCharsets.UTF_8)).isDeleteSyncSupported());
}
|
// Replaces the element at the given position in-place. Bounds checking is
// delegated to List.set (throws IndexOutOfBoundsException for bad indexes).
public void replaceData(int index, T newScesimData) {
    scesimData.set(index, newScesimData);
}
|
@Test
public void replaceData() {
    final Scenario replaced = model.getDataByIndex(3);
    final Scenario replacement = new Scenario();
    model.replaceData(3, replacement);
    // Size unchanged, old element gone, replacement sits at the same index.
    assertThat(model.scesimData).hasSize(SCENARIO_DATA).doesNotContain(replaced);
    assertThat(model.scesimData.get(3)).isEqualTo(replacement);
}
|
@Override
public String toString() {
    // Render as "(classname, name, actions)"; nulls print as "null",
    // matching String.format's %s behavior.
    return "(" + classname + ", " + name + ", " + actions + ")";
}
|
@Test
public void testToString() {
    // The string form is a simple parenthesized, comma-separated triple.
    Permission permission = new Permission("classname", "name", "actions");
    assertEquals("(classname, name, actions)", permission.toString());
}
|
@Override
public void deleteArticle(Long id) {
    // Validate the article exists (throws a service exception if not)
    validateArticleExists(id);
    // Delete it
    articleMapper.deleteById(id);
}
|
@Test
public void testDeleteArticle_notExists() {
    // Prepare arguments: a random id that was never persisted
    Long id = randomLongId();
    // Call and assert the expected "article not found" service exception
    assertServiceException(() -> articleService.deleteArticle(id), ARTICLE_NOT_EXISTS);
}
|
/**
 * A legal column name contains only letters, digits, '_' and '$'.
 * Note: an empty string vacuously passes this check.
 */
public static boolean isLegalColumn(String col) {
    for (int i = 0, len = col.length(); i < len; i++) {
        final char c = col.charAt(i);
        final boolean allowed = c == '_' || c == '$' || Character.isLetterOrDigit(c);
        if (!allowed) {
            return false;
        }
    }
    return true;
}
|
@Test
void testLegal(){
    // Underscore is allowed; dash and control characters are rejected.
    assertTrue(QueryHelperUtils.isLegalColumn("test_name"));
    assertFalse(QueryHelperUtils.isLegalColumn("test-name"));
    assertFalse(QueryHelperUtils.isLegalColumn("test\nname"));
}
|
/**
 * Computes the aggregated step/status view for a workflow instance run.
 * When {@code statusKnown} is false and a previous aggregated view exists,
 * the overall workflow status is derived rather than copied.
 */
public static WorkflowInstanceAggregatedInfo computeAggregatedView(
    WorkflowInstance workflowInstance, boolean statusKnown) {
    if (workflowInstance == null) {
        // returning empty object since cannot access state of the current instance run
        return new WorkflowInstanceAggregatedInfo();
    }
    // Build the per-step aggregation first; the instance-level status is
    // filled in below depending on what the caller already knows.
    WorkflowInstanceAggregatedInfo instanceAggregated =
        computeAggregatedViewNoStatus(workflowInstance);
    if (statusKnown || workflowInstance.getAggregatedInfo() == null) {
        instanceAggregated.setWorkflowInstanceStatus(workflowInstance.getStatus());
    } else {
        // Status unknown but prior aggregated info exists: derive the status.
        computeAndSetAggregatedInstanceStatus(workflowInstance, instanceAggregated);
    }
    return instanceAggregated;
}
|
@Test
public void testAggregatedViewDoublePreviouslySucceeded() {
    // run1: a fully SUCCEEDED fresh run covering step1..step5.
    WorkflowInstance run1 =
        getGenericWorkflowInstance(
            1, WorkflowInstance.Status.SUCCEEDED, RunPolicy.START_FRESH_NEW_RUN, null);
    Workflow runtimeWorkflow = mock(Workflow.class);
    Map<String, StepRuntimeState> decodedOverview = new LinkedHashMap<>();
    decodedOverview.put("step1", generateStepState(StepInstance.Status.SUCCEEDED, 1L, 2L));
    decodedOverview.put("step2", generateStepState(StepInstance.Status.SUCCEEDED, 3L, 4L));
    decodedOverview.put("step3", generateStepState(StepInstance.Status.SUCCEEDED, 5L, 6L));
    decodedOverview.put("step4", generateStepState(StepInstance.Status.SUCCEEDED, 7L, 8L));
    decodedOverview.put("step5", generateStepState(StepInstance.Status.SUCCEEDED, 9L, 10L));
    WorkflowRuntimeOverview wro = mock(WorkflowRuntimeOverview.class);
    doReturn(decodedOverview).when(wro).decodeStepOverview(run1.getRuntimeDag());
    run1.setRuntimeOverview(wro);
    run1.setRuntimeWorkflow(runtimeWorkflow);
    run1.getRuntimeDag().put("step4", new StepTransition());
    run1.getRuntimeDag().put("step5", new StepTransition());
    // Aggregating run1 alone must mirror its step states and SUCCEEDED status.
    WorkflowInstanceAggregatedInfo aggregated =
        AggregatedViewHelper.computeAggregatedView(run1, false);
    assertEquals(1L, aggregated.getStepAggregatedViews().get("step1").getStartTime().longValue());
    assertEquals(3L, aggregated.getStepAggregatedViews().get("step2").getStartTime().longValue());
    assertEquals(5L, aggregated.getStepAggregatedViews().get("step3").getStartTime().longValue());
    assertEquals(7L, aggregated.getStepAggregatedViews().get("step4").getStartTime().longValue());
    assertEquals(9L, aggregated.getStepAggregatedViews().get("step5").getStartTime().longValue());
    assertEquals(WorkflowInstance.Status.SUCCEEDED, aggregated.getWorkflowInstanceStatus());
    // run2: a FAILED restart-from-beginning covering only step3..step5.
    WorkflowInstance run2 =
        getGenericWorkflowInstance(
            2,
            WorkflowInstance.Status.FAILED,
            RunPolicy.RESTART_FROM_SPECIFIC,
            RestartPolicy.RESTART_FROM_BEGINNING);
    Map<String, StepRuntimeState> decodedOverview2 = new LinkedHashMap<>();
    decodedOverview2.put("step3", generateStepState(StepInstance.Status.FATALLY_FAILED, 11L, 12L));
    decodedOverview2.put("step4", generateStepState(StepInstance.Status.NOT_CREATED, 14L, 15L));
    decodedOverview2.put("step5", generateStepState(StepInstance.Status.NOT_CREATED, 16L, 17L));
    Map<String, StepTransition> run2Dag = new LinkedHashMap<>();
    run2Dag.put("step3", new StepTransition());
    run2Dag.put("step4", new StepTransition());
    run2Dag.put("step5", new StepTransition());
    run2.setRuntimeDag(run2Dag);
    doReturn(run1)
        .when(workflowInstanceDao)
        .getWorkflowInstanceRun(run2.getWorkflowId(), run2.getWorkflowInstanceId(), 1L);
    // run2 carries run1's aggregated view as its baseline before executing.
    run2.setAggregatedInfo(AggregatedViewHelper.computeAggregatedView(run1, false));
    assertEquals(5, run2.getAggregatedInfo().getStepAggregatedViews().size());
    assertEquals(
        StepInstance.Status.SUCCEEDED,
        run2.getAggregatedInfo().getStepAggregatedViews().get("step1").getStatus());
    assertEquals(
        StepInstance.Status.SUCCEEDED,
        run2.getAggregatedInfo().getStepAggregatedViews().get("step2").getStatus());
    assertEquals(
        StepInstance.Status.SUCCEEDED,
        run2.getAggregatedInfo().getStepAggregatedViews().get("step3").getStatus());
    assertEquals(
        StepInstance.Status.SUCCEEDED,
        run2.getAggregatedInfo().getStepAggregatedViews().get("step4").getStatus());
    assertEquals(
        StepInstance.Status.SUCCEEDED,
        run2.getAggregatedInfo().getStepAggregatedViews().get("step5").getStatus());
    assertEquals(
        1L,
        run2.getAggregatedInfo().getStepAggregatedViews().get("step1").getStartTime().longValue());
    assertEquals(
        3L,
        run2.getAggregatedInfo().getStepAggregatedViews().get("step2").getStartTime().longValue());
    // NOTE(review): the next three lines re-assert run1's "aggregated" object;
    // presumably they were meant to check run2.getAggregatedInfo() start times
    // for step3..step5 — confirm intent.
    assertEquals(5L, aggregated.getStepAggregatedViews().get("step3").getStartTime().longValue());
    assertEquals(7L, aggregated.getStepAggregatedViews().get("step4").getStartTime().longValue());
    assertEquals(9L, aggregated.getStepAggregatedViews().get("step5").getStartTime().longValue());
    WorkflowRuntimeOverview wro2 = mock(WorkflowRuntimeOverview.class);
    doReturn(decodedOverview2).when(wro2).decodeStepOverview(run2.getRuntimeDag());
    run2.setRuntimeOverview(wro2);
    run2.setRuntimeWorkflow(runtimeWorkflow);
    // Aggregating run2 must overlay its own step3..5 results on run1's baseline.
    WorkflowInstanceAggregatedInfo aggregated2 =
        AggregatedViewHelper.computeAggregatedView(run2, false);
    assertEquals(5, aggregated2.getStepAggregatedViews().size());
    assertEquals(
        StepInstance.Status.SUCCEEDED,
        aggregated2.getStepAggregatedViews().get("step1").getStatus());
    assertEquals(
        StepInstance.Status.SUCCEEDED,
        aggregated2.getStepAggregatedViews().get("step2").getStatus());
    assertEquals(
        StepInstance.Status.FATALLY_FAILED,
        aggregated2.getStepAggregatedViews().get("step3").getStatus());
    assertEquals(
        StepInstance.Status.NOT_CREATED,
        aggregated2.getStepAggregatedViews().get("step4").getStatus());
    assertEquals(
        StepInstance.Status.NOT_CREATED,
        aggregated2.getStepAggregatedViews().get("step5").getStatus());
    assertEquals(1L, aggregated2.getStepAggregatedViews().get("step1").getStartTime().longValue());
    assertEquals(3L, aggregated2.getStepAggregatedViews().get("step2").getStartTime().longValue());
    assertEquals(11L, aggregated2.getStepAggregatedViews().get("step3").getStartTime().longValue());
    assertEquals(14L, aggregated2.getStepAggregatedViews().get("step4").getStartTime().longValue());
    assertEquals(16L, aggregated2.getStepAggregatedViews().get("step5").getStartTime().longValue());
    assertEquals(WorkflowInstance.Status.FAILED, aggregated2.getWorkflowInstanceStatus());
}
|
// Minimal escaping into sb: only '+' and '%' are percent-encoded; every
// other character is appended verbatim.
static void urlEncode(String str, StringBuilder sb) {
    for (int idx = 0; idx < str.length(); ++idx) {
        final char c = str.charAt(idx);
        switch (c) {
            case '+':
                sb.append("%2B");
                break;
            case '%':
                sb.append("%25");
                break;
            default:
                sb.append(c);
        }
    }
}
|
@Test
void testUrlEncodeByPercent() {
    // Arrange: the builder already carries a prefix that must be preserved.
    final StringBuilder sb = new StringBuilder("??????");
    // Act
    GroupKey2.urlEncode("%", sb);
    // Assert side effects: '%' is appended as its "%25" escape.
    assertNotNull(sb);
    assertEquals("??????%25", sb.toString());
}
|
// Blocking tail-take: delegates to the async variant and waits, translating
// thread interruption via getInterrupted.
@Override
public V takeLast() throws InterruptedException {
    return commandExecutor.getInterrupted(takeLastAsync());
}
|
@Test
public void testTakeLast() throws InterruptedException {
    RBlockingDeque<Integer> deque = redisson.getPriorityBlockingDeque("queue:take");
    deque.add(1);
    deque.add(2);
    deque.add(3);
    deque.add(4);
    // takeLast drains from the tail, so elements come back in reverse order.
    assertThat(deque.takeLast()).isEqualTo(4);
    assertThat(deque.takeLast()).isEqualTo(3);
    assertThat(deque.takeLast()).isEqualTo(2);
    assertThat(deque.takeLast()).isEqualTo(1);
    assertThat(deque.size()).isZero();
}
|
@Override
public void getConfig(CloudTokenDataPlaneFilterConfig.Builder builder) {
    // Only clients that actually hold tokens are emitted into the filter
    // config; permissions are rendered as sorted strings for stable output.
    var clientsCfg = clients.stream()
        .filter(c -> !c.tokens().isEmpty())
        .map(x -> new CloudTokenDataPlaneFilterConfig.Clients.Builder()
            .id(x.id())
            .tokens(tokensConfig(x.tokens()))
            .permissions(x.permissions().stream().map(Client.Permission::asString).sorted().toList()))
        .toList();
    builder.clients(clientsCfg).tokenContext(tokenContext);
}
|
@Test
void generates_correct_config_for_tokens() throws IOException {
    // Build a model from a services.xml template referencing a generated cert.
    var certFile = securityFolder.resolve("foo.pem");
    var clusterElem = DomBuilderTest.parse(servicesXmlTemplate.formatted(applicationFolder.toPath().relativize(certFile).toString()));
    createCertificate(certFile);
    buildModel(Set.of(tokenEndpoint, mtlsEndpoint), defaultTokens, clusterElem);
    var cfg = root.getConfig(CloudTokenDataPlaneFilterConfig.class, filterConfigId);
    // The token client "bar" must appear with its permission and full token
    // config (fingerprints, access-check hashes, expirations).
    var tokenClient = cfg.clients().stream().filter(c -> c.id().equals("bar")).findAny().orElse(null);
    assertNotNull(tokenClient);
    assertEquals(List.of("read"), tokenClient.permissions());
    var expectedTokenCfg = tokenConfig(
        "my-token", List.of("myfingerprint1", "myfingerprint2"), List.of("myaccesshash1", "myaccesshash2"),
        List.of("<none>", "2243-10-17T00:00:00Z"));
    assertEquals(List.of(expectedTokenCfg), tokenClient.tokens());
}
|
/**
 * Chooses the home subcluster for an application. When the submission carries
 * the node/rack/ANY triple produced upstream, the application is placed on the
 * subcluster owning the requested node; on any validation failure the request
 * is rewritten to a plain ANY request and handed to the weighted-random
 * fallback policy.
 */
@Override
public SubClusterId getHomeSubcluster(
    ApplicationSubmissionContext appSubmissionContext,
    List<SubClusterId> blackListSubClusters) throws YarnException {
  // null checks and default-queue behavior
  validate(appSubmissionContext);
  List<ResourceRequest> rrList =
      appSubmissionContext.getAMContainerResourceRequests();
  // Fast path for FailForward to WeightedRandomRouterPolicy
  if (rrList == null || rrList.isEmpty() || (rrList.size() == 1
      && ResourceRequest.isAnyLocation(rrList.get(0).getResourceName()))) {
    return super.getHomeSubcluster(appSubmissionContext, blackListSubClusters);
  }
  if (rrList.size() != 3) {
    throw new FederationPolicyException(
        "Invalid number of resource requests: " + rrList.size());
  }
  Map<SubClusterId, SubClusterInfo> activeSubClusters = getActiveSubclusters();
  // Defensive copy: keySet() is a live view of the map, so removing the
  // blacklisted ids from it directly would silently mutate the set of active
  // subclusters returned by getActiveSubclusters().
  Set<SubClusterId> validSubClusters =
      new java.util.HashSet<>(activeSubClusters.keySet());
  FederationPolicyUtils.validateSubClusterAvailability(activeSubClusters.keySet(),
      blackListSubClusters);
  if (blackListSubClusters != null) {
    // Remove the blacklisted SubClusters from the candidate set
    validSubClusters.removeAll(blackListSubClusters);
  }
  try {
    // With three requests, this has been processed by the
    // ResourceRequestInterceptorREST, and should have
    // node, rack, and any
    SubClusterId targetId = null;
    ResourceRequest nodeRequest = null;
    ResourceRequest rackRequest = null;
    ResourceRequest anyRequest = null;
    for (ResourceRequest rr : rrList) {
      // Handle "node" requests: the last resolvable name wins
      try {
        targetId = resolver.getSubClusterForNode(rr.getResourceName());
        nodeRequest = rr;
      } catch (YarnException e) {
        LOG.error("Cannot resolve node : {}.", e.getMessage());
      }
      // Handle "rack" requests
      try {
        resolver.getSubClustersForRack(rr.getResourceName());
        rackRequest = rr;
      } catch (YarnException e) {
        LOG.error("Cannot resolve rack : {}.", e.getMessage());
      }
      // Handle "ANY" requests
      if (ResourceRequest.isAnyLocation(rr.getResourceName())) {
        anyRequest = rr;
      }
    }
    // The interceptor contract requires all three request flavors.
    if (nodeRequest == null) {
      throw new YarnException("Missing node request.");
    }
    if (rackRequest == null) {
      throw new YarnException("Missing rack request.");
    }
    if (anyRequest == null) {
      throw new YarnException("Missing any request.");
    }
    LOG.info("Node request: {} , Rack request: {} , Any request: {}.",
        nodeRequest.getResourceName(), rackRequest.getResourceName(),
        anyRequest.getResourceName());
    // Place on the node's subcluster only if it is active, not blacklisted,
    // and enabled for this policy.
    if (validSubClusters.contains(targetId) && enabledSCs
        .contains(targetId)) {
      LOG.info("Node {} is in SubCluster: {}.", nodeRequest.getResourceName(), targetId);
      return targetId;
    } else {
      throw new YarnException("The node " + nodeRequest.getResourceName()
          + " is in a blacklist SubCluster or not active. ");
    }
  } catch (YarnException e) {
    LOG.error("Validating resource requests failed, " +
        "Falling back to WeightedRandomRouterPolicy placement : {}.", e.getMessage());
    // FailForward to WeightedRandomRouterPolicy
    // Overwrite request to use a default ANY
    ResourceRequest amReq = Records.newRecord(ResourceRequest.class);
    amReq.setPriority(appSubmissionContext.getPriority());
    amReq.setResourceName(ResourceRequest.ANY);
    amReq.setCapability(appSubmissionContext.getResource());
    amReq.setNumContainers(1);
    amReq.setRelaxLocality(true);
    amReq.setNodeLabelExpression(appSubmissionContext.getNodeLabelExpression());
    amReq.setExecutionTypeRequest(ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED));
    appSubmissionContext.setAMContainerResourceRequests(Collections.singletonList(amReq));
    return super.getHomeSubcluster(appSubmissionContext, blackListSubClusters);
  }
}
|
@Test
public void testMultipleResourceRequests() throws YarnException {
    // Two AM resource requests is neither the single-ANY fast path nor the
    // expected node/rack/ANY triple, so the policy must reject it.
    List<ResourceRequest> requests = new ArrayList<ResourceRequest>();
    requests.add(ResourceRequest
        .newInstance(Priority.UNDEFINED, "node1", Resource.newInstance(10, 1),
            1));
    requests.add(ResourceRequest
        .newInstance(Priority.UNDEFINED, "node2", Resource.newInstance(10, 1),
            1));
    ApplicationSubmissionContext asc = ApplicationSubmissionContext
        .newInstance(null, null, null, null, null, false, false, 0,
            Resources.none(), null, false, null, null);
    asc.setAMContainerResourceRequests(requests);
    try {
        ((FederationRouterPolicy) getPolicy()).getHomeSubcluster(asc, null);
        Assert.fail();
    } catch (FederationPolicyException e) {
        Assert.assertTrue(
            e.getMessage().startsWith("Invalid number of resource requests: "));
    }
}
|
/**
 * Builds the value -> probability/confidence map for a tree node. If the
 * first distribution carries an explicit probability, all are assumed to;
 * otherwise probabilities are derived from record counts.
 */
static LinkedHashMap<String, KiePMMLProbabilityConfidence> evaluateProbabilityConfidenceMap(final List<KiePMMLScoreDistribution> kiePMMLScoreDistributions,
                                                                                           final double missingValuePenalty) {
    LinkedHashMap<String, KiePMMLProbabilityConfidence> toReturn = new LinkedHashMap<>();
    if (kiePMMLScoreDistributions == null || kiePMMLScoreDistributions.isEmpty()) {
        return toReturn;
    }
    // The first entry decides the mode for the whole list; presumably PMML
    // guarantees all-or-none probability attributes — confirm against spec.
    if (kiePMMLScoreDistributions.get(0).hasProbability()) {
        for (KiePMMLScoreDistribution kiePMMLScoreDistribution : kiePMMLScoreDistributions) {
            toReturn.put(kiePMMLScoreDistribution.getValue(), new KiePMMLProbabilityConfidence(kiePMMLScoreDistribution.getProbability(), kiePMMLScoreDistribution.getEvaluatedConfidence(missingValuePenalty)));
        }
    } else {
        // No explicit probabilities: derive each one as recordCount / total.
        int totalRecordCount = kiePMMLScoreDistributions.stream()
            .map(KiePMMLScoreDistribution::getRecordCount)
            .reduce(0, Integer::sum);
        for (KiePMMLScoreDistribution kiePMMLScoreDistribution : kiePMMLScoreDistributions) {
            toReturn.put(kiePMMLScoreDistribution.getValue(), new KiePMMLProbabilityConfidence(kiePMMLScoreDistribution.getEvaluatedProbability(totalRecordCount), kiePMMLScoreDistribution.getEvaluatedConfidence(missingValuePenalty)));
        }
    }
    return toReturn;
}
|
@Test
void evaluateProbabilityConfidenceMap() {
    // Case 1: no explicit probabilities — they must be derived from record counts.
    List<KiePMMLScoreDistribution> kiePMMLScoreDistributions = getRandomKiePMMLScoreDistributions(false);
    int totalRecordCount = kiePMMLScoreDistributions.stream()
        .map(KiePMMLScoreDistribution::getRecordCount)
        .reduce(0, Integer::sum);
    final double missingValuePenalty = (double) new Random().nextInt(100) / 10;
    LinkedHashMap<String, KiePMMLProbabilityConfidence> retrievedNoProbability = KiePMMLNode.getProbabilityConfidenceMap(kiePMMLScoreDistributions, missingValuePenalty);
    assertThat(retrievedNoProbability).isNotNull();
    kiePMMLScoreDistributions.forEach(kiePMMLScoreDistribution -> {
        assertThat(retrievedNoProbability).containsKey(kiePMMLScoreDistribution.getValue());
        KiePMMLProbabilityConfidence kiePMMLProbabilityConfidence = retrievedNoProbability.get(kiePMMLScoreDistribution.getValue());
        assertThat(kiePMMLProbabilityConfidence).isNotNull();
        // probability = recordCount / total; confidence scaled by the penalty.
        double probabilityExpected = (double) kiePMMLScoreDistribution.getRecordCount() / (double) totalRecordCount;
        double confidenceExpected = kiePMMLScoreDistribution.getConfidence() * missingValuePenalty;
        assertThat(kiePMMLProbabilityConfidence.getProbability()).isCloseTo(probabilityExpected, Offset.offset(0.000000001));
        assertThat(kiePMMLProbabilityConfidence.getConfidence()).isCloseTo(confidenceExpected, Offset.offset(0.000000001));
    });
    // Case 2: explicit probabilities — they must be passed through unchanged.
    kiePMMLScoreDistributions = getRandomKiePMMLScoreDistributions(true);
    LinkedHashMap<String, KiePMMLProbabilityConfidence> retrievedProbability = KiePMMLNode.getProbabilityConfidenceMap(kiePMMLScoreDistributions, missingValuePenalty);
    assertThat(retrievedNoProbability).isNotNull();
    kiePMMLScoreDistributions.forEach(kiePMMLScoreDistribution -> {
        assertThat(retrievedProbability).containsKey(kiePMMLScoreDistribution.getValue());
        KiePMMLProbabilityConfidence kiePMMLProbabilityConfidence = retrievedProbability.get(kiePMMLScoreDistribution.getValue());
        assertThat(kiePMMLProbabilityConfidence).isNotNull();
        double probabilityExpected = kiePMMLScoreDistribution.getProbability();
        double confidenceExpected = kiePMMLScoreDistribution.getConfidence() * missingValuePenalty;
        assertThat(kiePMMLProbabilityConfidence.getProbability()).isCloseTo(probabilityExpected, Offset.offset(0.000000001));
        assertThat(kiePMMLProbabilityConfidence.getConfidence()).isCloseTo(confidenceExpected, Offset.offset(0.000000001));
    });
}
|
/**
 * Returns all classpath file resources whose name ends with the given
 * extension (e.g. "txt").
 */
public static Collection<File> getFileResourcesByExtension(String extension) {
    // Hoisted: previously Pattern.compile ran once per classpath element
    // inside the flatMap lambda; the pattern is loop-invariant.
    // NOTE(review): the extension is interpolated into the regex unquoted; an
    // extension containing regex metacharacters would alter the match —
    // confirm callers only pass plain extensions, or wrap in Pattern.quote.
    final Pattern pattern = Pattern.compile(".*\\." + extension + "$");
    return Arrays.stream(getClassPathElements())
        .flatMap(elem -> internalGetFileResources(elem, pattern)
            .stream())
        .collect(Collectors.toSet());
}
|
@Test
public void getResourcesByExtensionExisting() {
    // A known .txt fixture must be found among classpath resources.
    final Collection<File> retrieved = getFileResourcesByExtension("txt");
    commonVerifyCollectionWithExpectedFile(retrieved, TEST_FILE);
}
|
/**
 * Resolves the committer factory for an output path. Resolution order:
 * explicit global factory key, then a scheme-specific key derived from the
 * output path's URI scheme, then the file-output default.
 */
public static PathOutputCommitterFactory getCommitterFactory(
    Path outputPath,
    Configuration conf) {
    // determine which key to look up the overall one or a schema-specific
    // key
    LOG.debug("Looking for committer factory for path {}", outputPath);
    String key = COMMITTER_FACTORY_CLASS;
    if (StringUtils.isEmpty(conf.getTrimmed(key)) && outputPath != null) {
        // there is no explicit factory and there's an output path
        // Get the scheme of the destination
        String scheme = outputPath.toUri().getScheme();
        // and see if it has a key
        String schemeKey = String.format(COMMITTER_FACTORY_SCHEME_PATTERN,
            scheme);
        if (StringUtils.isNotEmpty(conf.getTrimmed(schemeKey))) {
            // it does, so use that key in the classname lookup
            LOG.info("Using schema-specific factory for {}", outputPath);
            key = schemeKey;
        } else {
            LOG.debug("No scheme-specific factory defined in {}", schemeKey);
        }
    }
    // create the factory. Before using Configuration.getClass, check
    // for an empty configuration value, as that raises ClassNotFoundException.
    Class<? extends PathOutputCommitterFactory> factory;
    String trimmedValue = conf.getTrimmed(key, "");
    if (StringUtils.isEmpty(trimmedValue)) {
        // empty/null value, use default
        LOG.info("No output committer factory defined,"
            + " defaulting to FileOutputCommitterFactory");
        factory = FileOutputCommitterFactory.class;
    } else {
        // key is set, get the class
        factory = conf.getClass(key,
            FileOutputCommitterFactory.class,
            PathOutputCommitterFactory.class);
        LOG.info("Using OutputCommitter factory class {} from key {}",
            factory, key);
    }
    return ReflectionUtils.newInstance(factory, conf);
}
|
@Test
public void testCommitterFactoryUnknown() throws Throwable {
    // Binding the factory key to a class that does not exist must surface
    // as a RuntimeException when the factory is resolved.
    Configuration conf = new Configuration();
    conf.set(COMMITTER_FACTORY_CLASS, "unknown");
    intercept(RuntimeException.class, () -> getCommitterFactory(HDFS_PATH, conf));
}
|
/**
 * Executes the scenario: initializes via beforeRun() on first entry, runs
 * each step in the order dictated by nextStepIndex(), records step results,
 * and guarantees afterRun() / log-appender cleanup in the finally block.
 */
@Override
public void run() {
    try { // make sure we call afterRun() even on crashes
        // and operate countdown latches, else we may hang the parallel runner
        if (steps == null) {
            beforeRun(); // lazily initializes 'steps' (and may set 'skipped')
        }
        if (skipped) {
            return; // scenario filtered out; finally block still runs
        }
        int count = steps.size();
        int index = 0;
        // nextStepIndex() is the single source of progress; NOTE(review): it
        // presumably supports debug step-back (see null-result comment below),
        // so do not assume the index increases monotonically.
        while ((index = nextStepIndex()) < count) {
            currentStep = steps.get(index);
            execute(currentStep);
            if (currentStepResult != null) { // can be null if debug step-back or hook skip
                result.addStepResult(currentStepResult);
            }
        }
    } catch (Exception e) {
        // Flush any partially-recorded step result before recording the crash.
        if (currentStepResult != null) {
            result.addStepResult(currentStepResult);
        }
        logError("scenario [run] failed\n" + StringUtils.throwableToString(e));
        currentStepResult = result.addFakeStepResult("scenario [run] failed", e);
    } finally {
        if (!skipped) {
            afterRun();
            if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) {
                featureRuntime.suite.abort();
            }
        }
        if (caller.isNone()) {
            logAppender.close(); // reclaim memory
        }
    }
}
|
@Test
void testMatchEachContainsDeep() {
    // 'contains deep' must match nested objects inside arrays element-wise.
    String setup = "def response = [ { a: 1, arr: [ { b: 2, c: 3 }, { b: 4, c: 5 } ] } ]";
    String match = "match each response contains deep { a: 1, arr: [ { b: '#number', c: 3 } ] }";
    run(setup, match);
}
|
/**
 * Parses the stream in a forked JVM process, insulating this JVM from
 * parser crashes (e.g. running out of memory). A client is acquired from
 * the pool, the parse call is proxied to the fork, and any throwable
 * raised on the remote side is rethrown here after the client is released.
 *
 * @param stream   document stream to parse; must not be null
 * @param handler  SAX handler receiving content events
 * @param metadata document metadata, updated in place
 * @param context  parse context forwarded to the forked parser
 * @throws IOException   if communication with the fork fails, or the remote
 *                       parse raised an IOException
 * @throws SAXException  if the remote parse raised a SAXException
 * @throws TikaException if the fork crashed or the remote parse raised one
 */
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
    ParseContext context) throws IOException, SAXException, TikaException {
    if (stream == null) {
        throw new NullPointerException("null stream");
    }
    Throwable t;
    // 'alive' records whether the forked process is still usable; it decides
    // whether releaseClient() returns the client to the pool or discards it.
    boolean alive = false;
    ForkClient client = acquireClient();
    try {
        // Recursive wrapper handlers already carry metadata themselves;
        // otherwise tee content events into a metadata-capturing handler too.
        ContentHandler tee =
            (handler instanceof AbstractRecursiveParserWrapperHandler) ? handler :
            new TeeContentHandler(handler, new MetadataContentHandler(metadata));
        // Throwables raised by the remote parse are RETURNED here, not thrown.
        t = client.call("parse", stream, tee, metadata, context);
        alive = true;
    } catch (TikaException te) {
        // Problem occurred on our side
        alive = true;
        throw te;
    } catch (IOException e) {
        // Problem occurred on the other side
        throw new TikaException("Failed to communicate with a forked parser process." +
            " The process has most likely crashed due to some error" +
            " like running out of memory. A new process will be" +
            " started for the next parsing request.", e);
    } finally {
        releaseClient(client, alive);
    }
    // Rethrow the remote throwable under its original type where possible.
    if (t instanceof IOException) {
        throw (IOException) t;
    } else if (t instanceof SAXException) {
        throw (SAXException) t;
    } else if (t instanceof TikaException) {
        throw (TikaException) t;
    } else if (t != null) {
        throw new TikaException("Unexpected error in forked server process", t);
    }
}
|
@Test
public void testToFileHandler() throws Exception {
    // Verify that a server-side write-to-file works without proxying back the
    // AbstractContentHandlerFactory.
    Path outputFile = Files.createTempFile(tempDir, "fork-to-file-handler-", ".txt");
    RecursiveParserWrapper wrapper = new RecursiveParserWrapper(new AutoDetectParser());
    ToFileHandler toFileHandler =
            new ToFileHandler(new SBContentHandlerFactory(), outputFile.toFile());
    try (InputStream is = getResourceAsStream("/test-documents/basic_embedded.xml");
            ForkParser forkParser = new ForkParser(ForkParserTest.class.getClassLoader(),
                    wrapper)) {
        forkParser.parse(is, toFileHandler, new Metadata(), new ParseContext());
    }
    String contents;
    try (Reader reader = Files.newBufferedReader(outputFile, StandardCharsets.UTF_8)) {
        contents = IOUtils.toString(reader);
    }
    String parsedBy = TikaCoreProperties.TIKA_PARSED_BY.getName();
    assertContainsCount(parsedBy +
            " : org.apache.tika.parser.DefaultParser", contents, 2);
    assertContainsCount(parsedBy +
            " : org.apache.tika.parser.mock.MockParser", contents, 2);
    assertContains("Nikolai Lobachevsky", contents);
    assertContains("embeddedAuthor", contents);
    assertContains("main_content", contents);
    assertContains("some_embedded_content", contents);
    assertContains("X-TIKA:embedded_resource_path : /embed1.xml", contents);
}
|
/**
 * Routes the statement with the single rule. Routing happens directly into
 * the given context when it is still empty (or for SELECT, which never
 * needs merging); otherwise the statement is routed into a scratch context
 * which is then combined with the existing one.
 */
@Override
public void route(final RouteContext routeContext, final SingleRule singleRule) {
    boolean routeInPlace = routeContext.getRouteUnits().isEmpty() || sqlStatement instanceof SelectStatement;
    if (routeInPlace) {
        routeStatement(routeContext, singleRule);
        return;
    }
    RouteContext scratchContext = new RouteContext();
    routeStatement(scratchContext, singleRule);
    combineRouteContext(routeContext, scratchContext);
}
|
@Test
void assertRouteWithDefaultSingleRule() throws SQLException {
    // A CREATE TABLE routed via the default single rule should land on ds_0
    // with an identity table mapping for t_order.
    SingleStandardRouteEngine engine = new SingleStandardRouteEngine(mockQualifiedTables(), new MySQLCreateTableStatement(false));
    SingleRule singleRule =
            new SingleRule(new SingleRuleConfiguration(Collections.emptyList(), "ds_0"), DefaultDatabase.LOGIC_NAME, new MySQLDatabaseType(), createDataSourceMap(), Collections.emptyList());
    RouteContext routeContext = new RouteContext();
    engine.route(routeContext, singleRule);
    assertThat(routeContext.getRouteUnits().size(), is(1));
    RouteUnit routeUnit = routeContext.getRouteUnits().iterator().next();
    assertThat(routeUnit.getDataSourceMapper().getActualName(), is("ds_0"));
    assertThat(routeUnit.getTableMappers().size(), is(1));
    RouteMapper tableMapper = routeUnit.getTableMappers().iterator().next();
    assertThat(tableMapper.getActualName(), is("t_order"));
    assertThat(tableMapper.getLogicName(), is("t_order"));
}
|
/**
 * Number of return values: one fewer than the bar count, since no return
 * exists at index 0.
 */
public int getSize() {
    final int barCount = barSeries.getBarCount();
    return barCount - 1;
}
|
@Test
public void returnSize() {
    // For every return type, a 5-bar series yields 4 returns (none at index 0).
    for (Returns.ReturnType type : Returns.ReturnType.values()) {
        BarSeries series = new MockBarSeries(numFunction, 1d, 2d, 3d, 4d, 5d);
        Returns returns = new Returns(series, new BaseTradingRecord(), type);
        assertEquals(4, returns.getSize());
    }
}
|
/**
 * Creates a folder. A top-level folder becomes a new bucket using the
 * region and storage class from the transfer status; any other folder is
 * materialized as a placeholder object.
 *
 * @param folder folder to create
 * @param status transfer status carrying region and storage class
 * @return the folder with updated type flags and attributes
 * @throws BackgroundException mapped from any I/O failure
 */
@Override
public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException {
    try {
        if(containerService.isContainer(folder)) {
            // Top-level: create a bucket owned by the authenticated user.
            final Storage.Buckets.Insert request = session.getClient().buckets().insert(session.getHost().getCredentials().getUsername(),
                new Bucket()
                    .setLocation(status.getRegion())
                    .setStorageClass(status.getStorageClass())
                    .setName(containerService.getContainer(folder).getName()));
            final Bucket bucket = request.execute();
            // Mark the returned path as a volume and attach the bucket's attributes.
            final EnumSet<Path.Type> type = EnumSet.copyOf(folder.getType());
            type.add(Path.Type.volume);
            return folder.withType(type).withAttributes(new GoogleStorageAttributesFinderFeature(session).toAttributes(bucket));
        }
        else {
            final EnumSet<Path.Type> type = EnumSet.copyOf(folder.getType());
            type.add(Path.Type.placeholder);
            // Add placeholder object
            return new GoogleStorageTouchFeature(session).withWriter(writer).touch(folder.withType(type),
                status.withMime(MIMETYPE));
        }
    }
    catch(IOException e) {
        throw new GoogleStorageExceptionMappingService().map("Cannot create folder {0}", e, folder);
    }
}
|
@Test
public void testDirectoryWhitespace() throws Exception {
    // Folder names containing a space must be created, found and deleted cleanly.
    final Path container = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final String name = String.format("%s %s",
            new AlphanumericRandomStringService().random(), new AlphanumericRandomStringService().random());
    final Path folder = new GoogleStorageDirectoryFeature(session).mkdir(
            new Path(container, name, EnumSet.of(Path.Type.directory)), new TransferStatus());
    assertTrue(new GoogleStorageFindFeature(session).find(folder));
    assertTrue(new DefaultFindFeature(session).find(folder));
    new GoogleStorageDeleteFeature(session).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Generates a fresh key pair, submits a CSR to the Dubbo Certificate
 * Authority over gRPC, and returns the resulting certificate pair.
 *
 * @return the new CertPair, or null if key generation or CA signing failed
 * @throws IOException if reading local credentials for the request fails
 */
protected CertPair refreshCert() throws IOException {
    // Prefer ECDSA; fall back to RSA if the platform does not support it.
    KeyPair keyPair = signWithEcdsa();
    if (keyPair == null) {
        keyPair = signWithRsa();
    }
    if (keyPair == null) {
        logger.error(
                CONFIG_SSL_CERT_GENERATE_FAILED,
                "",
                "",
                "Generate Key failed. Please check if your system support.");
        return null;
    }
    String csr = generateCsr(keyPair);
    DubboCertificateServiceGrpc.DubboCertificateServiceBlockingStub stub =
            DubboCertificateServiceGrpc.newBlockingStub(channel);
    // Attaches auth headers to the stub when configured (e.g. a token file).
    stub = setHeaderIfNeed(stub);
    String privateKeyPem = generatePrivatePemKey(keyPair);
    DubboCertificateResponse certificateResponse = stub.createCertificate(generateRequest(csr));
    // A null response is treated the same as an unsuccessful one.
    if (certificateResponse == null || !certificateResponse.getSuccess()) {
        logger.error(
                CONFIG_SSL_CERT_GENERATE_FAILED,
                "",
                "",
                "Failed to generate cert from Dubbo Certificate Authority. " + "Message: "
                        + (certificateResponse == null ? "null" : certificateResponse.getMessage()));
        return null;
    }
    logger.info("Successfully generate cert from Dubbo Certificate Authority. Cert expire time: "
            + certificateResponse.getExpireTime());
    // Trust certs are joined into a single newline-separated PEM bundle.
    return new CertPair(
            privateKeyPem,
            certificateResponse.getCertPem(),
            String.join("\n", certificateResponse.getTrustCertsList()),
            certificateResponse.getExpireTime());
}
|
/**
 * Drives refreshCert() through its failure and success paths by mocking
 * the static key-generation methods and the gRPC certificate stub.
 */
@Test
void testRefreshCert() throws IOException {
    try (MockedStatic<DubboCertManager> managerMock =
            Mockito.mockStatic(DubboCertManager.class, CALLS_REAL_METHODS)) {
        FrameworkModel frameworkModel = new FrameworkModel();
        DubboCertManager certManager = new DubboCertManager(frameworkModel);
        // Both signing algorithms unavailable -> no key pair -> null result.
        managerMock.when(DubboCertManager::signWithEcdsa).thenReturn(null);
        managerMock.when(DubboCertManager::signWithRsa).thenReturn(null);
        Assertions.assertNull(certManager.refreshCert());
        managerMock.when(DubboCertManager::signWithEcdsa).thenCallRealMethod();
        certManager.channel = Mockito.mock(Channel.class);
        try (MockedStatic<DubboCertificateServiceGrpc> mockGrpc =
                Mockito.mockStatic(DubboCertificateServiceGrpc.class, CALLS_REAL_METHODS)) {
            DubboCertificateServiceGrpc.DubboCertificateServiceBlockingStub stub =
                    Mockito.mock(DubboCertificateServiceGrpc.DubboCertificateServiceBlockingStub.class);
            mockGrpc.when(() -> DubboCertificateServiceGrpc.newBlockingStub(Mockito.any(Channel.class)))
                    .thenReturn(stub);
            // CA answers with success=false -> null result.
            Mockito.when(stub.createCertificate(Mockito.any()))
                    .thenReturn(DubboCertificateResponse.newBuilder()
                            .setSuccess(false)
                            .build());
            certManager.certConfig = new CertConfig(null, null, null, null);
            Assertions.assertNull(certManager.refreshCert());
            // With a token file configured, the auth interceptor must be attached
            // exactly once (verified below).
            String file = this.getClass()
                    .getClassLoader()
                    .getResource("certs/token")
                    .getFile();
            Mockito.when(stub.withInterceptors(Mockito.any())).thenReturn(stub);
            certManager.certConfig = new CertConfig(null, null, null, file);
            Assertions.assertNull(certManager.refreshCert());
            Mockito.verify(stub, Mockito.times(1)).withInterceptors(Mockito.any());
            // Successful CA response -> fully populated CertPair.
            Mockito.when(stub.createCertificate(Mockito.any()))
                    .thenReturn(DubboCertificateResponse.newBuilder()
                            .setSuccess(true)
                            .setCertPem("certPem")
                            .addTrustCerts("trustCerts")
                            .setExpireTime(123456)
                            .build());
            CertPair certPair = certManager.refreshCert();
            Assertions.assertNotNull(certPair);
            Assertions.assertEquals("certPem", certPair.getCertificate());
            Assertions.assertEquals("trustCerts", certPair.getTrustCerts());
            Assertions.assertEquals(123456, certPair.getExpireTime());
            // A null stub response is treated as failure.
            Mockito.when(stub.createCertificate(Mockito.any())).thenReturn(null);
            Assertions.assertNull(certManager.refreshCert());
        }
        frameworkModel.destroy();
    }
}
|
/**
 * Instantiates the TopicFilter implementation named by the
 * {@code TOPIC_FILTER_CLASS} configuration key.
 */
TopicFilter topicFilter() {
    final TopicFilter configuredFilter = getConfiguredInstance(TOPIC_FILTER_CLASS, TopicFilter.class);
    return configuredFilter;
}
|
@Test
public void testTopicMatching() {
    // A "topics" whitelist should replicate exactly the listed topic.
    MirrorSourceConfig config = new MirrorSourceConfig(makeProps("topics", "topic1"));
    TopicFilter filter = config.topicFilter();
    assertTrue(filter.shouldReplicateTopic("topic1"),
            "topic1 replication property configuration failed");
    assertFalse(filter.shouldReplicateTopic("topic2"),
            "topic2 replication property configuration failed");
}
|
/**
 * Parses a (possibly partial) date string into a SQL {@code Date} at the
 * start of that day (UTC epoch-day based).
 *
 * @param str date text; partial values are completed before parsing
 * @return the parsed date
 * @throws KsqlException if the text cannot be parsed as a date
 */
public static Date parseDate(final String str) {
    try {
        final String completed = PartialStringToTimestampParser.completeDate(str);
        final long epochDay = LocalDate.parse(completed).toEpochDay();
        return new Date(TimeUnit.DAYS.toMillis(epochDay));
    } catch (final DateTimeParseException e) {
        throw new KsqlException(
            "Failed to parse date '" + str + "': " + e.getMessage() + DATE_HELP_MESSAGE,
            e
        );
    }
}
|
@Test
public void shouldNotParseDate() {
    // When: parsing clearly non-date text
    final KsqlException thrown = assertThrows(
        KsqlException.class,
        () -> SqlTimeTypes.parseDate("foo"));
    // Then: the error points the user at the required format
    assertThat(thrown.getMessage(),
        containsString("Required format is: \"yyyy-MM-dd\""));
}
|
/**
 * Converts the given value to a {@code Boolean}. Booleans pass through,
 * strings are parsed (and used if they parse to a boolean), and everything
 * else is coerced numerically: zero maps to false, any other value to true.
 */
public static Boolean convertToBoolean(Schema schema, Object value) throws DataException {
    if (value == null) {
        return null;
    }
    if (value instanceof Boolean) {
        return (Boolean) value;
    }
    if (value instanceof String) {
        SchemaAndValue parsed = parseString(value.toString());
        Object parsedValue = parsed.value();
        if (parsedValue instanceof Boolean) {
            return (Boolean) parsedValue;
        }
        // Not a boolean literal: fall through to numeric coercion, which
        // raises DataException for unconvertible strings.
    }
    long numeric = asLong(value, schema, null);
    return numeric == 0L ? Boolean.FALSE : Boolean.TRUE;
}
|
@Test
public void shouldFailToParseInvalidBooleanValueString() {
    // A quoted, non-boolean string cannot be coerced and must raise DataException.
    assertThrows(DataException.class,
            () -> Values.convertToBoolean(Schema.STRING_SCHEMA, "\"green\""));
}
|
/**
 * Discovers VE devices by listing "veslot*" entries directly under the
 * given directory (depth 1) and converting each to a Device.
 */
public Set<Device> getDevicesFromPath(String path) throws IOException {
    final MutableInt slotCounter = new MutableInt(0);
    try (Stream<Path> entries = Files.walk(Paths.get(path), 1)) {
        return entries
                .filter(entry -> entry.toFile().getName().startsWith("veslot"))
                .map(entry -> toDevice(entry, slotCounter))
                .collect(Collectors.toSet());
    }
}
|
@Test
public void testDeviceNumberFromMajorAndMinor() throws IOException {
    // The mock reports major:minor in hex; the discoverer must expose them
    // as decimal numbers.
    createVeSlotFile(0);
    createVeSlotFile(1);
    createVeSlotFile(2);
    createOsStateFile(0);
    when(mockCommandExecutor.getOutput()).thenReturn(
            "10:1:character special file",
            "1d:2:character special file",
            "4:3c:character special file");
    when(udevUtil.getSysPath(anyInt(), anyChar())).thenReturn(testFolder);
    List<Device> sorted = Lists.newArrayList(discoverer.getDevicesFromPath(testFolder));
    sorted.sort(DEVICE_COMPARATOR);
    assertEquals("Major number", 16, sorted.get(0).getMajorNumber());
    assertEquals("Minor number", 1, sorted.get(0).getMinorNumber());
    assertEquals("Major number", 29, sorted.get(1).getMajorNumber());
    assertEquals("Minor number", 2, sorted.get(1).getMinorNumber());
    assertEquals("Major number", 4, sorted.get(2).getMajorNumber());
    assertEquals("Minor number", 60, sorted.get(2).getMinorNumber());
}
|
/**
 * Returns the process-wide ProfileManager, creating it on first use.
 * <p>
 * Synchronized to fix a race in the original lazy initialization: two
 * threads could both observe {@code INSTANCE == null} and create separate
 * instances, breaking the singleton contract.
 */
public static synchronized ProfileManager getInstance() {
    if (INSTANCE == null) {
        INSTANCE = new ProfileManager();
    }
    return INSTANCE;
}
|
@Test
public void testSingleton() {
    // Two consecutive lookups must yield the identical instance.
    ProfileManager first = ProfileManager.getInstance();
    ProfileManager second = ProfileManager.getInstance();
    assertSame(first, second, "ProfileManager should be singleton");
}
|
/**
 * Creates a Hive table in the metastore, preparing its storage location first.
 * <p>
 * Managed tables take their location from the {@code location} property
 * (which, if it already exists, must be an empty directory) or from a
 * generated default. External tables take theirs from
 * {@code external_location} or {@code location} and are never created or
 * deleted by this method. On metastore failure, a directory we created
 * ourselves is removed again if it is still empty — unless the table turns
 * out to exist after all (e.g. a meta-service timeout after a successful
 * create), in which case the creation is reported as successful.
 *
 * @param stmt             the CREATE TABLE statement
 * @param partitionColumns explicit partition columns; when empty, partition
 *                         names come from the statement's partition desc
 * @return true on success
 * @throws DdlException if the table cannot be created
 */
public boolean createTable(CreateTableStmt stmt, List<Column> partitionColumns) throws DdlException {
    String dbName = stmt.getDbName();
    String tableName = stmt.getTableName();
    Map<String, String> properties = stmt.getProperties() != null ? stmt.getProperties() : new HashMap<>();
    Path tablePath = null;
    boolean tableLocationExists = false;
    if (!stmt.isExternal()) {
        checkLocationProperties(properties);
        if (!Strings.isNullOrEmpty(properties.get(LOCATION_PROPERTY))) {
            String tableLocationWithUserAssign = properties.get(LOCATION_PROPERTY);
            tablePath = new Path(tableLocationWithUserAssign);
            if (pathExists(tablePath, hadoopConf)) {
                tableLocationExists = true;
                if (!isEmpty(tablePath, hadoopConf)) {
                    throw new StarRocksConnectorException("not support creating table under non-empty directory: %s",
                            tableLocationWithUserAssign);
                }
            }
        } else {
            tablePath = getDefaultLocation(dbName, tableName);
        }
    } else {
        // checkExternalLocationProperties(properties);
        if (properties.containsKey(EXTERNAL_LOCATION_PROPERTY)) {
            tablePath = new Path(properties.get(EXTERNAL_LOCATION_PROPERTY));
        } else if (properties.containsKey(LOCATION_PROPERTY)) {
            tablePath = new Path(properties.get(LOCATION_PROPERTY));
        }
        // External locations are user-managed; never create or delete them.
        tableLocationExists = true;
    }
    HiveStorageFormat.check(properties);
    List<String> partitionColNames;
    if (partitionColumns.isEmpty()) {
        partitionColNames = stmt.getPartitionDesc() != null ?
                ((ListPartitionDesc) stmt.getPartitionDesc()).getPartitionColNames() : new ArrayList<>();
    } else {
        partitionColNames = partitionColumns.stream().map(Column::getName).collect(Collectors.toList());
    }
    // default is managed table
    HiveTable.HiveTableType tableType = HiveTable.HiveTableType.MANAGED_TABLE;
    if (stmt.isExternal()) {
        tableType = HiveTable.HiveTableType.EXTERNAL_TABLE;
    }
    // Data columns are the leading statement columns; the trailing ones are
    // the partition columns.
    HiveTable.Builder builder = HiveTable.builder()
            .setId(ConnectorTableId.CONNECTOR_ID_GENERATOR.getNextId().asInt())
            .setTableName(tableName)
            .setCatalogName(catalogName)
            .setResourceName(toResourceName(catalogName, "hive"))
            .setHiveDbName(dbName)
            .setHiveTableName(tableName)
            .setPartitionColumnNames(partitionColNames)
            .setDataColumnNames(stmt.getColumns().stream()
                    .map(Column::getName)
                    .collect(Collectors.toList()).subList(0, stmt.getColumns().size() - partitionColNames.size()))
            .setFullSchema(stmt.getColumns())
            .setTableLocation(tablePath == null ? null : tablePath.toString())
            .setProperties(stmt.getProperties())
            .setStorageFormat(HiveStorageFormat.get(properties.getOrDefault(FILE_FORMAT, "parquet")))
            .setCreateTime(System.currentTimeMillis())
            .setHiveTableType(tableType);
    Table table = builder.build();
    try {
        if (!tableLocationExists) {
            createDirectory(tablePath, hadoopConf);
        }
        metastore.createTable(dbName, table);
    } catch (Exception e) {
        // Fix: pass the exception so the stack trace is not lost.
        LOG.error("Failed to create table {}.{}", dbName, tableName, e);
        boolean shouldDelete;
        try {
            if (tableExists(dbName, tableName)) {
                // Fix: log the table NAME (the {}.{} placeholders expect it),
                // not the table object's toString().
                LOG.warn("Table {}.{} already exists. But some error occur such as accessing meta service timeout",
                        dbName, tableName, e);
                return true;
            }
            FileSystem fileSystem = FileSystem.get(URI.create(tablePath.toString()), hadoopConf);
            // Only remove a directory we created ourselves and only while still empty.
            shouldDelete = !fileSystem.listLocatedStatus(tablePath).hasNext() && !tableLocationExists;
            if (shouldDelete) {
                // Explicit recursive flag; the deprecated delete(Path) delegated
                // to delete(path, true).
                fileSystem.delete(tablePath, true);
            }
        } catch (Exception e1) {
            LOG.error("Failed to delete table location {}", tablePath, e);
        }
        throw new DdlException(String.format("Failed to create table %s.%s. msg: %s", dbName, tableName, e.getMessage()));
    }
    return true;
}
|
/**
 * Creates a partitioned Hive table through a HiveMetastoreOperations whose
 * filesystem directory creation and default-location lookup are stubbed out.
 */
@Test
public void testCreateTable() throws DdlException {
    // Stub out physical directory creation (JMockit MockUp).
    new MockUp<HiveWriteUtils>() {
        public void createDirectory(Path path, Configuration conf) {
        }
    };
    // Anonymous subclass pins the default table location to a fixed path.
    HiveMetastoreOperations mockedHmsOps = new HiveMetastoreOperations(cachingHiveMetastore, true,
            new Configuration(), MetastoreType.HMS, "hive_catalog") {
        @Override
        public Path getDefaultLocation(String dbName, String tableName) {
            return new Path("mytable_locatino");
        }
    };
    // One data column (c1) plus one partition column (p1).
    CreateTableStmt stmt = new CreateTableStmt(
            false,
            false,
            new TableName("hive_catalog", "hive_db", "hive_table"),
            Lists.newArrayList(
                    new ColumnDef("c1", TypeDef.create(PrimitiveType.INT)),
                    new ColumnDef("p1", TypeDef.create(PrimitiveType.INT))),
            "hive",
            null,
            new ListPartitionDesc(Lists.newArrayList("p1"), new ArrayList<>()),
            null,
            new HashMap<>(),
            new HashMap<>(),
            "my table comment");
    List<Column> columns = stmt.getColumnDefs()
            .stream()
            .map(columnDef -> columnDef.toColumn(null))
            .collect(Collectors.toList());
    stmt.setColumns(columns);
    Assert.assertTrue(mockedHmsOps.createTable(stmt));
}
|
/**
 * Applies schema injection to CREATE SOURCE and CREATE ... AS SELECT
 * statements; every other statement type is returned untouched.
 *
 * @param statement the configured statement to inspect
 * @return the (possibly rewritten) statement
 * @throws KsqlStatementException on injection failure, carrying the masked
 *     statement text for safe error reporting
 */
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement
) {
    if (!(statement.getStatement() instanceof CreateSource)
        && !(statement.getStatement() instanceof CreateAsSelect)) {
        return statement;
    }
    try {
        if (statement.getStatement() instanceof CreateSource) {
            final ConfiguredStatement<CreateSource> createStatement =
                (ConfiguredStatement<CreateSource>) statement;
            // forCreate* return empty when no injection is needed; fall back
            // to the original statement in that case.
            return (ConfiguredStatement<T>) forCreateStatement(createStatement).orElse(createStatement);
        } else {
            final ConfiguredStatement<CreateAsSelect> createStatement =
                (ConfiguredStatement<CreateAsSelect>) statement;
            return (ConfiguredStatement<T>) forCreateAsStatement(createStatement).orElse(
                createStatement);
        }
    } catch (final KsqlStatementException e) {
        // Already carries statement context; rethrow unchanged.
        throw e;
    } catch (final KsqlException e) {
        // NOTE(review): wraps e.getCause() rather than e itself; the original
        // message is preserved via buildErrorMessage(e) — confirm dropping
        // the intermediate exception frame is intentional.
        throw new KsqlStatementException(
            ErrorMessageUtil.buildErrorMessage(e),
            statement.getMaskedStatementText(),
            e.getCause());
    }
}
|
@Test
public void shouldReturnStatementUnchangedIfCtFormatsDoNotSupportInference() {
    // Given:
    givenNeitherKeyNorValueInferenceSupported();
    // When:
    final ConfiguredStatement<?> injected = injector.inject(ctStatement);
    // Then: the very same instance is returned, not a rewritten copy.
    assertThat(injected, is(sameInstance(ctStatement)));
}
|
/**
 * Adds a 32-bit value, interpreting it as unsigned before delegating to
 * the long overload.
 */
public void add(int value) {
    final long unsigned = Util.toUnsignedLong(value);
    add(unsigned);
}
|
/**
 * Verifies BitmapValue.equals across all combinations of the empty,
 * single-value and bitmap internal representations.
 * <p>
 * Uses {@code Assert.*} consistently (the original mixed a statically
 * imported {@code assertEquals} with {@code Assert.assertNotEquals}), and
 * corrects comments that labeled inequality cases with {@code ==}.
 */
@Test
public void testEqual() {
    // empty == empty
    BitmapValue emp1 = new BitmapValue();
    BitmapValue emp2 = new BitmapValue();
    Assert.assertEquals(emp1, emp2);
    // empty != single value
    emp2.add(1);
    Assert.assertNotEquals(emp1, emp2);
    // empty != bitmap
    emp2.add(2);
    Assert.assertNotEquals(emp1, emp2);
    // single value != empty
    BitmapValue sgv = new BitmapValue();
    sgv.add(1);
    BitmapValue emp3 = new BitmapValue();
    Assert.assertNotEquals(sgv, emp3);
    // single value vs single value: equal only for the same value
    BitmapValue sgv1 = new BitmapValue();
    sgv1.add(1);
    BitmapValue sgv2 = new BitmapValue();
    sgv2.add(2);
    Assert.assertEquals(sgv, sgv1);
    Assert.assertNotEquals(sgv, sgv2);
    // single value != bitmap
    sgv2.add(3);
    Assert.assertNotEquals(sgv, sgv2);
    // bitmap != empty
    BitmapValue bitmapValue = new BitmapValue();
    bitmapValue.add(1);
    bitmapValue.add(2);
    BitmapValue emp4 = new BitmapValue();
    Assert.assertNotEquals(bitmapValue, emp4);
    // bitmap != single value
    BitmapValue sgv3 = new BitmapValue();
    sgv3.add(1);
    Assert.assertNotEquals(bitmapValue, sgv3);
    // bitmap vs bitmap: equal only for identical contents
    BitmapValue bitmapValue1 = new BitmapValue();
    bitmapValue1.add(1);
    BitmapValue bitmapValue2 = new BitmapValue();
    bitmapValue2.add(1);
    bitmapValue2.add(2);
    Assert.assertEquals(bitmapValue, bitmapValue2);
    Assert.assertNotEquals(bitmapValue, bitmapValue1);
}
|
/**
 * Validates the given string as a URL. The value must parse as a URI and
 * pass the scheme, authority, path, query and fragment checks. A
 * {@code file:} URI with an empty authority is accepted immediately as a
 * local file (its path/query/fragment are not checked, matching the
 * original behavior); a {@code file:} authority containing a colon is
 * rejected.
 *
 * @param value candidate URL, may be null
 * @return true if the value is a valid URL
 */
public boolean isValid(String value) {
    if (value == null) {
        return false;
    }
    final URI uri;
    try {
        uri = new URI(value); // ensure value is a valid URI
    } catch (URISyntaxException e) {
        return false;
    }
    // OK, perform additional validation
    final String scheme = uri.getScheme();
    if (!isValidScheme(scheme)) {
        return false;
    }
    final String authority = uri.getRawAuthority();
    final boolean isFileScheme = "file".equals(scheme);
    if (isFileScheme && (authority == null || authority.isEmpty())) {
        // Special case - file: allows an empty authority; this is a local
        // file and nothing more needs checking.
        return true;
    }
    if (isFileScheme && authority.contains(":")) {
        // file: authorities must not contain a port-like colon.
        return false;
    }
    if (!isValidAuthority(authority)) {
        return false;
    }
    return isValidPath(uri.getRawPath())
            && isValidQuery(uri.getRawQuery())
            && isValidFragment(uri.getRawFragment());
}
|
@Test
public void testValidator204() {
    // Path segments containing ';' matrix parameters must be accepted.
    UrlValidator urlValidator = new UrlValidator(new String[] {"http", "https"});
    assertTrue(urlValidator.isValid("http://tech.yahoo.com/rc/desktops/102;_ylt=Ao8yevQHlZ4On0O3ZJGXLEQFLZA5"));
}
|
/**
 * Builds the reverse-lookup zone name from configuration. Returns null
 * when no zone subnet is configured, which disables reverse lookups. When
 * a netmask is also configured, the zone is derived from the subnet's IP
 * range; otherwise it is derived from the subnet address alone.
 */
protected Name getReverseZoneName(Configuration conf) {
    String zoneSubnet = getZoneSubnet(conf);
    if (zoneSubnet == null) {
        LOG.warn("Zone subnet is not configured. Reverse lookups disabled");
        return null;
    }
    String mask = conf.get(KEY_DNS_ZONE_MASK);
    if (mask == null) {
        return getReverseZoneName(zoneSubnet);
    }
    SubnetUtils subnetUtils = new SubnetUtils(zoneSubnet, mask);
    return getReverseZoneName(subnetUtils, zoneSubnet);
}
|
@Test
public void testReverseZoneNames() throws Exception {
    // Subnet 172.26.32.0 with mask 255.255.224.0 must produce the
    // 26.172.in-addr.arpa. reverse zone.
    Configuration conf = new Configuration();
    conf.set(KEY_DNS_ZONE_SUBNET, "172.26.32.0");
    conf.set(KEY_DNS_ZONE_MASK, "255.255.224.0");
    Name reverseZone = getRegistryDNS().getReverseZoneName(conf);
    assertEquals("wrong name", "26.172.in-addr.arpa.", reverseZone.toString());
}
|
/**
 * Fails if the multimap does not contain at least the given key/value
 * pairs. Arguments alternate keys and values ({@code rest} is presumably
 * required to have even length — enforced by accumulateMultimap, TODO
 * confirm). Returns an {@code Ordered} so callers can additionally require
 * {@code inOrder()}.
 */
@CanIgnoreReturnValue
public final Ordered containsAtLeast(
    @Nullable Object k0, @Nullable Object v0, @Nullable Object... rest) {
    return containsAtLeastEntriesIn(accumulateMultimap(k0, v0, rest));
}
|
@Test
public void containsAtLeastVarargInOrder() {
    // The expected entries appear in the actual multimap in the same
    // relative order, so inOrder() must succeed.
    ImmutableMultimap<Integer, String> actual =
            ImmutableMultimap.of(3, "one", 3, "six", 3, "two", 4, "five", 4, "four");
    assertThat(actual)
            .containsAtLeast(3, "one", 3, "six", 4, "five", 4, "four")
            .inOrder();
}
|
/**
 * Parses the text into a {@code LocalDateTime} using the default formatter
 * (delegates with a null {@code DateTimeFormatter}).
 *
 * @param text text to parse
 * @return the parsed date-time
 */
public static LocalDateTime parse(CharSequence text) {
    return parse(text, (DateTimeFormatter) null);
}
|
@Test
public void parseTest3() {
    // A time-only pattern should populate the time part of the LocalDateTime.
    final LocalDateTime parsed = LocalDateTimeUtil.parse("12:23:56", DatePattern.NORM_TIME_PATTERN);
    assertEquals("12:23:56", Objects.requireNonNull(parsed).toLocalTime().toString());
}
|
/**
 * Translates a NextObjective into pipeline-specific flow rules and groups.
 * SIMPLE, HASHED and BROADCAST (xconnect or multicast) types are handled;
 * anything else yields an UNSUPPORTED error translation. Unless the
 * objective is a group-modify operation, next-MPLS and next-VLAN rules are
 * also generated.
 */
@Override
public ObjectiveTranslation doTranslate(NextObjective obj)
        throws FabricPipelinerException {
    final ObjectiveTranslation.Builder translationBuilder =
            ObjectiveTranslation.builder();
    switch (obj.type()) {
        case SIMPLE:
            simpleNext(obj, translationBuilder, false);
            break;
        case HASHED:
            hashedNext(obj, translationBuilder);
            break;
        case BROADCAST:
            if (isXconnect(obj)) {
                xconnectNext(obj, translationBuilder);
            } else {
                multicastNext(obj, translationBuilder);
            }
            break;
        default:
            log.warn("Unsupported NextObjective type '{}'", obj);
            return ObjectiveTranslation.ofError(ObjectiveError.UNSUPPORTED);
    }
    if (!isGroupModifyOp(obj)) {
        // Generate next MPLS and VLAN rules.
        nextMpls(obj, translationBuilder);
        nextVlan(obj, translationBuilder);
    }
    return translationBuilder.build();
}
|
/**
 * BROADCAST objective on an xconnect app: expect one xconnect-table flow
 * per port, each matching (next_id, in_port) and outputting on the
 * opposite port.
 */
@Test
public void testXconnectOutput() throws FabricPipelinerException {
    TrafficTreatment treatment1 = DefaultTrafficTreatment.builder()
            .setOutput(PORT_1)
            .build();
    TrafficTreatment treatment2 = DefaultTrafficTreatment.builder()
            .setOutput(PORT_2)
            .build();
    NextObjective nextObjective = DefaultNextObjective.builder()
            .withId(NEXT_ID_1)
            .withPriority(PRIORITY)
            .addTreatment(treatment1)
            .addTreatment(treatment2)
            .withType(NextObjective.Type.BROADCAST)
            .makePermanent()
            .fromApp(XCONNECT_APP_ID)
            .add();
    ObjectiveTranslation actualTranslation = translatorHashed.doTranslate(nextObjective);
    // Should generate 2 flows for the xconnect table.
    // Expected multicast table flow rule.
    PiCriterion nextIdCriterion = PiCriterion.builder()
            .matchExact(FabricConstants.HDR_NEXT_ID, NEXT_ID_1)
            .build();
    // Flow 1: traffic entering PORT_1 is cross-connected out PORT_2.
    TrafficSelector xcSelector1 = DefaultTrafficSelector.builder()
            .matchPi(nextIdCriterion)
            .matchInPort(PORT_1)
            .build();
    TrafficTreatment xcTreatment1 = DefaultTrafficTreatment.builder()
            .piTableAction(PiAction.builder()
                    .withId(FabricConstants.FABRIC_INGRESS_NEXT_OUTPUT_XCONNECT)
                    .withParameter(new PiActionParam(FabricConstants.PORT_NUM, PORT_2.toLong()))
                    .build())
            .build();
    // Flow 2: the symmetric rule, PORT_2 in, PORT_1 out.
    TrafficSelector xcSelector2 = DefaultTrafficSelector.builder()
            .matchPi(nextIdCriterion)
            .matchInPort(PORT_2)
            .build();
    TrafficTreatment xcTreatment2 = DefaultTrafficTreatment.builder()
            .piTableAction(PiAction.builder()
                    .withId(FabricConstants.FABRIC_INGRESS_NEXT_OUTPUT_XCONNECT)
                    .withParameter(new PiActionParam(FabricConstants.PORT_NUM, PORT_1.toLong()))
                    .build())
            .build();
    FlowRule expectedXcFlowRule1 = DefaultFlowRule.builder()
            .forDevice(DEVICE_ID)
            .fromApp(XCONNECT_APP_ID)
            .makePermanent()
            .withPriority(nextObjective.priority())
            .forTable(FabricConstants.FABRIC_INGRESS_NEXT_XCONNECT)
            .withSelector(xcSelector1)
            .withTreatment(xcTreatment1)
            .build();
    FlowRule expectedXcFlowRule2 = DefaultFlowRule.builder()
            .forDevice(DEVICE_ID)
            .fromApp(XCONNECT_APP_ID)
            .makePermanent()
            .withPriority(nextObjective.priority())
            .forTable(FabricConstants.FABRIC_INGRESS_NEXT_XCONNECT)
            .withSelector(xcSelector2)
            .withTreatment(xcTreatment2)
            .build();
    ObjectiveTranslation expectedTranslation = ObjectiveTranslation.builder()
            .addFlowRule(expectedXcFlowRule1)
            .addFlowRule(expectedXcFlowRule2)
            .build();
    assertEquals(expectedTranslation, actualTranslation);
}
|
/**
 * Maps a PostgreSQL column definition to a SeaTunnel column. The mapping
 * keys off the lower-cased PG data type; unknown types raise a
 * convert-to-SeaTunnel-type error. Name, nullability, default value and
 * comment are carried over from the definition.
 */
@Override
public Column convert(BasicTypeDefine typeDefine) {
    PhysicalColumn.PhysicalColumnBuilder builder =
            PhysicalColumn.builder()
                    .name(typeDefine.getName())
                    .sourceType(typeDefine.getColumnType())
                    .nullable(typeDefine.isNullable())
                    .defaultValue(typeDefine.getDefaultValue())
                    .comment(typeDefine.getComment());
    String pgDataType = typeDefine.getDataType().toLowerCase();
    switch (pgDataType) {
        case PG_BOOLEAN:
            builder.dataType(BasicType.BOOLEAN_TYPE);
            break;
        case PG_BOOLEAN_ARRAY:
            builder.dataType(ArrayType.BOOLEAN_ARRAY_TYPE);
            break;
        case PG_SMALLSERIAL:
        case PG_SMALLINT:
            builder.dataType(BasicType.SHORT_TYPE);
            break;
        case PG_SMALLINT_ARRAY:
            builder.dataType(ArrayType.SHORT_ARRAY_TYPE);
            break;
        case PG_INTEGER:
        case PG_SERIAL:
            builder.dataType(BasicType.INT_TYPE);
            break;
        case PG_INTEGER_ARRAY:
            builder.dataType(ArrayType.INT_ARRAY_TYPE);
            break;
        case PG_BIGINT:
        case PG_BIGSERIAL:
            builder.dataType(BasicType.LONG_TYPE);
            break;
        case PG_BIGINT_ARRAY:
            builder.dataType(ArrayType.LONG_ARRAY_TYPE);
            break;
        case PG_REAL:
            builder.dataType(BasicType.FLOAT_TYPE);
            break;
        case PG_REAL_ARRAY:
            builder.dataType(ArrayType.FLOAT_ARRAY_TYPE);
            break;
        case PG_DOUBLE_PRECISION:
            builder.dataType(BasicType.DOUBLE_TYPE);
            break;
        case PG_DOUBLE_PRECISION_ARRAY:
            builder.dataType(ArrayType.DOUBLE_ARRAY_TYPE);
            break;
        case PG_NUMERIC:
            // Use the declared precision/scale when present; otherwise fall
            // back to the converter defaults.
            DecimalType decimalType;
            if (typeDefine.getPrecision() != null && typeDefine.getPrecision() > 0) {
                decimalType =
                        new DecimalType(
                                typeDefine.getPrecision().intValue(), typeDefine.getScale());
            } else {
                decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE);
            }
            builder.dataType(decimalType);
            break;
        case PG_MONEY:
            // -92233720368547758.08 to +92233720368547758.07, With the sign bit it's 20, we use
            // 30 precision to save it
            DecimalType moneyDecimalType;
            moneyDecimalType = new DecimalType(30, 2);
            builder.dataType(moneyDecimalType);
            builder.columnLength(30L);
            builder.scale(2);
            break;
        case PG_CHAR:
        case PG_CHARACTER:
            // Char lengths are converted to a 4-byte-per-char byte length;
            // an unspecified length defaults to char(1).
            builder.dataType(BasicType.STRING_TYPE);
            if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
                builder.columnLength(TypeDefineUtils.charTo4ByteLength(1L));
                builder.sourceType(pgDataType);
            } else {
                builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
                builder.sourceType(String.format("%s(%s)", pgDataType, typeDefine.getLength()));
            }
            break;
        case PG_VARCHAR:
        case PG_CHARACTER_VARYING:
            // Unlike char, an unspecified varchar length leaves columnLength unset.
            builder.dataType(BasicType.STRING_TYPE);
            if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
                builder.sourceType(pgDataType);
            } else {
                builder.sourceType(String.format("%s(%s)", pgDataType, typeDefine.getLength()));
                builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
            }
            break;
        case PG_TEXT:
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case PG_UUID:
            builder.dataType(BasicType.STRING_TYPE);
            builder.sourceType(pgDataType);
            builder.columnLength(128L);
            break;
        case PG_JSON:
        case PG_JSONB:
        case PG_XML:
        case PG_GEOMETRY:
        case PG_GEOGRAPHY:
            // Structured/spatial types are passed through as strings.
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case PG_CHAR_ARRAY:
        case PG_VARCHAR_ARRAY:
        case PG_TEXT_ARRAY:
            builder.dataType(ArrayType.STRING_ARRAY_TYPE);
            break;
        case PG_BYTEA:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            break;
        case PG_DATE:
            builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
            break;
        case PG_TIME:
        case PG_TIME_TZ:
            // Clamp over-long fractional-second scales and warn.
            builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
            if (typeDefine.getScale() != null && typeDefine.getScale() > MAX_TIME_SCALE) {
                builder.scale(MAX_TIME_SCALE);
                log.warn(
                        "The scale of time type is larger than {}, it will be truncated to {}",
                        MAX_TIME_SCALE,
                        MAX_TIME_SCALE);
            } else {
                builder.scale(typeDefine.getScale());
            }
            break;
        case PG_TIMESTAMP:
        case PG_TIMESTAMP_TZ:
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            if (typeDefine.getScale() != null && typeDefine.getScale() > MAX_TIMESTAMP_SCALE) {
                builder.scale(MAX_TIMESTAMP_SCALE);
                log.warn(
                        "The scale of timestamp type is larger than {}, it will be truncated to {}",
                        MAX_TIMESTAMP_SCALE,
                        MAX_TIMESTAMP_SCALE);
            } else {
                builder.scale(typeDefine.getScale());
            }
            break;
        default:
            throw CommonError.convertToSeaTunnelTypeError(
                    identifier(), typeDefine.getDataType(), typeDefine.getName());
    }
    return builder.build();
}
|
/**
 * Verifies that Postgres TIME / TIMETZ columns (with and without precision)
 * convert to SeaTunnel's LOCAL_TIME type, preserving name, scale and source type.
 */
@Test
public void testConvertTime() {
    // time without precision
    BasicTypeDefine<Object> typeDefine =
            BasicTypeDefine.builder().name("test").columnType("time").dataType("time").build();
    assertTimeConversion(typeDefine);

    // time with precision
    typeDefine =
            BasicTypeDefine.builder()
                    .name("test")
                    .columnType("time(3)")
                    .dataType("time")
                    .length(3L)
                    .build();
    assertTimeConversion(typeDefine);

    // time with time zone, without precision
    typeDefine =
            BasicTypeDefine.builder()
                    .name("test")
                    .columnType("timetz")
                    .dataType("timetz")
                    .build();
    assertTimeConversion(typeDefine);

    // time with time zone, with precision
    typeDefine =
            BasicTypeDefine.builder()
                    .name("test")
                    .columnType("timetz(3)")
                    .dataType("timetz")
                    .length(3L)
                    .build();
    assertTimeConversion(typeDefine);
}

/** Converts {@code typeDefine} and asserts name, data type, scale and source type. */
private void assertTimeConversion(BasicTypeDefine<Object> typeDefine) {
    Column column = PostgresTypeConverter.INSTANCE.convert(typeDefine);
    Assertions.assertEquals(typeDefine.getName(), column.getName());
    Assertions.assertEquals(LocalTimeType.LOCAL_TIME_TYPE, column.getDataType());
    Assertions.assertEquals(typeDefine.getScale(), column.getScale());
    Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType().toLowerCase());
}
|
/**
 * Filters the exchange's message through Smooks.
 * <p>
 * A fresh {@link ExecutionContext} is created per exchange, seeded with the
 * exchange's charset property (when set) and published to downstream
 * processors via the {@code SMOOKS_EXECUTION_CONTEXT} header. If the Smooks
 * configuration declares exports, the corresponding results are captured and
 * placed on the message body; otherwise the source is filtered without
 * collecting results. The exchange is always removed from the execution
 * context afterwards, even when filtering throws.
 *
 * @param exchange the Camel exchange whose body is used as the Smooks source
 */
public void process(final Exchange exchange) {
    final ExecutionContext executionContext = smooks.createExecutionContext();
    try {
        executionContext.put(EXCHANGE_TYPED_KEY, exchange);
        String charsetName = (String) exchange.getProperty(Exchange.CHARSET_NAME);
        if (charsetName != null) {
            // if provided, use the same character encoding as the exchange
            executionContext.setContentEncoding(charsetName);
        }
        exchange.getIn().setHeader(SMOOKS_EXECUTION_CONTEXT, executionContext);
        setupSmooksReporting(executionContext);

        final Exports exports = smooks.getApplicationContext().getRegistry().lookup(new ExportsLookup());
        if (exports.hasExports()) {
            // Exports declared: collect the configured results and expose them on the body.
            final Result[] results = exports.createResults();
            smooks.filterSource(executionContext, getSource(exchange), results);
            setResultOnBody(exports, results, exchange);
        } else {
            smooks.filterSource(executionContext, getSource(exchange));
        }
    } finally {
        // Avoid leaking the exchange reference through the execution context.
        executionContext.remove(EXCHANGE_TYPED_KEY);
    }
}
|
@Test
public void testProcessWhenSmooksExportIsStringResult() throws Exception {
    // Route direct:a filters through Smooks (exporting a StringResult) and
    // forwards to direct:b, which records the body it receives.
    final RouteBuilder routes = new RouteBuilder() {
        public void configure() {
            from("direct:a")
                .process(new SmooksProcessor(new Smooks().setExports(new Exports(StringResult.class)), context))
                .to("direct:b");
            from("direct:b").convertBodyTo(String.class).process(new DirectBProcessor());
        }
    };
    context.addRoutes(routes);
    context.start();

    // Send a trivial XML source; the exported StringResult becomes the message body.
    template.request("direct:a", exchange -> exchange.getIn().setBody(new StringSource("<x/>")));

    assertEquals("<x/>", DirectBProcessor.inMessage);
}
|
/**
 * Maps a persisted {@link AlmSettingDto} to its REST resource representation,
 * translating the stored ALM value to its response-facing name.
 */
private static DopSettingsResource toDopSettingsResource(AlmSettingDto almSettingDto) {
    String responseAlmName = toResponseAlm(almSettingDto.getAlm()).name();
    return new DopSettingsResource(
        almSettingDto.getUuid(),
        responseAlmName,
        almSettingDto.getKey(),
        almSettingDto.getUrl(),
        almSettingDto.getAppId());
}
|
/**
 * Fetching all DOP settings returns every stored ALM setting, mapped to its
 * response-facing ALM name. (Fix: removed a stray empty statement `;;`.)
 */
@Test
void fetchAllDopSettings_whenDbClientReturnsData_returnsResponse() throws Exception {
    // Given: three DevOps platform settings of different ALM kinds.
    AlmSettingDto almSettingDto1 = generateAlmSettingsDto("github");
    AlmSettingDto almSettingDto2 = generateAlmSettingsDto("azure_devops");
    AlmSettingDto almSettingDto3 = generateAlmSettingsDto("bitbucket_cloud");
    List<AlmSettingDto> dopSettings = List.of(
        almSettingDto1,
        almSettingDto2,
        almSettingDto3
    );
    when(dbClient.almSettingDao().selectAll(dbSession)).thenReturn(dopSettings);
    userSession.logIn().addPermission(PROVISION_PROJECTS);

    // When: requesting the DOP settings endpoint as a provisioning user.
    MvcResult mvcResult = mockMvc
        .perform(get(DOP_SETTINGS_ENDPOINT))
        .andExpect(status().isOk())
        .andReturn();

    // Then: the response carries all settings with translated ALM names.
    DopSettingsRestResponse response = gson.fromJson(mvcResult.getResponse().getContentAsString(), DopSettingsRestResponse.class);
    List<DopSettingsResource> expectedDopSettings = List.of(
        toDopSettingsResource(almSettingDto1, "github"),
        toDopSettingsResource(almSettingDto2, "azure"),
        toDopSettingsResource(almSettingDto3, "bitbucketcloud")
    );
    assertThat(response.dopSettings())
        .containsExactlyInAnyOrderElementsOf(expectedDopSettings);
}
|
/**
 * Validates a workflow create request before it is persisted.
 * <p>
 * Fix: the original dereferenced {@code request.getWorkflow()} in the debug
 * log statement (and passed it to the dry-run validator) BEFORE the null
 * check, so a null workflow caused a {@link NullPointerException} instead of
 * the intended bad-request error. The null case now fails fast.
 *
 * @param request the create request to validate; its workflow must be non-null
 * @param caller  the user issuing the request
 * @throws MaestroBadRequestException if the workflow is null or fails dry-run validation
 */
public void validate(WorkflowCreateRequest request, User caller) {
    List<String> errors = new ArrayList<>();
    if (request.getWorkflow() == null) {
        // Fail fast: logging or dry-running a null workflow would NPE below.
        errors.add("workflow cannot be null");
        throw new MaestroBadRequestException(errors, "Invalid workflow create request");
    }
    LOG.debug(
        "validating workflow [{}] for user [{}]", request.getWorkflow().getId(), caller.getName());

    // definition data will be checked by dryRunValidator
    try {
        dryRunValidator.validate(request.getWorkflow(), caller);
    } catch (MaestroDryRunException e) {
        // Prefer the root cause's message when available - it is more specific.
        errors.add(e.getCause() != null ? e.getCause().getMessage() : e.getMessage());
    }

    if (!errors.isEmpty()) {
        throw new MaestroBadRequestException(errors, "Invalid workflow create request");
    }
}
|
@Test
public void testWorkflowValidate() {
    // A request wrapping a valid workflow definition should pass validation.
    final WorkflowCreateRequest createRequest = new WorkflowCreateRequest();
    createRequest.setWorkflow(definition.getWorkflow());

    actionHandler.validate(createRequest, tester);

    // Validation delegates to the dry-run validator but must not persist anything.
    verify(dryRunValidator, times(1)).validate(any(), any());
    verify(workflowDao, never()).addWorkflowDefinition(any(), any());
}
|
/**
 * Strips a trailing directory-index segment (e.g. {@code index.html}) from
 * the URL path, keeping the trailing slash, when the last path segment
 * matches {@code PATTERN_PATH_LAST_SEGMENT}.
 *
 * @return this instance, for method chaining
 */
public URLNormalizer removeDirectoryIndex() {
    final String urlPath = toURL().getPath();
    if (!PATTERN_PATH_LAST_SEGMENT.matcher(urlPath).matches()) {
        return this;
    }
    // Replace only the first occurrence of the path so query/fragment text
    // containing the same characters is untouched.
    final String parentPath = StringUtils.substringBeforeLast(urlPath, "/") + "/";
    url = StringUtils.replaceOnce(url, urlPath, parentPath);
    return this;
}
|
@Test
public void testRemoveDirectoryIndex() {
    // {input, expected} pairs; expected equals input where no index segment applies.
    final String[][] cases = {
        {"http://www.example.com/index.html", "http://www.example.com/"},
        {"http://www.example.com/index.html/a", "http://www.example.com/index.html/a"},
        {"http://www.example.com/a/Default.asp", "http://www.example.com/a/"},
        {"http://www.example.com/a/index.php?a=b&c=d", "http://www.example.com/a/?a=b&c=d"},
        {"http://www.example.com/a/z.php?a=b&c=d/index.htm",
         "http://www.example.com/a/z.php?a=b&c=d/index.htm"},
        {"http://www.example.com/index,html", "http://www.example.com/index,html"},
    };
    for (final String[] pair : cases) {
        assertEquals(pair[1], n(pair[0]).removeDirectoryIndex().toString());
    }
}
|
/**
 * Returns the value of the given property, resolved with the default
 * {@link ConfigurationValueOptions}.
 *
 * @param key the property key to look up
 * @return the resolved property value
 */
@Override
public Object get(PropertyKey key) {
    return get(key, ConfigurationValueOptions.defaults());
}
|
@Test
public void getUnsetValueThrowsException() {
    // Looking up a key that was never set must surface a RuntimeException
    // rather than silently returning null.
    mThrown.expect(RuntimeException.class);
    mConfiguration.get(PropertyKey.S3A_ACCESS_KEY);
}
|
/**
 * Returns the shared Tencent CLS log collect client instance.
 *
 * @return the singleton {@link TencentClsLogCollectClient}
 */
public static TencentClsLogCollectClient getTencentClsLogCollectClient() {
    return TENCENT_CLS_LOG_COLLECT_CLIENT;
}
|
/**
 * The handler must expose a Tencent CLS collector instance.
 * <p>
 * Fix: the test was named after Aliyun SLS (copy-paste from a sibling plugin)
 * while exercising the Tencent CLS handler; the assertEquals arguments were
 * also in actual-first order, which produces misleading failure messages.
 */
@Test
public void testGetTencentClsLogCollectClient() {
    Assertions.assertEquals(
            TencentClsLogCollectClient.class,
            LoggingTencentClsPluginDataHandler.getTencentClsLogCollectClient().getClass());
}
|
/**
 * Parses a Heart Rate Measurement characteristic packet.
 * <p>
 * The first byte is a flags field: bit 0 selects an 8- vs 16-bit heart rate
 * value, bits 1-2 report the sensor contact status, bit 3 announces an
 * Energy Expended field, and bit 4 announces trailing RR-Interval values.
 * Packets too short for the fields their flags announce are rejected via
 * {@code onInvalidDataReceived}.
 */
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
    super.onDataReceived(device, data);

    // A measurement always carries at least the flags byte plus an 8-bit heart rate.
    if (data.size() < 2) {
        onInvalidDataReceived(device, data);
        return;
    }

    // Read flags
    int offset = 0;
    final int flags = data.getIntValue(Data.FORMAT_UINT8, offset);
    // Bit 0: heart rate value format (0 = uint8, 1 = uint16 little-endian).
    final int hearRateType = (flags & 0x01) == 0 ? Data.FORMAT_UINT8 : Data.FORMAT_UINT16_LE;
    // Bits 1-2: sensor contact status; values 2 and 3 mean the feature is
    // supported, and 3 means skin contact is currently detected.
    final int sensorContactStatus = (flags & 0x06) >> 1;
    final boolean sensorContactSupported = sensorContactStatus == 2 || sensorContactStatus == 3;
    final boolean sensorContactDetected = sensorContactStatus == 3;
    final boolean energyExpandedPresent = (flags & 0x08) != 0;
    final boolean rrIntervalsPresent = (flags & 0x10) != 0;
    offset += 1;

    // Validate packet length.
    // NOTE(review): the low nibble of the FORMAT_* constants appears to encode
    // the field width in bytes (hence `& 0x0F`) - confirm against the
    // Data.FORMAT_* definitions. The RR term only guarantees room for one
    // interval; any further complete intervals are consumed below.
    if (data.size() < 1 + (hearRateType & 0x0F)
            + (energyExpandedPresent ? 2 : 0)
            + (rrIntervalsPresent ? 2 : 0)) {
        onInvalidDataReceived(device, data);
        return;
    }

    // Prepare data
    // Sensor contact is tri-state: null when the sensor doesn't support it.
    final Boolean sensorContact = sensorContactSupported ? sensorContactDetected : null;
    final int heartRate = data.getIntValue(hearRateType, offset);
    offset += hearRateType & 0xF;

    Integer energyExpanded = null;
    if (energyExpandedPresent) {
        energyExpanded = data.getIntValue(Data.FORMAT_UINT16_LE, offset);
        offset += 2;
    }

    List<Integer> rrIntervals = null;
    if (rrIntervalsPresent) {
        // Consume every remaining complete 16-bit value as an RR interval.
        final int count = (data.size() - offset) / 2;
        final List<Integer> intervals = new ArrayList<>(count);
        for (int i = 0; i < count; ++i) {
            intervals.add(data.getIntValue(Data.FORMAT_UINT16_LE, offset));
            offset += 2;
        }
        rrIntervals = Collections.unmodifiableList(intervals);
    }

    onHeartRateMeasurementReceived(device, heartRate, sensorContact, energyExpanded, rrIntervals);
}
|
@Test
public void onInvalidDataReceived() {
    success = false;
    // Flags 0b10111: 16-bit heart rate (bit 0), sensor contact supported and
    // detected (bits 1-2), RR intervals present (bit 4) -> the packet needs at
    // least 5 bytes, but only 3 are supplied, so it must be rejected.
    final Data data = new Data(new byte[] { 0b10111, 1, 1 });
    response.onDataReceived(null, data);
    assertFalse(response.isValid());
    assertFalse(success);
}
|
/**
 * Writes a single byte by delegating to {@link #write(byte[])}.
 *
 * @param b the byte to write (only the low 8 bits are used)
 * @throws IOException if the underlying array write fails
 */
@Override
public void write(int b) throws IOException {
    write(new byte[] { (byte) b });
}
|
@Test
public void testWrite() throws Exception {
    // Bytes written to the stream must be forwarded verbatim to the log writer.
    final DistributedLogManager manager = mock(DistributedLogManager.class);
    final AppendOnlyStreamWriter streamWriter = mock(AppendOnlyStreamWriter.class);
    final DLOutputStream stream = new DLOutputStream(manager, streamWriter);

    final byte[] payload = new byte[16];
    stream.write(payload);

    verify(streamWriter, times(1)).write(payload);
}
|
/**
 * Registers the table operations' {@code FileIO} with the tracker.
 *
 * @param ops the table operations to track; must not be null
 * @throws IllegalArgumentException if {@code ops} is null
 */
public void track(TableOperations ops) {
    Preconditions.checkArgument(ops != null, "Invalid table ops: null");
    tracker.put(ops, ops.io());
}
|
@SuppressWarnings("resource")
@Test
public void nullTableOps() {
    // Tracking null table ops must be rejected up front with the precondition message.
    assertThatThrownBy(() -> new FileIOTracker().track(null))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Invalid table ops: null");
}
|
/**
 * Creates an exit status tracker configured with the given runtime options.
 *
 * @param options the runtime options controlling exit-status computation
 */
public ExitStatus(Options options) {
    this.options = options;
}
|
@Test
void with_passed_failed_scenarios() {
    createRuntime();
    // One passed and one failed scenario: any failure drives the exit code to 0x1.
    bus.send(testCaseFinishedWithStatus(Status.PASSED));
    bus.send(testCaseFinishedWithStatus(Status.FAILED));
    assertThat(exitStatus.exitStatus(), is(equalTo((byte) 0x1)));
}
|
/**
 * Returns whether the given string is non-null, non-empty and contains at
 * least one non-whitespace character (as determined by {@code containsText}).
 *
 * @param str the string to check, may be null
 * @return {@code true} if the string contains actual text
 */
public static boolean hasText(String str) {
    if (str == null || str.isEmpty()) {
        return false;
    }
    return containsText(str);
}
|
@Test
void testHasText() {
    // Null, empty and whitespace-only strings carry no text.
    assertFalse(StringUtils.hasText(null));
    assertFalse(StringUtils.hasText(""));
    assertFalse(StringUtils.hasText(" "));

    // Any non-whitespace character qualifies, regardless of surrounding padding.
    assertTrue(StringUtils.hasText("hello"));
    assertTrue(StringUtils.hasText(" hello "));
}
|
/**
 * Verifies that a configured override endpoint is a well-formed URL/URI.
 * Blank or null endpoints are accepted (meaning no override is configured).
 *
 * @param endpoint     the endpoint value to check; may be null or empty
 * @param endpointName human-readable endpoint name used in the error message
 * @throws MisfireException if the endpoint is present but not a valid URL/URI
 */
static void validateEndpoint(String endpoint, String endpointName) throws MisfireException {
    if (StringUtils.isEmpty(endpoint)) {
        return;
    }
    try {
        new URL(endpoint).toURI();
    } catch (Exception e) {
        // Re-throw the exception to fail the input start attempt
        throw new MisfireException(String.format(Locale.ROOT, "The specified [%s] Override Endpoint [%s] is invalid.",
                endpointName, endpoint), e);
    }
}
|
@Test
public void testValidateEndpoint() throws MisfireException {
    // A well-formed URI passes without throwing.
    KinesisTransport.validateEndpoint("https://graylog.org", "Graylog");
    // Blank and null endpoints mean "no override configured" and are accepted.
    KinesisTransport.validateEndpoint("", "Blank");
    KinesisTransport.validateEndpoint(null, "Null");
    // A malformed URI must be rejected with a descriptive MisfireException.
    assertThatThrownBy(() -> KinesisTransport.validateEndpoint("haha not a url", "Bad URI"))
    .isExactlyInstanceOf(MisfireException.class)
    .hasMessageContaining("Override Endpoint")
    .hasMessageContaining("is invalid");
}
|
/**
 * Extracts a timestamp via the delegate extractor, routing any runtime
 * failure through {@code handleFailure} instead of propagating it.
 *
 * @param record            the consumer record to extract a timestamp from
 * @param previousTimestamp the previously extracted timestamp
 * @return the delegate's timestamp, or whatever {@code handleFailure}
 *         produces when extraction throws (presumably a logged sentinel
 *         value or a rethrow, depending on configuration - see handleFailure)
 */
@Override
public long extract(final ConsumerRecord<Object, Object> record, final long previousTimestamp) {
    try {
        return delegate.extract(record, previousTimestamp);
    } catch (final RuntimeException e) {
        // Give the failure handler the record's key/value for error context.
        return handleFailure(record.key(), record.value(), e);
    }
}
|
@Test
public void shouldLogExceptionsAndNotFailOnExtractFromRow() {
    // Given: a delegate extractor that always throws, wrapped with failOnError=false.
    final KsqlException e = new KsqlException("foo");
    final LoggingTimestampExtractor extractor = new LoggingTimestampExtractor(
        (k, v) -> {
            throw e;
        },
        logger,
        false
    );

    // When:
    final long result = extractor.extract(KEY, VALUE);

    // Then (did not throw): the failure is logged with record context and a
    // sentinel of -1 is returned instead of propagating the exception.
    verify(logger).error(RecordProcessingError
        .recordProcessingError("Failed to extract timestamp from row", e,
            () -> "key:" + KEY + ", value:" + VALUE));
    assertThat(result, is(-1L));
}
|
/**
 * Drives the state updater: hands over tasks pending initialization,
 * surfaces failed tasks, and transitions restored active tasks.
 *
 * @param now            current time in milliseconds
 * @param offsetResetter callback used to reset offsets for restored tasks
 * @return {@code true} when no active task is still restoring and no task is
 *         pending initialization
 */
public boolean checkStateUpdater(final long now,
                                 final java.util.function.Consumer<Set<TopicPartition>> offsetResetter) {
    addTasksToStateUpdater();
    if (stateUpdater.hasExceptionsAndFailedTasks()) {
        handleExceptionsFromStateUpdater();
    }
    if (stateUpdater.restoresActiveTasks()) {
        handleRestoredTasksFromStateUpdater(now, offsetResetter);
    }
    // Intentionally re-query restoresActiveTasks(): handling restored tasks
    // above may have changed the updater's state since the check before it.
    return !stateUpdater.restoresActiveTasks()
        && !tasks.hasPendingTasksToInit();
}
|
@Test
public void shouldTransitMultipleRestoredTasksToRunning() {
    // Two stateful tasks still in RESTORING state with their own input partitions.
    final StreamTask task1 = statefulTask(taskId00, taskId00ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId00Partitions).build();
    final StreamTask task2 = statefulTask(taskId01, taskId01ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId01Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTransitionToRunningOfRestoredTask(mkSet(task1, task2), tasks);

    // Checking the state updater must move both restored tasks to RUNNING.
    taskManager.checkStateUpdater(time.milliseconds(), noOpResetter);

    verifyTransitionToRunningOfRestoredTask(mkSet(task1, task2), tasks);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.