focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
/**
 * Total number of bytes this transfer carries: the header buffer's limit
 * plus the size of the mapped message body.
 * NOTE(review): assumes byteBufferHeader has been flipped so limit() equals
 * the written header length — confirm against the constructor.
 */
@Override
public long count() {
return this.byteBufferHeader.limit() + this.selectMappedBufferResult.getSize();
}
|
// Verifies count() = header limit (20, full allocated capacity) + mapped buffer size (20) = 40.
@Test
public void OneMessageTransferCountTest() {
ByteBuffer byteBuffer = ByteBuffer.allocate(20);
byteBuffer.putInt(20);
SelectMappedBufferResult selectMappedBufferResult = new SelectMappedBufferResult(0,byteBuffer,20,new DefaultMappedFile());
OneMessageTransfer manyMessageTransfer = new OneMessageTransfer(byteBuffer,selectMappedBufferResult);
Assert.assertEquals(manyMessageTransfer.count(),40);
}
|
/**
 * Returns a serializable function that converts a Beam {@code Row} into a
 * Spanner {@code Mutation} of the requested operation against {@code table}.
 *
 * @param operation mutation kind (INSERT, DELETE, UPDATE, REPLACE, INSERT_OR_UPDATE)
 * @param table target table name
 * @return row-to-mutation function; throws IllegalArgumentException at apply
 *         time for an unrecognized operation
 */
public static SerializableFunction<Row, Mutation> beamRowToMutationFn(
Mutation.Op operation, String table) {
return (row -> {
switch (operation) {
case INSERT:
return MutationUtils.createMutationFromBeamRows(Mutation.newInsertBuilder(table), row);
case DELETE:
// DELETE addresses rows by key only, not by full row content.
return Mutation.delete(table, MutationUtils.createKeyFromBeamRow(row));
case UPDATE:
return MutationUtils.createMutationFromBeamRows(Mutation.newUpdateBuilder(table), row);
case REPLACE:
return MutationUtils.createMutationFromBeamRows(Mutation.newReplaceBuilder(table), row);
case INSERT_OR_UPDATE:
return MutationUtils.createMutationFromBeamRows(
Mutation.newInsertOrUpdateBuilder(table), row);
default:
throw new IllegalArgumentException(
String.format("Unknown mutation operation type: %s", operation));
}
});
}
|
// INSERT path: the generated function must produce the same Mutation as the fixture builder.
@Test
public void testCreateInsertMutationFromRow() {
Mutation expectedMutation = createMutation(Mutation.Op.INSERT);
Mutation mutation = beamRowToMutationFn(Mutation.Op.INSERT, TABLE).apply(WRITE_ROW);
assertEquals(expectedMutation, mutation);
}
|
/**
 * Computes the delay before the next retry using exponential backoff:
 * {@code initial * multiplier^attempt}, capped at {@code max}.
 *
 * @param reconsumeTimes how often the message has been reconsumed; clamped to [0, 32]
 * @return the delay duration, never exceeding {@code max}
 */
@Override
public long nextDelayDuration(int reconsumeTimes) {
    // Clamp the attempt count so the exponent stays within a sane range.
    final int attempt = Math.min(32, Math.max(0, reconsumeTimes));
    return Math.min(max, initial * (long) Math.pow(multiplier, attempt));
}
|
// Default policy: 5s base delay at attempt 0, doubling per attempt (5s * 2^10 at attempt 10).
@Test
public void testNextDelayDuration() {
ExponentialRetryPolicy exponentialRetryPolicy = new ExponentialRetryPolicy();
long actual = exponentialRetryPolicy.nextDelayDuration(0);
assertThat(actual).isEqualTo(TimeUnit.SECONDS.toMillis(5));
actual = exponentialRetryPolicy.nextDelayDuration(10);
assertThat(actual).isEqualTo(TimeUnit.SECONDS.toMillis(1024 * 5));
}
|
/**
 * Appends each element of {@code list} as a line to the file at {@code path},
 * creating the file if absent. Convenience overload of writeLines with append=true.
 *
 * @param list lines to append
 * @param path destination file path
 * @param charset charset name used for encoding
 * @return the written file
 * @throws IORuntimeException on I/O failure
 */
public static <T> File appendLines(Collection<T> list, String path, String charset) throws IORuntimeException {
return writeLines(list, path, charset, true);
}
|
// Manual smoke test (disabled: writes to a local Windows path).
@Test
@Disabled
public void appendLinesTest(){
final List<String> list = ListUtil.toList("a", "b", "c");
FileUtil.appendLines(list, FileUtil.file("d:/test/appendLines.txt"), CharsetUtil.CHARSET_UTF_8);
}
|
/**
 * Emits the {@code <instance-tracking>} XML element (enabled attribute,
 * file-name and format-pattern child nodes) for the given config.
 */
private static void instanceTrackingConfig(XmlGenerator gen, InstanceTrackingConfig trackingConfig) {
gen.open("instance-tracking", "enabled", trackingConfig.isEnabled())
.node("file-name", trackingConfig.getFileName())
.node("format-pattern", trackingConfig.getFormatPattern())
.close();
}
|
// Round-trip: config -> generated XML -> parsed config must preserve all three fields.
@Test
public void testInstanceTrackingConfig() {
clientConfig.getInstanceTrackingConfig()
.setEnabled(true)
.setFileName("/dummy/file")
.setFormatPattern("dummy-pattern with $HZ_INSTANCE_TRACKING{placeholder} and $RND{placeholder}");
InstanceTrackingConfig originalConfig = clientConfig.getInstanceTrackingConfig();
InstanceTrackingConfig generatedConfig = newConfigViaGenerator().getInstanceTrackingConfig();
assertEquals(originalConfig.isEnabled(), generatedConfig.isEnabled());
assertEquals(originalConfig.getFileName(), generatedConfig.getFileName());
assertEquals(originalConfig.getFormatPattern(), generatedConfig.getFormatPattern());
}
|
/**
 * Merges this forest with another one trained on the same formula: the trees
 * are concatenated and the validation metrics are roughly combined.
 *
 * @param other the forest to merge with; must share this forest's formula
 * @return a new forest containing the trees of both inputs
 * @throws IllegalArgumentException if the model formulas differ
 */
public RandomForest merge(RandomForest other) {
    if (!formula.equals(other.formula)) {
        throw new IllegalArgumentException("RandomForest have different model formula");
    }
    Model[] forest = new Model[models.length + other.models.length];
    System.arraycopy(models, 0, forest, 0, models.length);
    System.arraycopy(other.models, 0, forest, models.length, other.models.length);
    // Rough estimation: training times are summed, quality metrics averaged.
    // BUGFIX: the combination must use addition — the previous code multiplied
    // the two values ((a * b) / 2), which is neither a sum nor an average and
    // collapses to 0 whenever either side is 0.
    ClassificationMetrics mergedMetrics = new ClassificationMetrics(
            metrics.fitTime + other.metrics.fitTime,
            metrics.scoreTime + other.metrics.scoreTime,
            metrics.size,
            (metrics.error + other.metrics.error) / 2,
            (metrics.accuracy + other.metrics.accuracy) / 2,
            (metrics.sensitivity + other.metrics.sensitivity) / 2,
            (metrics.specificity + other.metrics.specificity) / 2,
            (metrics.precision + other.metrics.precision) / 2,
            (metrics.f1 + other.metrics.f1) / 2,
            (metrics.mcc + other.metrics.mcc) / 2,
            (metrics.auc + other.metrics.auc) / 2,
            (metrics.logloss + other.metrics.logloss) / 2,
            (metrics.crossentropy + other.metrics.crossentropy) / 2
    );
    // Variable importance accumulates across all trees, so add the two vectors.
    double[] mergedImportance = importance.clone();
    for (int i = 0; i < importance.length; i++) {
        mergedImportance[i] += other.importance[i];
    }
    return new RandomForest(formula, k, forest, mergedMetrics, mergedImportance, classes);
}
|
// Two forests trained with identical seeds are merged; error of each part and
// of the merged forest is pinned against the Segment test set.
@Test
public void testMerge() {
System.out.println("merge");
RandomForest forest1 = RandomForest.fit(Segment.formula, Segment.train, 100, 16, SplitRule.GINI, 20, 100, 5, 1.0, null, Arrays.stream(seeds));
RandomForest forest2 = RandomForest.fit(Segment.formula, Segment.train, 100, 16, SplitRule.GINI, 20, 100, 5, 1.0, null, Arrays.stream(seeds));
RandomForest forest = forest1.merge(forest2);
int error1 = Error.of(Segment.testy, forest1.predict(Segment.test));
int error2 = Error.of(Segment.testy, forest2.predict(Segment.test));
int error = Error.of(Segment.testy, forest.predict(Segment.test));
System.out.format("Forest 1 Error = %d%n", error1);
System.out.format("Forest 2 Error = %d%n", error2);
System.out.format("Merged Error = %d%n", error);
assertEquals(33, error1);
assertEquals(33, error2);
assertEquals(33, error);
}
|
/**
 * Returns a singleton list containing one grouping property over the given columns.
 */
public static <T> List<LocalProperty<T>> grouped(Collection<T> columns)
{
return ImmutableList.of(new GroupingProperty<>(columns));
}
|
// Matching semantics of grouped properties: a grouping over {a,b,c} satisfies a
// wider grouping (residual "d"), exactly matches itself (no residual), and
// leaves the full sub-grouping as residual when it cannot be satisfied.
@Test
public void testGroupedTuple()
{
List<LocalProperty<String>> actual = builder()
.grouped("a", "b", "c")
.build();
assertMatch(
actual,
builder().grouped("a", "b", "c", "d").build(),
Optional.of(grouped("d")));
assertMatch(
actual,
builder().grouped("a", "b", "c").build(),
Optional.empty());
assertMatch(
actual,
builder().grouped("a", "b").build(),
Optional.of(grouped("a", "b")));
assertMatch(
actual,
builder().grouped("a").build(),
Optional.of(grouped("a")));
assertMatch(
actual,
builder().grouped("a").grouped("b").build(),
Optional.of(grouped("a")),
Optional.of(grouped("b")));
}
|
/** SQL scalar function {@code nan()}: returns the IEEE 754 not-a-number constant. */
@Description("constant representing not-a-number")
@ScalarFunction("nan")
@SqlType(StandardTypes.DOUBLE)
public static double NaN()
{
return Double.NaN;
}
|
// nan() and the 0/0 expression must both evaluate to Double.NaN.
@Test
public void testNaN()
{
assertFunction("nan()", DOUBLE, Double.NaN);
assertFunction("0.0E0 / 0.0E0", DOUBLE, Double.NaN);
}
|
/**
 * Builds a version-metrics record: current time (seconds), platform version,
 * module type name, and the activeness flag from the injected supplier.
 */
@Override
public KsqlVersionMetrics collectMetrics() {
final KsqlVersionMetrics metricsRecord = new KsqlVersionMetrics();
// Clock is injected, so the timestamp is testable/deterministic.
metricsRecord.setTimestamp(TimeUnit.MILLISECONDS.toSeconds(clock.millis()));
metricsRecord.setConfluentPlatformVersion(AppInfo.getVersion());
metricsRecord.setKsqlComponentType(moduleType.name());
metricsRecord.setIsActive(activenessSupplier.get());
return metricsRecord;
}
|
// The collected record must carry the configured module type's name.
@Test
public void shouldReportComponentType() {
// When:
final KsqlVersionMetrics metrics = basicCollector.collectMetrics();
// Then:
assertThat(metrics.getKsqlComponentType(), is(MODULE_TYPE.name()));
}
|
/**
 * Counts every log event on the aggregate meter and additionally on the meter
 * matching its standard level. Levels outside the six standard ones only hit
 * the aggregate meter.
 */
@Override
public void append(LogEvent event) {
all.mark();
switch (event.getLevel().getStandardLevel()) {
case TRACE:
trace.mark();
break;
case DEBUG:
debug.mark();
break;
case INFO:
info.mark();
break;
case WARN:
warn.mark();
break;
case ERROR:
error.mark();
break;
case FATAL:
fatal.mark();
break;
default:
// Non-standard/custom levels: counted in "all" only.
break;
}
}
|
// A WARN event increments both the aggregate and the warn meter exactly once.
@Test
public void metersWarnEvents() {
when(event.getLevel()).thenReturn(Level.WARN);
appender.append(event);
assertThat(registry.meter(METRIC_NAME_PREFIX + ".all").getCount())
.isEqualTo(1);
assertThat(registry.meter(METRIC_NAME_PREFIX + ".warn").getCount())
.isEqualTo(1);
}
|
/** Returns the underlying Jetty server instance. */
public Server getServer() {
return server;
}
|
// The server must have a LowResourceMonitor bean configured with the expected idle timeout.
@Test
public void testJettyOption_LowResourceMaxIdleTimeSetUp() throws Exception {
LowResourceMonitor lowResourceMonitor = webServer.getServer().getBean( LowResourceMonitor.class );
assertNotNull( lowResourceMonitor );
assertEquals( EXPECTED_RES_MAX_IDLE_TIME, lowResourceMonitor.getLowResourcesIdleTimeout() );
}
|
/**
 * Lists a directory using the chunk size configured in the host preferences
 * ({@code brick.listing.chunksize}); delegates to the chunked overload.
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
return this.list(directory, listener, new HostPreferences(session.getHost()).getInteger("brick.listing.chunksize"));
}
|
// Creates a directory with exactly chunk-size (2) files and verifies the chunked
// listing returns both entries with the correct parent; cleans up afterwards.
@Test
public void testListEqualChunkSize() throws Exception {
final Path directory = new BrickDirectoryFeature(session).mkdir(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.directory)), new TransferStatus());
final Path f1 = new BrickTouchFeature(session).touch(new Path(directory, new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.file)), new TransferStatus());
final Path f2 = new BrickTouchFeature(session).touch(new Path(directory, new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.file)), new TransferStatus());
final AttributedList<Path> list = new BrickListService(session).list(directory, new DisabledListProgressListener(), 2);
assertNotSame(AttributedList.emptyList(), list);
assertFalse(list.isEmpty());
assertEquals(2, list.size());
for(Path f : list) {
assertSame(directory, f.getParent());
}
assertTrue(list.contains(f1));
assertTrue(list.contains(f2));
new BrickDeleteFeature(session).delete(Collections.singletonList(directory), new DisabledPasswordCallback(), new Delete.DisabledCallback());
}
|
/**
 * Returns an immutable, case-insensitive copy of the given multimap. Keys that
 * differ only in case are merged into a single lower-cased entry; the relative
 * order of values is preserved per key.
 *
 * @param map source map; may be {@code null}, in which case an empty map is returned
 * @return an unmodifiable map with case-insensitive key lookup
 */
public static Map<String, Collection<String>> caseInsensitiveCopyOf(Map<String, Collection<String>> map) {
    if (map == null) {
        return Collections.emptyMap();
    }
    // CASE_INSENSITIVE_ORDER makes lookups ignore case; keys are stored
    // lower-cased so the copy has one canonical spelling per key.
    Map<String, Collection<String>> result = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
    for (Map.Entry<String, Collection<String>> entry : map.entrySet()) {
        // computeIfAbsent replaces the previous containsKey/put/get dance;
        // ArrayList over LinkedList for the value buckets.
        result.computeIfAbsent(entry.getKey().toLowerCase(Locale.ROOT), k -> new ArrayList<>())
                .addAll(entry.getValue());
    }
    result.replaceAll((key, value) -> Collections.unmodifiableCollection(value));
    return Collections.unmodifiableMap(result);
}
|
// A null input must yield an empty (but non-null) map.
@Test
void nullMap() {
// Act
Map<String, Collection<String>> actualMap = caseInsensitiveCopyOf(null);
// Assert result
assertThat(actualMap)
.isNotNull()
.isEmpty();
}
|
/**
 * Serializes a Highlights instance into its JSON payload: devices/hosts/links
 * arrays (always present, possibly empty) plus optional subdue and delay fields.
 */
public static ObjectNode json(Highlights highlights) {
ObjectNode payload = objectNode();
ArrayNode devices = arrayNode();
ArrayNode hosts = arrayNode();
ArrayNode links = arrayNode();
payload.set(DEVICES, devices);
payload.set(HOSTS, hosts);
payload.set(LINKS, links);
highlights.devices().forEach(dh -> devices.add(json(dh)));
highlights.hosts().forEach(hh -> hosts.add(json(hh)));
highlights.links().forEach(lh -> links.add(json(lh)));
// Subdue level only serialized when non-zero.
Highlights.Amount toSubdue = highlights.subdueLevel();
if (!toSubdue.equals(Highlights.Amount.ZERO)) {
payload.put(SUBDUE, toSubdue.toString());
}
// Delay only serialized when positive.
int delay = highlights.delayMs();
if (delay > 0) {
payload.put(DELAY, delay);
}
return payload;
}
|
// Default Highlights serializes to empty arrays and no subdue value.
@Test
public void basicHighlights() {
Highlights h = new Highlights();
payload = TopoJson.json(h);
checkEmptyArrays();
String subdue = JsonUtils.string(payload, TopoJson.SUBDUE);
assertEquals("subdue", "", subdue);
}
|
/**
 * Picks the query to kill under memory pressure: the one reserving the most
 * total memory across nodes whose general pool is fully exhausted.
 *
 * @param runningQueries currently running queries (unused by this policy)
 * @param nodes per-node memory information
 * @return the heaviest query on blocked nodes, or empty if no node is blocked
 */
@Override
public Optional<QueryId> chooseQueryToKill(List<QueryMemoryInfo> runningQueries, List<MemoryInfo> nodes)
{
    Map<QueryId, Long> reservedOnBlockedNodes = new HashMap<>();
    for (MemoryInfo node : nodes) {
        MemoryPoolInfo generalPool = node.getPools().get(GENERAL_POOL);
        if (generalPool == null) {
            continue;
        }
        // A node counts as blocked only when neither free nor revocable bytes remain.
        if (generalPool.getFreeBytes() + generalPool.getReservedRevocableBytes() > 0) {
            continue;
        }
        // Accumulate each query's reservation across all blocked nodes.
        generalPool.getQueryMemoryReservations()
                .forEach((queryId, bytes) -> reservedOnBlockedNodes.merge(queryId, bytes, Long::sum));
    }
    return reservedOnBlockedNodes.entrySet().stream()
            .max(comparingLong(Map.Entry::getValue))
            .map(Map.Entry::getKey);
}
|
// When the general pool has free capacity on every node, no query is selected for kill.
@Test
public void testGeneralPoolHasNoReservation()
{
int reservePool = 10;
int generalPool = 12;
Map<String, Map<String, Long>> queries = ImmutableMap.<String, Map<String, Long>>builder()
.put("q_1", ImmutableMap.of("n1", 0L, "n2", 0L, "n3", 0L, "n4", 0L, "n5", 0L))
.put("q_r", ImmutableMap.of("n1", 6L, "n2", 6L, "n3", 6L, "n4", 6L, "n5", 6L))
.build();
assertEquals(
lowMemoryKiller.chooseQueryToKill(
toQueryMemoryInfoList("q_r", queries),
toNodeMemoryInfoList(reservePool, generalPool, "q_r", queries)),
Optional.empty());
}
|
/**
 * Builds a UriEndpoint for the given URL using the configured connect address.
 * Convenience overload of the three-argument variant.
 */
UriEndpoint createUriEndpoint(String url, boolean isWs) {
return createUriEndpoint(url, isWs, connectAddress);
}
|
// A relative path with SSL on port 443 externalizes as https without an explicit port.
@Test
void createUriEndpointRelativeWithPort() {
String test = this.builder
.host("example.com")
.port(443)
.sslSupport()
.build()
.createUriEndpoint("/foo", false)
.toExternalForm();
assertThat(test).isEqualTo("https://example.com/foo");
}
|
/**
 * Looks up the JavaBeans-style setter for a property: {@code setXxx(propertyType)}.
 * Only public, non-static methods qualify; the return type must be void or the
 * declaring class itself (fluent setters).
 *
 * @param clazz class to search
 * @param propertyName property name (e.g. {@code "age"} resolves {@code setAge})
 * @param propertyType expected single parameter type of the setter
 * @return the matching setter, or {@code null} if none qualifies
 */
@Nullable
public static Method findPropertySetter(
        @Nonnull Class<?> clazz,
        @Nonnull String propertyName,
        @Nonnull Class<?> propertyType
) {
    // Bean naming convention: "name" -> "setName".
    String setterName = "set" + toUpperCase(propertyName.charAt(0)) + propertyName.substring(1);
    Method candidate;
    try {
        candidate = clazz.getMethod(setterName, propertyType);
    } catch (NoSuchMethodException e) {
        return null;
    }
    int modifiers = candidate.getModifiers();
    if (!Modifier.isPublic(modifiers) || Modifier.isStatic(modifiers)) {
        return null;
    }
    Class<?> returnType = candidate.getReturnType();
    if (returnType == void.class || returnType == Void.class || returnType == clazz) {
        return candidate;
    }
    return null;
}
|
// A setter whose return type is neither void nor the declaring class is rejected.
@Test
public void when_findPropertySetter_public_wrongReturnType_then_returnsNull() {
assertNull(findPropertySetter(JavaProperties.class, "intWithParameter", int.class));
}
|
/**
 * Creates a Bounded read transform for the given source (no explicit name).
 */
public static <T> Bounded<T> from(BoundedSource<T> source) {
return new Bounded<>(null, source);
}
|
// A non-serializable unbounded source must be rejected at construction time.
@Test
public void failsWhenCustomUnboundedSourceIsNotSerializable() {
thrown.expect(IllegalArgumentException.class);
Read.from(new NotSerializableUnboundedSource());
}
|
/**
 * Reads the classpath resource at the given path as a UTF-8 string.
 */
public static String readUtf8Str(String resource) {
return getResourceObj(resource).readUtf8Str();
}
|
// The convenience readUtf8Str must match reading via an explicit ClassPathResource.
@Test
public void readXmlTest(){
final String str = ResourceUtil.readUtf8Str("test.xml");
assertNotNull(str);
Resource resource = new ClassPathResource("test.xml");
final String xmlStr = resource.readUtf8Str();
assertEquals(str, xmlStr);
}
|
/**
 * Builds the SQL Server undo statement for a DELETE: re-inserts the before-image
 * row wrapped in SET IDENTITY_INSERT ON/OFF so identity columns can be restored.
 *
 * @return the compound undo SQL
 * @throws ShouldNeverHappenException if the undo log carries no before-image rows
 */
@Override
protected String buildUndoSQL() {
    TableRecords beforeImage = sqlUndoLog.getBeforeImage();
    List<Row> imageRows = beforeImage.getRows();
    if (CollectionUtils.isEmpty(imageRows)) {
        throw new ShouldNeverHappenException("Invalid UNDO LOG");
    }
    Row firstRow = imageRows.get(0);
    List<Field> fields = new ArrayList<>(firstRow.nonPrimaryKeys());
    fields.addAll(getOrderedPkList(beforeImage, firstRow, JdbcConstants.SQLSERVER));
    // delete sql undo log before image all field come from table meta, need add escape.
    // see BaseTransactionalExecutor#buildTableRecords
    StringBuilder columns = new StringBuilder();
    StringBuilder placeholders = new StringBuilder();
    for (Field field : fields) {
        if (columns.length() > 0) {
            columns.append(", ");
            placeholders.append(", ");
        }
        columns.append(ColumnUtils.addEscape(field.getName(), JdbcConstants.SQLSERVER));
        placeholders.append('?');
    }
    String tableName = sqlUndoLog.getTableName();
    return "SET IDENTITY_INSERT " + tableName + " ON; INSERT INTO " + tableName
            + " (" + columns + ") VALUES (" + placeholders + "); SET IDENTITY_INSERT "
            + tableName + " OFF;";
}
|
// The generated undo SQL must be an INSERT naming the table and its ID column.
@Test
public void buildUndoSQL() {
String sql = executor.buildUndoSQL().toUpperCase();
Assertions.assertNotNull(sql);
Assertions.assertTrue(sql.contains("INSERT"));
Assertions.assertTrue(sql.contains("TABLE_NAME"));
Assertions.assertTrue(sql.contains("ID"));
}
|
/**
 * Unschedules an event definition: deletes its job definition and trigger (if
 * any) and marks the definition DISABLED. System-notification-scoped
 * definitions are ignored.
 *
 * @param eventDefinitionId id of the definition to unschedule
 */
public void unschedule(String eventDefinitionId) {
final EventDefinitionDto eventDefinition = getEventDefinitionOrThrowIAE(eventDefinitionId);
if (SystemNotificationEventEntityScope.NAME.equals(eventDefinition.scope())) {
LOG.debug("Ignoring disable for system notification events");
return;
}
getJobDefinition(eventDefinition)
.ifPresent(jobDefinition -> deleteJobDefinitionAndTrigger(jobDefinition, eventDefinition));
eventDefinitionService.updateState(eventDefinitionId, EventDefinition.State.DISABLED);
}
|
// Unscheduling the event definition removes only its own trigger; unrelated triggers survive.
@Test
@MongoDBFixtures("event-processors.json")
public void unscheduleRemovesNotifications() {
assertThat(jobTriggerService.get("61fbcca5b2507945cc120001")).isPresent();
assertThat(jobTriggerService.get("61fbcca5b2507945cc120002")).isPresent();
handler.unschedule("54e3deadbeefdeadbeef0000");
assertThat(jobTriggerService.get("61fbcca5b2507945cc120001")).isNotPresent();
assertThat(jobTriggerService.get("61fbcca5b2507945cc120002")).isPresent();
}
|
/**
 * Returns the names of all installed applications, derived from the
 * sub-directory names of the apps directory. Empty if the directory cannot
 * be listed.
 */
public Set<String> getApplicationNames() {
    ImmutableSet.Builder<String> builder = ImmutableSet.builder();
    // listFiles returns null on I/O error or if appsDir is not a directory.
    File[] subDirs = appsDir.listFiles(File::isDirectory);
    if (subDirs == null) {
        return builder.build();
    }
    for (File dir : subDirs) {
        builder.add(dir.getName());
    }
    return builder.build();
}
|
// After installing one app, exactly that app name is reported.
@Test
public void getAppNames() throws IOException {
saveZippedApp();
Set<String> names = aar.getApplicationNames();
assertEquals("incorrect names", ImmutableSet.of(APP_NAME), names);
}
|
/**
 * Handles a RestRequest, rejecting multipart/related content or accept types
 * up front (isMultipart invokes the callback with the error in that case).
 */
@Override
public void handleRequest(RestRequest request, RequestContext requestContext, Callback<RestResponse> callback)
{
//This code path cannot accept content types or accept types that contain
//multipart/related. This is because these types of requests will usually have very large payloads and therefore
//would degrade server performance since RestRequest reads everything into memory.
if (!isMultipart(request, requestContext, callback))
{
_restRestLiServer.handleRequest(request, requestContext, callback);
}
}
|
// A multiplexed StreamRequest with Accept: multipart/related must fail with a
// 406 StreamException whose body carries the explanatory message.
@Test
public void testStreamRequestMultiplexedRequestMultiPartAcceptType() throws Exception
{
//This test verifies that a StreamRequest sent to the RestLiServer throws an exception if the accept type contains
//multipart/related.
StreamRequest streamRequestMux = new StreamRequestBuilder(new URI("/mux"))
.setHeader(RestConstants.HEADER_ACCEPT, RestConstants.HEADER_VALUE_MULTIPART_RELATED).build(EntityStreams.emptyStream());
Callback<StreamResponse> callback = new Callback<StreamResponse>()
{
@Override
public void onSuccess(StreamResponse restResponse)
{
// Success is a test failure: the request must be rejected.
fail();
}
@Override
public void onError(Throwable e)
{
assertTrue(e instanceof StreamException);
StreamException streamException = (StreamException)e;
StreamResponse streamResponse = streamException.getResponse();
// 406 Not Acceptable for the multipart accept type.
assertEquals(streamResponse.getStatus(), 406);
final FullEntityReader fullEntityReader = new FullEntityReader(new Callback<ByteString>()
{
@Override
public void onError(Throwable e)
{
Assert.fail();
}
@Override
public void onSuccess(ByteString result)
{
//We have the body so assert
assertTrue(result.length() > 0);
assertEquals(result.asString(Charset.defaultCharset()),
"This server cannot handle multiplexed requests that have an accept type of multipart/related");
}
});
streamResponse.getEntityStream().setReader(fullEntityReader);
}
};
_server.handleRequest(streamRequestMux, new RequestContext(), callback);
}
|
/**
 * Uploads the local file to the remote destination path via SFTP put.
 * NOTE(review): always returns true — failures are expected to surface as
 * exceptions from put(); confirm that contract with the interface.
 */
@Override
public boolean upload(String destPath, File file) {
put(FileUtil.getAbsolutePath(file), destPath);
return true;
}
|
// Manual smoke test (disabled: requires a live SFTP server and local file).
@Test
@Disabled
public void uploadTest() {
sshjSftp.upload("/home/test/temp/", new File("C:\\Users\\akwangl\\Downloads\\temp\\辽宁_20190718_104324.CIME"));
}
|
/**
 * Installs a defensive clone of the given context into the thread-local slot,
 * so later mutations of the caller's instance do not leak into this thread.
 */
public static void setContext(RpcInvokeContext context) {
LOCAL.set(RpcInvokeContext.clone(context));
}
|
// setContext must store an equal but distinct (cloned) instance; all fields round-trip.
@Test
public void testSetContext() {
RpcInvokeContext context = new RpcInvokeContext();
context.setTargetGroup("target");
context.setTargetURL("url");
context.setTimeout(111);
context.addCustomHeader("A", "B");
context.put("C", "D");
RpcInvokeContext.setContext(context);
Assert.assertEquals(context.getTargetGroup(), RpcInvokeContext.getContext().getTargetGroup());
Assert.assertEquals(context.getTargetURL(), RpcInvokeContext.getContext().getTargetURL());
Assert.assertEquals(context.getTimeout(), RpcInvokeContext.getContext().getTimeout());
Assert.assertEquals("B", RpcInvokeContext.getContext().getCustomHeader().get("A"));
Assert.assertEquals("D", RpcInvokeContext.getContext().get("C"));
// Identity check: the stored context is a clone, not the same reference.
Assert.assertTrue(context != RpcInvokeContext.getContext());
RpcInvokeContext.removeContext();
}
|
/**
 * Resolves an enum constant by name ignoring case, with no fallback mappings.
 * Delegates to the three-argument overload with an empty fallback map.
 */
public static <T extends Enum<T>> T getForNameIgnoreCase(final @Nullable String value,
final @NotNull Class<T> enumType) {
return getForNameIgnoreCase(value, enumType, Map.of());
}
|
// A name present only in the fallback map resolves (case-insensitively) to the mapped constant.
@Test
void shouldGetEnumForNameIgnoreCaseForFallback() {
TestEnum result = Enums.getForNameIgnoreCase("LEGACY", TestEnum.class, Map.of("legacy", TestEnum.ENUM2));
Assertions.assertEquals(TestEnum.ENUM2, result);
}
|
/**
 * Parses a delimiter-separated list of metric type names into an EnumSet.
 * A single "*" entry selects all metric types.
 */
@VisibleForTesting
static EnumSet<MetricType> parseMetricTypes(String typeComponent) {
final String[] split = typeComponent.split(LIST_DELIMITER);
if (split.length == 1 && split[0].equals("*")) {
return ALL_METRIC_TYPES;
}
return EnumSet.copyOf(
Arrays.stream(split)
.map(s -> ConfigurationUtils.convertToEnum(s, MetricType.class))
.collect(Collectors.toSet()));
}
|
// A single type name parses into a singleton set.
@Test
void testParseMetricTypesSingle() {
final EnumSet<MetricType> types = DefaultMetricFilter.parseMetricTypes("meter");
assertThat(types).containsExactly(MetricType.METER);
}
|
/**
 * Resets the pending index under the write lock. Only legal when no ballots
 * are pending (pendingIndex == 0 and the meta queue is empty) and the new
 * index is ahead of the last committed index; otherwise logs and returns false.
 *
 * @param newPendingIndex the new pending index, must be > lastCommittedIndex
 * @return true if the reset was applied
 */
public boolean resetPendingIndex(final long newPendingIndex) {
final long stamp = this.stampedLock.writeLock();
try {
if (!(this.pendingIndex == 0 && this.pendingMetaQueue.isEmpty())) {
LOG.error("Node {} resetPendingIndex fail, pendingIndex={}, pendingMetaQueueSize={}.",
this.opts.getNodeId(), this.pendingIndex, this.pendingMetaQueue.size());
return false;
}
if (newPendingIndex <= this.lastCommittedIndex) {
LOG.error("Node {} resetPendingIndex fail, newPendingIndex={}, lastCommittedIndex={}.",
this.opts.getNodeId(), newPendingIndex, this.lastCommittedIndex);
return false;
}
this.pendingIndex = newPendingIndex;
// Keep the closure queue's first index in sync with the ballot box.
this.closureQueue.resetFirstIndex(newPendingIndex);
return true;
} finally {
this.stampedLock.unlockWrite(stamp);
}
}
|
// Resetting to 1 updates both the ballot box pending index and the closure queue's first index.
@Test
public void testResetPendingIndex() {
assertEquals(0, closureQueue.getFirstIndex());
assertEquals(0, box.getPendingIndex());
assertTrue(box.resetPendingIndex(1));
assertEquals(0, box.getLastCommittedIndex());
assertEquals(1, closureQueue.getFirstIndex());
assertEquals(1, box.getPendingIndex());
}
|
/**
 * Parses a Record Access Control Point response packet. Layout: opcode (uint8),
 * operator (uint8, must be NULL), then opcode-specific payload. Two opcodes are
 * handled: number-of-stored-records responses (payload size 1/2/4 bytes) and
 * generic response codes (request code + response code). Anything else is
 * routed to onInvalidDataReceived.
 */
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
super.onDataReceived(device, data);
// Minimum packet: opcode + operator + at least one payload byte.
if (data.size() < 3) {
onInvalidDataReceived(device, data);
return;
}
final int opCode = data.getIntValue(Data.FORMAT_UINT8, 0);
if (opCode != OP_CODE_NUMBER_OF_STORED_RECORDS_RESPONSE && opCode != OP_CODE_RESPONSE_CODE) {
onInvalidDataReceived(device, data);
return;
}
final int operator = data.getIntValue(Data.FORMAT_UINT8, 1);
if (operator != OPERATOR_NULL) {
onInvalidDataReceived(device, data);
return;
}
switch (opCode) {
case OP_CODE_NUMBER_OF_STORED_RECORDS_RESPONSE -> {
// Field size is defined per service
int numberOfRecords;
switch (data.size() - 2) {
case 1 -> numberOfRecords = data.getIntValue(Data.FORMAT_UINT8, 2);
case 2 -> numberOfRecords = data.getIntValue(Data.FORMAT_UINT16_LE, 2);
case 4 -> numberOfRecords = data.getIntValue(Data.FORMAT_UINT32_LE, 2);
default -> {
// Other field sizes are not supported
onInvalidDataReceived(device, data);
return;
}
}
onNumberOfRecordsReceived(device, numberOfRecords);
}
case OP_CODE_RESPONSE_CODE -> {
// Response-code packets are exactly 4 bytes long.
if (data.size() != 4) {
onInvalidDataReceived(device, data);
return;
}
final int requestCode = data.getIntValue(Data.FORMAT_UINT8, 2);
final int responseCode = data.getIntValue(Data.FORMAT_UINT8, 3);
if (responseCode == RACP_RESPONSE_SUCCESS) {
onRecordAccessOperationCompleted(device, requestCode);
} else if (responseCode == RACP_ERROR_NO_RECORDS_FOUND) {
onRecordAccessOperationCompletedWithNoRecordsFound(device, requestCode);
} else {
onRecordAccessOperationError(device, requestCode, responseCode);
}
}
}
}
|
// 4-byte payload parses little-endian: 0x01020304 == 16909060.
@Test
public void onNumberOfRecordsReceived_uint32() {
final Data data = new Data(new byte[] { 5, 0, 4, 3, 2, 1 });
callback.onDataReceived(null, data);
assertEquals(numberOfRecords, 16909060);
}
|
/**
 * Atomically decrements and returns the value masked to a non-negative int:
 * the {@code & Integer.MAX_VALUE} clears the sign bit, so decrementing past 0
 * wraps to Integer.MAX_VALUE instead of going negative.
 */
public final int decrementAndGet() {
return INDEX_UPDATER.decrementAndGet(this) & Integer.MAX_VALUE;
}
|
// Decrementing from 0 wraps to Integer.MAX_VALUE; normal values simply decrease.
@Test
void testDecrementAndGet() {
int get = i1.decrementAndGet();
assertEquals(Integer.MAX_VALUE, get);
assertEquals(Integer.MAX_VALUE, i1.get());
get = i2.decrementAndGet();
assertEquals(126, get);
assertEquals(126, i2.get());
get = i3.decrementAndGet();
assertEquals(Integer.MAX_VALUE - 1, get);
assertEquals(Integer.MAX_VALUE - 1, i3.get());
}
|
/**
 * Runs every default health check, aggregates the per-check details, and
 * reports overall health (healthy only if all checks pass) plus the current
 * command-runner server state.
 */
public HealthCheckResponse checkHealth() {
final Map<String, HealthCheckResponseDetail> results = DEFAULT_CHECKS.stream()
.collect(Collectors.toMap(
Check::getName,
check -> check.check(this)
));
final boolean allHealthy = results.values().stream()
.allMatch(HealthCheckResponseDetail::getIsHealthy);
final State serverState = commandRunner.checkServerState();
return new HealthCheckResponse(allHealthy, results, Optional.of(serverState.toString()));
}
|
// All individual checks healthy implies overall healthy; the agent must have queried the server.
@Test
public void shouldCheckHealth() {
// When:
final HealthCheckResponse response = healthCheckAgent.checkHealth();
// Then:
verify(ksqlClient, atLeastOnce()).makeKsqlRequest(eq(SERVER_URI), any(), eq(REQUEST_PROPERTIES));
assertThat(response.getDetails().get(METASTORE_CHECK_NAME).getIsHealthy(), is(true));
assertThat(response.getDetails().get(KAFKA_CHECK_NAME).getIsHealthy(), is(true));
assertThat(response.getDetails().get(COMMAND_RUNNER_CHECK_NAME).getIsHealthy(), is(true));
assertThat(response.getIsHealthy(), is(true));
}
|
/**
 * Returns the virtual links of this network as a snapshot set
 * (deduplicated, detached from the manager's backing collection).
 */
@Override
public Iterable<Link> getLinks() {
return manager.getVirtualLinks(this.networkId())
.stream().collect(Collectors.toSet());
}
|
// Passing a null connect point to getLinks(ConnectPoint) must throw NPE.
@Test(expected = NullPointerException.class)
public void testGetLinksByNullId() {
manager.registerTenantId(TenantId.tenantId(tenantIdValue1));
VirtualNetwork virtualNetwork = manager.createVirtualNetwork(TenantId.tenantId(tenantIdValue1));
LinkService linkService = manager.get(virtualNetwork.id(), LinkService.class);
// test the getLinks() method with a null connect point.
linkService.getLinks(null);
}
|
/**
 * Recursively cleans a parameter-definition map: MAP params are cleaned
 * depth-first (unless they are value-less OPTIONAL maps, which are kept as-is
 * for the filter step), then OPTIONAL params with no value/expression — or
 * MAP params that cleaned down to empty — are dropped. Required params must
 * carry a value or an expression.
 *
 * @param params parameter definitions to clean; may be null or empty
 * @return the cleaned map (the same instance for null/empty input)
 * @throws RuntimeException via Checks if a required param lacks both value and expression
 */
@SuppressWarnings({"SimplifyBooleanReturn"})
public static Map<String, ParamDefinition> cleanupParams(Map<String, ParamDefinition> params) {
  if (params == null || params.isEmpty()) {
    return params;
  }
  // Pass 1: rebuild MAP params with recursively cleaned values.
  Map<String, ParamDefinition> mapped =
      params.entrySet().stream()
          .collect(
              MapHelper.toListMap(
                  Map.Entry::getKey,
                  p -> {
                    ParamDefinition param = p.getValue();
                    if (param.getType() == ParamType.MAP) {
                      MapParamDefinition mapParamDef = param.asMapParamDef();
                      // A value-less OPTIONAL map is left untouched; the
                      // filter pass below decides whether it survives.
                      if (mapParamDef.getValue() == null
                          && (mapParamDef.getInternalMode() == InternalParamMode.OPTIONAL)) {
                        return mapParamDef;
                      }
                      // BUGFIX: the builder previously called .name(...) twice;
                      // the duplicate call has been removed.
                      return MapParamDefinition.builder()
                          .name(mapParamDef.getName())
                          .value(cleanupParams(mapParamDef.getValue()))
                          .expression(mapParamDef.getExpression())
                          .validator(mapParamDef.getValidator())
                          .tags(mapParamDef.getTags())
                          .mode(mapParamDef.getMode())
                          .meta(mapParamDef.getMeta())
                          .build();
                    } else {
                      return param;
                    }
                  }));
  // Pass 2: drop empty optional params; enforce that required ones are populated.
  Map<String, ParamDefinition> filtered =
      mapped.entrySet().stream()
          .filter(
              p -> {
                ParamDefinition param = p.getValue();
                if (param.getInternalMode() == InternalParamMode.OPTIONAL) {
                  if (param.getValue() == null && param.getExpression() == null) {
                    return false;
                  } else if (param.getType() == ParamType.MAP
                      && param.asMapParamDef().getValue() != null
                      && param.asMapParamDef().getValue().isEmpty()) {
                    return false;
                  } else {
                    return true;
                  }
                } else {
                  Checks.checkTrue(
                      param.getValue() != null || param.getExpression() != null,
                      String.format(
                          "[%s] is a required parameter (type=[%s])",
                          p.getKey(), param.getType()));
                  return true;
                }
              })
          .collect(MapHelper.toListMap(Map.Entry::getKey, Map.Entry::getValue));
  return cleanIntermediateMetadata(filtered);
}
|
// An empty OPTIONAL nested map ("winston") is removed while its required sibling survives.
@Test
public void testCleanupOptionalEmptyNestedMapMultipleElements() throws JsonProcessingException {
Map<String, ParamDefinition> allParams =
parseParamDefMap(
"{'data_auditor': {'type': 'MAP','value': {'winston': {'type': 'MAP','value': {}, 'internal_mode': 'OPTIONAL'}, 'audits': {'type': 'STRING_ARRAY','value': ['a','b'], 'internal_mode': 'REQUIRED'}}, 'internal_mode': 'REQUIRED'}}");
Map<String, ParamDefinition> cleanedParams = ParamsMergeHelper.cleanupParams(allParams);
assertFalse(
cleanedParams.get("data_auditor").asMapParamDef().getValue().containsKey("winston"));
}
|
/**
 * Renames the project and notifies registered listeners; synchronized so the
 * rename and the change event are atomic with respect to other controller ops.
 */
@Override
public void renameProject(Project project, final String name) {
synchronized (this) {
project.getLookup().lookup(ProjectInformationImpl.class).setName(name);
fireProjectEvent((pl) -> pl.changed(project));
}
}
|
// Renaming updates the project's name and fires exactly one changed event.
@Test
public void testRenameProject() {
ProjectControllerImpl pc = new ProjectControllerImpl();
pc.addProjectListener(projectListener);
Project project = pc.newProject();
pc.renameProject(project, "foo");
Assert.assertEquals("foo", project.getName());
Mockito.verify(projectListener).changed(project);
}
|
/**
 * Parses forwarded/non-forwarded/read field annotations into the given
 * semantic properties. Delegates to the full overload with strict-mode false.
 */
public static void getSemanticPropsSingleFromString(
SingleInputSemanticProperties result,
String[] forwarded,
String[] nonForwarded,
String[] readSet,
TypeInformation<?> inType,
TypeInformation<?> outType) {
getSemanticPropsSingleFromString(
result, forwarded, nonForwarded, readSet, inType, outType, false);
}
|
// Non-forwarded fields int1 (index 0) and string1 (index 3) lose their forwarding
// targets; the untouched fields 1 and 2 still forward to themselves.
@Test
void testNonForwardedPojo() {
String[] nonForwardedFields = {"int1; string1"};
SingleInputSemanticProperties sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, null, nonForwardedFields, null, pojoType, pojoType);
assertThat(sp.getForwardingTargetFields(0, 0)).isEmpty();
assertThat(sp.getForwardingTargetFields(0, 1)).contains(1);
assertThat(sp.getForwardingTargetFields(0, 2)).contains(2);
assertThat(sp.getForwardingTargetFields(0, 3)).isEmpty();
}
|
/**
 * Switches the output format set: plain formats when monochrome, ANSI-colored otherwise.
 */
@Override
public void setMonochrome(boolean monochrome) {
formats = monochrome ? monochrome() : ansi();
}
|
// With monochrome set, the pretty formatter must interleave after-step hook
// output after each step line, exactly as in the expected transcript.
@Test
void should_print_output_from_afterStep_hooks() {
Feature feature = TestFeatureParser.parse("path/test.feature", "" +
"Feature: feature name\n" +
" Scenario: scenario name\n" +
" Given first step\n" +
" When second step\n");
ByteArrayOutputStream out = new ByteArrayOutputStream();
Runtime.builder()
.withFeatureSupplier(new StubFeatureSupplier(feature))
.withAdditionalPlugins(new PrettyFormatter(out))
.withRuntimeOptions(new RuntimeOptionsBuilder().setMonochrome().build())
.withBackendSupplier(new StubBackendSupplier(
emptyList(),
emptyList(),
asList(
new StubStepDefinition("first step", "path/step_definitions.java:3"),
new StubStepDefinition("second step", "path/step_definitions.java:4")),
singletonList(
new StubHookDefinition(testCaseState -> testCaseState.log("printed from afterstep hook"))),
emptyList()))
.build()
.run();
assertThat(out, bytes(equalToCompressingWhiteSpace("" +
"Scenario: scenario name # path/test.feature:2\n" +
" Given first step # path/step_definitions.java:3\n" +
"\n" +
" printed from afterstep hook\n" +
"\n" +
" When second step # path/step_definitions.java:4\n" +
"\n" +
" printed from afterstep hook" +
"\n")));
}
|
/**
 * Verification mode asserting a request was hit at most {@code count} times.
 *
 * @param count upper bound on hits; must be positive
 * @throws IllegalArgumentException if count is not greater than zero
 */
public static VerificationMode atMost(final int count) {
checkArgument(count > 0, "Times count must be greater than zero");
return new AtMostVerification(count);
}
|
// Two actual hits satisfy atMost(2).
@Test
public void should_verify_expected_request_for_at_most() throws Exception {
final HttpServer server = httpServer(port(), hit);
server.get(by(uri("/foo"))).response("bar");
running(server, () -> {
assertThat(helper.get(remoteUrl("/foo")), is("bar"));
assertThat(helper.get(remoteUrl("/foo")), is("bar"));
});
hit.verify(by(uri("/foo")), atMost(2));
}
|
/**
 * Resolves the NameNode service RPC addresses per nameservice, falling back to
 * the default filesystem address when the service/RPC keys are not set.
 *
 * @param conf configuration to read addresses from
 * @return nameservice -&gt; (namenode id -&gt; address) map, never empty
 * @throws IOException if neither key yields any address
 */
public static Map<String, Map<String, InetSocketAddress>> getNNServiceRpcAddresses(
Configuration conf) throws IOException {
// Use default address as fall back
String defaultAddress;
try {
defaultAddress = NetUtils.getHostPortString(
DFSUtilClient.getNNAddress(conf));
} catch (IllegalArgumentException e) {
// No usable default filesystem address configured.
defaultAddress = null;
}
Map<String, Map<String, InetSocketAddress>> addressList =
DFSUtilClient.getAddresses(conf, defaultAddress,
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
DFS_NAMENODE_RPC_ADDRESS_KEY);
if (addressList.isEmpty()) {
throw new IOException("Incorrect configuration: namenode address "
+ DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
+ DFS_NAMENODE_RPC_ADDRESS_KEY
+ " is not configured.");
}
return addressList;
}
|
// With only fs.defaultFS set, the resolver should fall back to the default
// namenode address, keyed by a null nameservice/namenode id.
@Test
public void testDefaultNamenode() throws IOException {
    HdfsConfiguration conf = new HdfsConfiguration();
    final String hdfs_default = "hdfs://localhost:9999/";
    conf.set(FS_DEFAULT_NAME_KEY, hdfs_default);
    // If DFS_FEDERATION_NAMESERVICES is not set, verify that
    // default namenode address is returned.
    Map<String, Map<String, InetSocketAddress>> addrMap =
        DFSUtil.getNNServiceRpcAddresses(conf);
    assertEquals(1, addrMap.size());
    Map<String, InetSocketAddress> defaultNsMap = addrMap.get(null);
    assertEquals(1, defaultNsMap.size());
    assertEquals(9999, defaultNsMap.get(null).getPort());
}
|
/**
 * Builds a {@link PMMLRuntimeContext} from raw request data: the PMML file name is
 * extracted from the request parameters, the bookkeeping parameters
 * ({@code PMML_FILE_NAME}, {@code PMML_MODEL_NAME}) are stripped from the forwarded
 * request, and the supplied generated resources are merged into the new context.
 *
 * @param pmmlRequestData       incoming request; must contain a PMML_FILE_NAME parameter
 * @param generatedResourcesMap resources to copy into the returned context
 * @return a context wrapping the cleaned request data
 */
static PMMLRuntimeContext getPMMLRuntimeContext(PMMLRequestData pmmlRequestData,
                                                final Map<String, GeneratedResources> generatedResourcesMap) {
    String fileName = (String) pmmlRequestData.getMappedRequestParams().get(PMML_FILE_NAME).getValue();
    // Rebuild the request without the internal file/model-name parameters.
    PMMLRequestData cleaned = new PMMLRequestData(pmmlRequestData.getCorrelationId(),
                                                  pmmlRequestData.getModelName());
    pmmlRequestData.getRequestParams().stream()
            .filter(parameterInfo -> !parameterInfo.getName().equals(PMML_FILE_NAME) && !parameterInfo.getName().equals(PMML_MODEL_NAME))
            .forEach(cleaned::addRequestParam);
    PMMLRuntimeContext toReturn = new PMMLRuntimeContextImpl(cleaned, fileName,
                                                             new KieMemoryCompiler.MemoryCompilerClassLoader(Thread.currentThread().getContextClassLoader()));
    toReturn.getGeneratedResourcesMap().putAll(generatedResourcesMap);
    return toReturn;
}
|
// Verifies that the built context strips the two bookkeeping parameters, keeps all
// other request parameters intact, and merges the supplied generated resources.
@Test
void getPMMLRuntimeContextFromPMMLRequestData() {
    PMMLRequestData pmmlRequestData = getPMMLRequestDataWithInputData(MODEL_NAME, FILE_NAME);
    Map<String, ParameterInfo> mappedRequestParams = pmmlRequestData.getMappedRequestParams();
    final Map<String, GeneratedResources> generatedResourcesMap = new HashMap<>();
    IntStream.range(0, 3).forEach(value -> generatedResourcesMap.put("GenRes_" + value, new GeneratedResources()));
    PMMLRuntimeContext retrieved = PMMLRuntimeHelper.getPMMLRuntimeContext(pmmlRequestData, generatedResourcesMap);
    assertThat(retrieved).isNotNull();
    PMMLRequestData pmmlRequestDataRetrieved = retrieved.getRequestData();
    assertThat(pmmlRequestDataRetrieved).isNotNull();
    assertThat(pmmlRequestDataRetrieved.getMappedRequestParams()).hasSize(mappedRequestParams.size() - 2); //
    // Removing PMML_FILE_NAME and PMML_MODEL_NAME
    assertThat(pmmlRequestDataRetrieved.getMappedRequestParams().entrySet())
            .allMatch(entry -> mappedRequestParams.containsKey(entry.getKey()) &&
                    entry.getValue().getValue().equals(mappedRequestParams.get(entry.getKey()).getValue()));
    Map<String, GeneratedResources> generatedResourcesMapRetrieved = retrieved.getGeneratedResourcesMap();
    assertThat(generatedResourcesMapRetrieved).hasSize(generatedResourcesMap.size() + 1); // PMMLRuntimeContext
    // already contains "pmml" GeneratedResources
    assertThat(generatedResourcesMap.entrySet())
            .allMatch(entry -> generatedResourcesMapRetrieved.containsKey(entry.getKey()) &&
                    entry.getValue().equals(generatedResourcesMapRetrieved.get(entry.getKey())));
}
|
/**
 * Serializes the event to the output stream using the configured charset.
 * When a format string is configured it is interpolated against the event;
 * otherwise the event's default string representation is written.
 */
@Override
public void encode(Event event, OutputStream output) throws IOException {
    final String rendered;
    if (format == null) {
        rendered = event.toString();
    } else {
        rendered = StringInterpolation.evaluate(event, format);
    }
    output.write(rendered.getBytes(charset));
}
|
// A configured "%{host}-%{message}" format must be interpolated from the event fields.
@Test
public void testEncodeWithFormat() throws IOException {
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    Plain encoder = new Plain(new ConfigurationImpl(Collections.singletonMap("format", "%{host}-%{message}")), null);
    String message = "Hello world";
    String host = "test";
    String expectedOutput = host + "-" + message;
    Event e = new Event();
    e.setField("message", message);
    e.setField("host", host);
    encoder.encode(e, outputStream);
    String resultingString = outputStream.toString();
    assertEquals(expectedOutput, resultingString);
}
|
/**
 * Commits a completed segment file by moving it from its temporary upload location
 * into the controller's permanent data directory, then (unless async deletion is
 * enabled) best-effort deletes any leftover temporary files for the segment. The
 * descriptor's segment location is updated to the final URI.
 *
 * <p>Segments already on peer servers (peer download scheme) need no move and
 * return early with the descriptor untouched.
 *
 * @param committingSegmentDescriptor mutated in place: segment location is rewritten
 * @throws Exception if the segment file cannot be moved
 */
public void commitSegmentFile(String realtimeTableName, CommittingSegmentDescriptor committingSegmentDescriptor)
    throws Exception {
    Preconditions.checkState(!_isStopping, "Segment manager is stopping");
    String rawTableName = TableNameBuilder.extractRawTableName(realtimeTableName);
    String segmentName = committingSegmentDescriptor.getSegmentName();
    LOGGER.info("Committing segment file for segment: {}", segmentName);
    // Copy the segment file to the controller
    String segmentLocation = committingSegmentDescriptor.getSegmentLocation();
    Preconditions.checkArgument(segmentLocation != null, "Segment location must be provided");
    // Case-insensitive scheme prefix check for peer-download locations.
    if (segmentLocation.regionMatches(true, 0, CommonConstants.Segment.PEER_SEGMENT_DOWNLOAD_SCHEME, 0,
        CommonConstants.Segment.PEER_SEGMENT_DOWNLOAD_SCHEME.length())) {
        LOGGER.info("No moving needed for segment on peer servers: {}", segmentLocation);
        return;
    }
    URI tableDirURI = URIUtils.getUri(_controllerConf.getDataDir(), rawTableName);
    PinotFS pinotFS = PinotFSFactory.create(tableDirURI.getScheme());
    String uriToMoveTo = moveSegmentFile(rawTableName, segmentName, segmentLocation, pinotFS);
    if (!isTmpSegmentAsyncDeletionEnabled()) {
        // Synchronous best-effort cleanup of temporary files left over from the upload.
        try {
            for (String uri : pinotFS.listFiles(tableDirURI, false)) {
                if (uri.contains(SegmentCompletionUtils.getTmpSegmentNamePrefix(segmentName))) {
                    LOGGER.warn("Deleting temporary segment file: {}", uri);
                    Preconditions.checkState(pinotFS.delete(new URI(uri), true), "Failed to delete file: %s", uri);
                }
            }
        } catch (Exception e) {
            // Cleanup failure must not fail the commit itself.
            LOGGER.warn("Caught exception while deleting temporary segment files for segment: {}", segmentName, e);
        }
    }
    committingSegmentDescriptor.setSegmentLocation(uriToMoveTo);
}
|
// Committing a temporary segment file must move it to its final location
// (reflected in the descriptor) and remove the temporary file.
@Test
public void testCommitSegmentFile()
    throws Exception {
    PinotFSFactory.init(new PinotConfiguration());
    File tableDir = new File(TEMP_DIR, RAW_TABLE_NAME);
    String segmentName = new LLCSegmentName(RAW_TABLE_NAME, 0, 0, CURRENT_TIME_MS).getSegmentName();
    String segmentFileName = SegmentCompletionUtils.generateTmpSegmentFileName(segmentName);
    File segmentFile = new File(tableDir, segmentFileName);
    FileUtils.write(segmentFile, "temporary file contents");
    FakePinotLLCRealtimeSegmentManager segmentManager = new FakePinotLLCRealtimeSegmentManager();
    String segmentLocation = SCHEME + tableDir + "/" + segmentFileName;
    CommittingSegmentDescriptor committingSegmentDescriptor =
        new CommittingSegmentDescriptor(segmentName, PARTITION_OFFSET.toString(), 0, segmentLocation);
    segmentManager.commitSegmentFile(REALTIME_TABLE_NAME, committingSegmentDescriptor);
    Assert.assertEquals(committingSegmentDescriptor.getSegmentLocation(),
        URIUtils.getUri(tableDir.toString(), URIUtils.encode(segmentName)).toString());
    assertFalse(segmentFile.exists());
}
|
/**
 * Fetches the cluster data set listing and returns the ids of the data sets
 * that are marked complete, decoded from their hex representation.
 */
@Override
public CompletableFuture<Set<AbstractID>> listCompletedClusterDatasetIds() {
    return sendRequest(ClusterDataSetListHeaders.INSTANCE)
            .thenApply(
                    responseBody ->
                            responseBody.getDataSets().stream()
                                    .filter(ClusterDataSetEntry::isComplete)
                                    .map(entry -> StringUtils.hexStringToByte(entry.getDataSetId()))
                                    .map(AbstractID::new)
                                    .collect(Collectors.toSet()));
}
|
/**
 * Verifies that {@code listCompletedClusterDatasetIds()} returns exactly the ids
 * served by the REST handler.
 *
 * <p>Fix: the previous version caught every exception and only printed the stack
 * trace, so any setup or request failure made the test pass silently without ever
 * reaching the assertion. Exceptions now propagate and fail the test.
 */
@Test
public void testListCompletedClusterDatasetIds() throws Exception {
    Set<AbstractID> expectedCompletedClusterDatasetIds = new HashSet<>();
    expectedCompletedClusterDatasetIds.add(new AbstractID());
    expectedCompletedClusterDatasetIds.add(new AbstractID());
    try (TestRestServerEndpoint restServerEndpoint =
            createRestServerEndpoint(
                    new TestListCompletedClusterDatasetHandler(
                            expectedCompletedClusterDatasetIds))) {
        try (RestClusterClient<?> restClusterClient =
                createRestClusterClient(restServerEndpoint.getServerAddress().getPort())) {
            final Set<AbstractID> returnedIds =
                    restClusterClient.listCompletedClusterDatasetIds().get();
            assertThat(returnedIds).isEqualTo(expectedCompletedClusterDatasetIds);
        }
    }
}
|
/**
 * Pages through the users of the given customer, restricting the query to the
 * CUSTOMER_USER authority and applying the page link's text search.
 *
 * @return one page of matching users
 */
@Override
public PageData<User> findCustomerUsers(UUID tenantId, UUID customerId, PageLink pageLink) {
    return DaoUtil.toPageData(
            userRepository
                    .findUsersByAuthority(
                            tenantId,
                            customerId,
                            pageLink.getTextSearch(),
                            Authority.CUSTOMER_USER,
                            DaoUtil.toPageable(pageLink)));
}
|
// Pages of size 40 over a fixture of 60 customer users: 40, then 20, then empty.
@Test
public void testFindCustomerUsers() {
    PageLink pageLink = new PageLink(40);
    PageData<User> customerUsers1 = userDao.findCustomerUsers(tenantId, customerId, pageLink);
    assertEquals(40, customerUsers1.getData().size());
    pageLink = pageLink.nextPageLink();
    PageData<User> customerUsers2 = userDao.findCustomerUsers(tenantId, customerId,
            pageLink);
    assertEquals(20, customerUsers2.getData().size());
    pageLink = pageLink.nextPageLink();
    PageData<User> customerUsers3 = userDao.findCustomerUsers(tenantId, customerId,
            pageLink);
    assertEquals(0, customerUsers3.getData().size());
}
|
/**
 * Returns whether this context consists of exactly one shorthand projection
 * ({@code *}) that carries no owner qualifier.
 */
public boolean isUnqualifiedShorthandProjection() {
    if (projections.size() != 1) {
        return false;
    }
    Projection onlyProjection = projections.iterator().next();
    if (!(onlyProjection instanceof ShorthandProjection)) {
        return false;
    }
    return !((ShorthandProjection) onlyProjection).getOwner().isPresent();
}
|
// A shorthand projection that does not satisfy the unqualified criteria must yield false.
@Test
void assertUnqualifiedShorthandProjectionWithWrongShortProjection() {
    ProjectionsContext projectionsContext = new ProjectionsContext(0, 0, true, Collections.singleton(getShorthandProjection()));
    assertFalse(projectionsContext.isUnqualifiedShorthandProjection());
}
|
/**
 * Refreshes the table metadata and plans splits: an initial discovery when no
 * prior enumerator position exists, otherwise an incremental discovery from it.
 */
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
    table.refresh();
    return lastPosition == null
            ? discoverInitialSplits()
            : discoverIncrementalSplits(lastPosition);
}
|
// Planning from a start snapshot id that is not in the table history must fail fast.
@Test
public void testIncrementalFromSnapshotIdWithInvalidIds() throws Exception {
    appendTwoSnapshots();
    // find an invalid snapshotId
    long invalidSnapshotId = 0L;
    while (invalidSnapshotId == snapshot1.snapshotId()
        || invalidSnapshotId == snapshot2.snapshotId()) {
        invalidSnapshotId++;
    }
    ScanContext scanContextWithInvalidSnapshotId =
        ScanContext.builder()
            .startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_SNAPSHOT_ID)
            .startSnapshotId(invalidSnapshotId)
            .build();
    ContinuousSplitPlannerImpl splitPlanner =
        new ContinuousSplitPlannerImpl(
            TABLE_RESOURCE.tableLoader().clone(), scanContextWithInvalidSnapshotId, null);
    assertThatThrownBy(() -> splitPlanner.planSplits(null))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Start snapshot id not found in history: " + invalidSnapshotId);
}
|
/**
 * Sends alarm messages to PagerDuty. Messages are grouped by hook name; for each
 * group every message is posted once per configured integration key. Failures of
 * individual posts are logged and do not abort the remaining deliveries.
 *
 * @param alarmMessages alarms to deliver
 * @throws Exception declared by the interface; individual post errors are swallowed
 */
@Override
public void doAlarm(List<AlarmMessage> alarmMessages) throws Exception {
    Map<String, PagerDutySettings> settingsMap = alarmRulesWatcher.getPagerDutySettings();
    if (settingsMap == null || settingsMap.isEmpty()) {
        // No PagerDuty hooks configured: nothing to deliver.
        return;
    }
    Map<String, List<AlarmMessage>> groupedMessages = groupMessagesByHook(alarmMessages);
    for (Map.Entry<String, List<AlarmMessage>> entry : groupedMessages.entrySet()) {
        var hookName = entry.getKey();
        var messages = entry.getValue();
        var setting = settingsMap.get(hookName);
        // Skip hooks without settings, without integration keys, or with no messages.
        if (setting == null || CollectionUtils.isEmpty(setting.getIntegrationKeys()) || CollectionUtils.isEmpty(
            messages)) {
            continue;
        }
        for (final var integrationKey : setting.getIntegrationKeys()) {
            for (final var alarmMessage : messages) {
                try {
                    post(
                        URI.create(PAGER_DUTY_EVENTS_API_V2_URL),
                        getMessageBody(alarmMessage, integrationKey, setting.getTextTemplate()), Map.of()
                    );
                } catch (Exception e) {
                    // Best-effort delivery: log and continue with the next message/key.
                    log.error("Failed to send alarm message to PagerDuty: {}", integrationKey, e);
                }
            }
        }
    }
}
|
// Manual smoke test against a real PagerDuty account; disabled in CI.
// Outcome must be checked by hand in the PagerDuty console.
@Test
@Disabled
public void testWithRealAccount() throws Exception {
    // replace this with your actual integration key(s) and run this test manually
    List<String> integrationKeys = Arrays.asList(
        "dummy-integration-key"
    );
    Rules rules = new Rules();
    PagerDutySettings setting1 = new PagerDutySettings("setting1", AlarmHooksType.pagerduty, true);
    setting1.setIntegrationKeys(integrationKeys);
    setting1.setTextTemplate("Apache SkyWalking Alarm: \\n %s.");
    PagerDutySettings setting2 = new PagerDutySettings("setting2", AlarmHooksType.pagerduty, false);
    setting2.setIntegrationKeys(integrationKeys);
    setting2.setTextTemplate("Apache SkyWalking Alarm: \\n %s.");
    rules.getPagerDutySettingsMap().put(setting1.getFormattedName(), setting1);
    rules.getPagerDutySettingsMap().put(setting2.getFormattedName(), setting2);
    PagerDutyHookCallback pagerDutyHookCallback = new PagerDutyHookCallback(
        new AlarmRulesWatcher(rules, null)
    );
    List<AlarmMessage> alarmMessages = new ArrayList<>(2);
    AlarmMessage alarmMessage = new AlarmMessage();
    alarmMessage.setScopeId(DefaultScopeDefine.SERVICE);
    alarmMessage.setRuleName("service_resp_time_rule");
    alarmMessage.setAlarmMessage("alarmMessage with [DefaultScopeDefine.All]");
    alarmMessage.getHooks().add(setting1.getFormattedName());
    alarmMessages.add(alarmMessage);
    AlarmMessage anotherAlarmMessage = new AlarmMessage();
    anotherAlarmMessage.setRuleName("service_resp_time_rule_2");
    anotherAlarmMessage.setScopeId(DefaultScopeDefine.ENDPOINT);
    anotherAlarmMessage.setAlarmMessage("anotherAlarmMessage with [DefaultScopeDefine.Endpoint]");
    anotherAlarmMessage.getHooks().add(setting2.getFormattedName());
    alarmMessages.add(anotherAlarmMessage);
    pagerDutyHookCallback.doAlarm(alarmMessages);
    // please check your pagerduty account to see if the alarm is sent
}
|
/**
 * Returns the value stored at the given index, or 0 for an index that is not
 * present in this sparse vector. Lookup is a binary search over the sorted
 * index array.
 */
@Override
public double get(int index) {
    final int position = Arrays.binarySearch(indices, index);
    return position < 0 ? 0 : values[position];
}
|
// Backwards-compatibility check: a sparse vector serialized by Tribuo 4.3.1
// must deserialize to a value equal to the freshly generated vector.
@Test
public void serialization431Test() throws URISyntaxException, IOException {
    Path vectorPath = Paths.get(SparseVectorTest.class.getResource("sparse-vector-431.tribuo").toURI());
    try (InputStream fis = Files.newInputStream(vectorPath)) {
        TensorProto proto = TensorProto.parseFrom(fis);
        Tensor vector = Tensor.deserialize(proto);
        assertEquals(generateVectorA(), vector);
    }
}
|
/**
 * Rewrites each assignment into a sequence of projection contexts that separate
 * remote function calls from local computation, then merges compatible contexts
 * across assignments and dedups identical variables.
 *
 * <p>An assignment whose expression needs no rewrite is kept verbatim as a single
 * local projection; otherwise the rewrite's final projection (which must produce
 * exactly one value) is re-keyed to the assignment's target variable.
 *
 * @return the merged, deduplicated projection contexts for all assignments
 */
@VisibleForTesting
public List<ProjectionContext> planRemoteAssignments(Assignments assignments, VariableAllocator variableAllocator)
{
    ImmutableList.Builder<List<ProjectionContext>> assignmentProjections = ImmutableList.builder();
    for (Map.Entry<VariableReferenceExpression, RowExpression> entry : assignments.getMap().entrySet()) {
        List<ProjectionContext> rewritten = entry.getValue().accept(new Visitor(functionAndTypeManager, variableAllocator), null);
        if (rewritten.isEmpty()) {
            // No remote calls: keep the original expression as one local projection.
            assignmentProjections.add(ImmutableList.of(new ProjectionContext(ImmutableMap.of(entry.getKey(), entry.getValue()), false)));
        }
        else {
            checkState(rewritten.get(rewritten.size() - 1).getProjections().size() == 1, "Expect at most 1 assignment from last projection in rewrite");
            // Replace the last projection's synthetic output variable with the assignment target.
            ProjectionContext last = rewritten.get(rewritten.size() - 1);
            ImmutableList.Builder<ProjectionContext> projectionContextBuilder = ImmutableList.builder();
            projectionContextBuilder.addAll(rewritten.subList(0, rewritten.size() - 1));
            projectionContextBuilder.add(new ProjectionContext(ImmutableMap.of(entry.getKey(), getOnlyElement(last.getProjections().values())), last.isRemote()));
            assignmentProjections.add(projectionContextBuilder.build());
        }
    }
    List<ProjectionContext> mergedProjectionContexts = mergeProjectionContexts(assignmentProjections.build());
    return dedupVariables(mergedProjectionContexts);
}
|
// A mix of remote and local expressions must be planned into 4 projection stages,
// with 4 projections in the final stage.
@Test
void testRemoteAndLocal()
{
    PlanBuilder planBuilder = new PlanBuilder(TEST_SESSION, new PlanNodeIdAllocator(), getMetadata());
    planBuilder.variable("x", INTEGER);
    planBuilder.variable("y", INTEGER);
    PlanRemoteProjections rule = new PlanRemoteProjections(getFunctionAndTypeManager());
    List<ProjectionContext> rewritten = rule.planRemoteAssignments(Assignments.builder()
            .put(planBuilder.variable("a"), planBuilder.rowExpression("unittest.memory.remote_foo(x, y + unittest.memory.remote_foo(x))"))
            .put(planBuilder.variable("b"), planBuilder.rowExpression("abs(x)"))
            .put(planBuilder.variable("c"), planBuilder.rowExpression("abs(unittest.memory.remote_foo())"))
            .put(planBuilder.variable("d"), planBuilder.rowExpression("unittest.memory.remote_foo(x + y, abs(x))"))
            .build(), new VariableAllocator(planBuilder.getTypes().allVariables()));
    assertEquals(rewritten.size(), 4);
    assertEquals(rewritten.get(3).getProjections().size(), 4);
}
|
/**
 * Convenience overload: converts the {@link File} to a path and delegates to the
 * path-based symlink check.
 *
 * @throws IOException if the file cannot be converted or inspected
 */
public static boolean isSymlink(@NonNull File file) throws IOException {
    return isSymlink(fileToPath(file));
}
|
// On Windows, an NTFS directory junction must be reported as a symlink.
@Test
public void testIsSymlink_onWindows_junction() throws Exception {
    assumeTrue("Uses Windows-specific features", Functions.isWindows());
    File targetDir = tmp.newFolder("targetDir");
    File d = tmp.newFolder("dir");
    File junction = WindowsUtil.createJunction(new File(d, "junction"), targetDir);
    assertTrue(Util.isSymlink(junction));
}
|
/**
 * Extracts the charset portion of the message's Content-Type header, or returns
 * {@code null} when the header is absent.
 */
public static CharSequence getCharsetAsSequence(HttpMessage message) {
    final CharSequence contentType = message.headers().get(HttpHeaderNames.CONTENT_TYPE);
    return contentType == null ? null : getCharsetAsSequence(contentType);
}
|
// The raw charset sequence keeps surrounding quotes, and a Content-Type without
// a charset parameter yields null.
@Test
public void testGetCharsetAsRawCharSequence() {
    String QUOTES_CHARSET_CONTENT_TYPE = "text/html; charset=\"utf8\"";
    String SIMPLE_CONTENT_TYPE = "text/html";
    HttpMessage message = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
    message.headers().set(HttpHeaderNames.CONTENT_TYPE, QUOTES_CHARSET_CONTENT_TYPE);
    assertEquals("\"utf8\"", HttpUtil.getCharsetAsSequence(message));
    assertEquals("\"utf8\"", HttpUtil.getCharsetAsSequence(QUOTES_CHARSET_CONTENT_TYPE));
    message.headers().set(HttpHeaderNames.CONTENT_TYPE, "text/html");
    assertNull(HttpUtil.getCharsetAsSequence(message));
    assertNull(HttpUtil.getCharsetAsSequence(SIMPLE_CONTENT_TYPE));
}
|
/**
 * Batch-registers instances for a service. The instances are cached for redo
 * first so they can be re-registered after a reconnect, then the actual batch
 * registration request is sent.
 *
 * @throws NacosException if the registration request fails
 */
@Override
public void batchRegisterService(String serviceName, String groupName, List<Instance> instances)
    throws NacosException {
    redoService.cacheInstanceForRedo(serviceName, groupName, instances);
    doBatchRegisterService(serviceName, groupName, instances);
}
|
// Batch registration must issue exactly one BatchInstanceRequest of the
// batch-register type over the RPC client.
@Test
void testBatchRegisterService() throws NacosException {
    List<Instance> instanceList = new ArrayList<>();
    instance.setHealthy(true);
    instanceList.add(instance);
    response = new BatchInstanceResponse();
    when(this.rpcClient.request(any())).thenReturn(response);
    client.batchRegisterService(SERVICE_NAME, GROUP_NAME, instanceList);
    verify(this.rpcClient, times(1)).request(argThat(request -> {
        if (request instanceof BatchInstanceRequest) {
            BatchInstanceRequest request1 = (BatchInstanceRequest) request;
            request1.setRequestId("1");
            return request1.getType().equals(NamingRemoteConstants.BATCH_REGISTER_INSTANCE);
        }
        return false;
    }));
}
|
/**
 * Validates that a dict data record exists for the given id. A {@code null} id
 * is treated as "nothing to validate" and passes silently.
 *
 * @throws the DICT_DATA_NOT_EXISTS service exception when no record is found
 */
@VisibleForTesting
public void validateDictDataExists(Long id) {
    if (id == null) {
        return;
    }
    if (dictDataMapper.selectById(id) == null) {
        throw exception(DICT_DATA_NOT_EXISTS);
    }
}
|
// Validating an id that exists in the database must complete without throwing.
@Test
public void testValidateDictDataExists_success() {
    // mock data
    DictDataDO dbDictData = randomDictDataDO();
    dictDataMapper.insert(dbDictData);// @Sql: insert an existing record first
    // the call succeeds
    dictDataService.validateDictDataExists(dbDictData.getId());
}
|
/**
 * Dispatches a math method call by name and arity: min/max on two longs,
 * random with no arguments, pow on two numeric arguments. Any other
 * combination is rejected.
 *
 * @throws UnsupportedOperationException for an unrecognized method/arity pair
 */
@Override
public SelType call(String methodName, SelType[] args) {
    final int arity = args.length;
    if (arity == 2 && "min".equals(methodName)) {
        return SelLong.of(Math.min(((SelLong) args[0]).longVal(), ((SelLong) args[1]).longVal()));
    }
    if (arity == 2 && "max".equals(methodName)) {
        return SelLong.of(Math.max(((SelLong) args[0]).longVal(), ((SelLong) args[1]).longVal()));
    }
    if (arity == 0 && "random".equals(methodName)) {
        return SelDouble.of(Math.random());
    }
    if (arity == 2 && "pow".equals(methodName)) {
        final double base = ((Number) args[0].getInternalVal()).doubleValue();
        final double exponent = ((Number) args[1].getInternalVal()).doubleValue();
        return SelDouble.of(Math.pow(base, exponent));
    }
    throw new UnsupportedOperationException(
        type()
            + " DO NOT support calling method: "
            + methodName
            + " with args: "
            + Arrays.toString(args));
}
|
// Calling an unknown method name must raise UnsupportedOperationException.
@Test(expected = UnsupportedOperationException.class)
public void testInvalidCallMethod() {
    SelJavaMath.INSTANCE.call("invalid", new SelType[] {});
}
|
/**
 * Null-safe truth test for a boxed Boolean: returns {@code true} only when the
 * argument is non-null and {@code Boolean.TRUE}.
 */
public static boolean isTrue(Boolean bool) {
    return bool != null && bool.booleanValue();
}
|
// isTrue(true) must return true.
@Test
public void assertIsTrue() {
    Assert.assertTrue(BooleanUtil.isTrue(true));
}
|
/**
 * Returns this plugin's registered name, i.e. the name of the
 * param-mapping plugin enum entry.
 */
@Override
public String named() {
    final String pluginName = PluginEnum.PARAM_MAPPING.getName();
    return pluginName;
}
|
// The plugin must report the PARAM_MAPPING enum name.
@Test
public void tesNamed() {
    assertEquals(this.paramMappingPlugin.named(), PluginEnum.PARAM_MAPPING.getName());
}
|
/**
 * Returns the encoder used to render the log tag.
 */
public PatternLayoutEncoder getTagEncoder() {
    return this.tagEncoder;
}
|
// The tag layout must render only the tag, never the event's stack trace.
@Test
public void tagExcludesStackTraces() {
    // create logging event with throwable
    LoggingEvent event = new LoggingEvent();
    Throwable throwable = new Throwable("throwable");
    ThrowableProxy tp = new ThrowableProxy(throwable);
    event.setThrowableProxy(tp);
    event.setMessage(TAG);
    setTagPattern(TAG, true);
    // if the tags match, it does not include the stack trace
    String actualTag = logcatAppender.getTagEncoder().getLayout().doLayout(event);
    assertThat(TAG, is(actualTag));
}
|
/**
 * Returns this byte array formatted as a hex string, delegating to
 * {@link ByteUtils#formatHex}.
 */
public String formatHex() {
    return ByteUtils.formatHex(bytes);
}
|
// Parameterized over known byte[] / hex-string pairs: formatHex must match each vector.
@Test
@Parameters(method = "bytesToHexStringVectors")
public void formatHexValid(byte[] bytes, String expectedHexString) {
    ByteArray ba = new ByteArray(bytes);
    assertEquals("incorrect hex formatted string", expectedHexString, ba.formatHex());
}
|
/**
 * Static factory: creates an in-memory sorter configured with the given options.
 */
public static InMemorySorter create(Options options) {
    return new InMemorySorter(options);
}
|
// A freshly created sorter must support being iterated more than once.
@Test
public void testMultipleIterations() throws Exception {
    SorterTestUtils.testMultipleIterations(InMemorySorter.create(new InMemorySorter.Options()));
}
|
/**
 * Decodes a recording-session state-change log entry from the buffer and appends a
 * human-readable rendering to the builder. Layout after the common log header:
 * recordingId (long, LE), position (long, LE), state-change (ASCII string),
 * reason (ASCII string).
 */
static void dissectRecordingSessionStateChange(
    final MutableDirectBuffer buffer, final int offset, final StringBuilder builder)
{
    int absoluteOffset = offset;
    absoluteOffset += dissectLogHeader(CONTEXT, RECORDING_SESSION_STATE_CHANGE, buffer, absoluteOffset, builder);
    final long recordingId = buffer.getLong(absoluteOffset, LITTLE_ENDIAN);
    absoluteOffset += SIZE_OF_LONG;
    final long position = buffer.getLong(absoluteOffset, LITTLE_ENDIAN);
    absoluteOffset += SIZE_OF_LONG;
    builder.append(": recordingId=").append(recordingId);
    builder.append(" position=").append(position);
    builder.append(" ");
    // getStringAscii returns the number of characters appended; skip the
    // string's length prefix as well before reading the reason.
    absoluteOffset += buffer.getStringAscii(absoluteOffset, builder);
    absoluteOffset += SIZE_OF_INT;
    builder.append(" reason=\"");
    buffer.getStringAscii(absoluteOffset, builder);
    builder.append("\"");
}
|
// Encodes a state-change entry field by field, then checks the dissected rendering.
@Test
void recordingSessionStateChange()
{
    final String reason = "some other reason";
    internalEncodeLogHeader(buffer, 0, 10, 20, () -> 1_700_000_000L);
    buffer.putLong(LOG_HEADER_LENGTH, 30_000_000_000L, LITTLE_ENDIAN);
    buffer.putLong(LOG_HEADER_LENGTH + SIZE_OF_LONG, 40_000_000_000L, LITTLE_ENDIAN);
    final int stateChangeLength = buffer.putStringAscii(LOG_HEADER_LENGTH + 2 * SIZE_OF_LONG, "x -> y");
    buffer.putStringAscii(LOG_HEADER_LENGTH + 2 * SIZE_OF_LONG + stateChangeLength, reason);
    dissectRecordingSessionStateChange(buffer, 0, builder);
    assertEquals("[1.700000000] " + CONTEXT + ": " + RECORDING_SESSION_STATE_CHANGE.name() + " [10/20]:" +
        " recordingId=30000000000" +
        " position=40000000000" +
        " x -> y" +
        " reason=\"some other reason\"",
        builder.toString());
}
|
/**
 * Synchronous variant: intersects this set with the named sets, storing the
 * result in this set, and returns the size of the intersection.
 */
@Override
public int intersection(String... names) {
    return get(intersectionAsync(names));
}
|
// {5,6} ∩ {1,2,3} ∩ {3,4,5} = {3}: size 1 and the set now holds only 3.
// NOTE(review): the destination set's own elements appear to be replaced by the
// intersection of the named sets — confirm against the intersection contract.
@Test
public void testIntersection() {
    RSet<Integer> set = redisson.getSet("set");
    set.add(5);
    set.add(6);
    RSet<Integer> set1 = redisson.getSet("set1");
    set1.add(1);
    set1.add(2);
    set1.add(3);
    RSet<Integer> set2 = redisson.getSet("set2");
    set2.add(3);
    set2.add(4);
    set2.add(5);
    assertThat(set.intersection("set1", "set2")).isEqualTo(1);
    assertThat(set).containsOnly(3);
}
|
/**
 * Associates the value with the key. If the key is already present its previous
 * value is returned and replaced; otherwise a new entry is prepended to the
 * bucket chain, the size is bumped, and the table is rehashed once the size
 * exceeds the capacity threshold.
 *
 * @return the previous value for the key, or {@code null} if the key was absent
 */
public V put(final int key, final V value) {
    final Entry<V>[] buckets = this.table;
    final int bucketIndex = HashUtil.indexFor(key, buckets.length, mask);
    for (Entry<V> existing = buckets[bucketIndex]; existing != null; existing = existing.hashNext) {
        if (existing.key == key) {
            return existing.setValue(value);
        }
    }
    final Entry<V> newEntry = new Entry<>(key, value);
    newEntry.hashNext = buckets[bucketIndex];
    buckets[bucketIndex] = newEntry;
    _size += 1;
    if (_size > capacity) {
        rehash(HashUtil.nextCapacity(capacity));
    }
    return null;
}
|
// forEachKey/forEachValue must visit every entry while the procedure returns
// true, and stop early once it returns false (keys 0..500 => 501 key visits).
@Test
public void forEachProcedure() {
    final IntHashMap<String> tested = new IntHashMap<>();
    for (int i = 0; i < 100000; ++i) {
        tested.put(i, Integer.toString(i));
    }
    final int[] ii = {0};
    tested.forEachKey(object -> {
        ii[0]++;
        return true;
    });
    tested.forEachValue(object -> {
        ii[0]++;
        return true;
    });
    Assert.assertEquals(tested.size() * 2, ii[0]);
    ii[0] = 0;
    tested.forEachKey(object -> {
        ii[0]++;
        return object < 500;
    });
    tested.forEachValue(object -> {
        ii[0]++;
        return true;
    });
    Assert.assertEquals(tested.size() + 501, ii[0]);
}
|
/**
 * Inserts or updates a key/value pair in this open-addressing map. Entries are
 * stored as adjacent key/value slots; linear probing resolves collisions.
 * Neither key nor value may equal the configured {@code missingValue} sentinel.
 *
 * @return the previous value for the key, or {@code missingValue} if the key was absent
 */
public long put(final long key, final long value) {
    assert key != missingValue : "Invalid key " + key;
    assert value != missingValue : "Invalid value " + value;
    long oldValue = missingValue;
    int index = evenLongHash(key, mask);
    long candidateKey;
    // Probe until an empty slot or the existing key is found.
    while ((candidateKey = entries[index]) != missingValue) {
        if (candidateKey == key) {
            oldValue = entries[index + 1];
            break;
        }
        index = next(index);
    }
    if (oldValue == missingValue) {
        // New key: claim the slot and grow the logical size.
        ++size;
        entries[index] = key;
    }
    entries[index + 1] = value;
    checkResize();
    return oldValue;
}
|
// Re-putting an existing key must return the previously stored value.
@Test
public void putShouldReturnOldValue() {
    map.put(1L, 1L);
    assertEquals(1L, map.put(1L, 2L));
}
|
/**
 * Returns the substring of {@code text} that precedes the first occurrence of
 * {@code before}, or {@code null} when {@code text} is null or does not contain
 * the marker.
 */
public static String before(String text, String before) {
    if (text == null) {
        return null;
    }
    final int markerIndex = text.indexOf(before);
    if (markerIndex == -1) {
        return null;
    }
    return text.substring(0, markerIndex);
}
|
// Covers before() and its overloads: first-match extraction, predicate variant,
// default-value variant, and beforeLast().
@Test
public void testBefore() {
    assertEquals("Hello ", StringHelper.before("Hello World", "World"));
    assertEquals("Hello ", StringHelper.before("Hello World Again", "World"));
    assertNull(StringHelper.before("Hello Again", "Foo"));
    assertTrue(StringHelper.before("mykey:ignore", ":", "mykey"::equals).orElse(false));
    assertFalse(StringHelper.before("ignore:ignore", ":", "mykey"::equals).orElse(false));
    assertEquals("", StringHelper.before("Hello World", "Test", ""));
    assertNull(StringHelper.before("Hello World", "Test", (String) null));
    assertEquals("a:b", StringHelper.beforeLast("a:b:c", ":"));
    assertEquals("", StringHelper.beforeLast("a:b:c", "_", ""));
}
|
/**
 * Returns the HTTP reason phrase from the wrapped response's status line.
 */
@Override
public String getStatusText() {
    return this.response.getStatusLine().getReasonPhrase();
}
|
// The status text must come straight from the status line's reason phrase.
@Test
void testGetStatusText() {
    when(statusLine.getReasonPhrase()).thenReturn("test");
    assertEquals("test", clientHttpResponse.getStatusText());
}
|
/**
 * Evaluates a map-based PMML input, delegating to the map-input execution helper.
 *
 * @return the evaluation output, or empty when the input cannot be handled
 */
@Override
public Optional<EfestoOutputPMML> evaluateInput(EfestoInput<Map<String, Object>> toEvaluate,
                                                EfestoRuntimeContext context) {
    return executeEfestoInputFromMap(toEvaluate, context);
}
|
// An input built for a non-existent model name must evaluate to an empty Optional.
@Test
void evaluateWrongIdentifier() {
    modelLocalUriId = getModelLocalUriIdFromPmmlIdFactory(FILE_NAME, "wrongmodel");
    inputPMML = new BaseEfestoInput<>(modelLocalUriId,
                                      getInputData(MODEL_NAME, FILE_NAME));
    efestoRuntimeContext = getEfestoContext(memoryCompilerClassLoader);
    Optional<EfestoOutputPMML> retrieved = kieRuntimeServicePMMLMapInput.evaluateInput(inputPMML,
                                                                                      efestoRuntimeContext);
    assertThat(retrieved).isNotNull().isNotPresent();
}
|
/**
 * Builds the event context string: a comma-separated list of the affected
 * plugin names followed by the lower-cased event type.
 */
@Override
public String buildContext() {
    final StringBuilder pluginNames = new StringBuilder();
    for (Object item : (Collection<?>) getSource()) {
        if (pluginNames.length() > 0) {
            pluginNames.append(",");
        }
        pluginNames.append(((PluginDO) item).getName());
    }
    return String.format("the plugins[%s] is %s", pluginNames, StringUtils.lowerCase(getType().getType().toString()));
}
|
// The context string must list both plugin names and the lower-cased delete event type.
@Test
public void batchChangePluginBuildContextTest() {
    String context = String.format("the plugins[%s] is %s", "test-plugin,test-plugin-two", EventTypeEnum.PLUGIN_DELETE.getType().toString().toLowerCase());
    assertEquals(context, deletedEvent.buildContext());
}
|
/**
 * Converts a JDBC bit value (a boxed Boolean) to its PostgreSQL text form:
 * "1" for true, "0" for false, {@code null} for null.
 *
 * @throws ClassCastException if the value is neither null nor a Boolean
 */
public static String getTextValue(final Object jdbcBitValue) {
    if (null == jdbcBitValue) {
        return null;
    }
    final boolean bitSet = (Boolean) jdbcBitValue;
    return bitSet ? "1" : "0";
}
|
// A true bit value must render as the text "1".
@Test
void assertGetTextBitValue() {
    Object jdbcBitValue = true;
    String textValue = PostgreSQLTextBitUtils.getTextValue(jdbcBitValue);
    assertThat(textValue, is("1"));
}
|
/**
 * Translates a Connect value into its KSQL representation; a null Connect
 * value maps to null.
 */
@Override
public Object toKsqlRow(final Schema connectSchema, final Object connectData) {
    return connectData == null
        ? null
        : toKsqlValue(schema, connectSchema, connectData, "");
}
|
// An unset optional field in the source struct must translate to a null KSQL value.
@Test
public void shouldTranslateNullValueCorrectly() {
    // Given:
    final Schema rowSchema = SchemaBuilder.struct()
        .field("INT", SchemaBuilder.OPTIONAL_INT32_SCHEMA)
        .optional()
        .build();
    final Struct connectStruct = new Struct(rowSchema);
    final ConnectDataTranslator connectToKsqlTranslator = new ConnectDataTranslator(rowSchema);
    // When:
    final Struct row = (Struct) connectToKsqlTranslator.toKsqlRow(rowSchema, connectStruct);
    // Then:
    assertThat(row.get("INT"), is(nullValue()));
}
|
/**
 * Returns a copy of the lines where any line longer than {@code MAX_FOOTER_WIDTH}
 * is cut to that width, with a trailing "..." replacing the removed tail.
 */
@VisibleForTesting
static List<String> truncateToMaxWidth(List<String> lines) {
    List<String> result = new ArrayList<>(lines.size());
    for (String line : lines) {
        result.add(
            line.length() <= MAX_FOOTER_WIDTH
                ? line
                : line.substring(0, MAX_FOOTER_WIDTH - 3) + "...");
    }
    return result;
}
|
// Over-long lines get truncated with a "..." suffix; short lines pass through unchanged.
@Test
public void testTruncateToMaxWidth() {
    List<String> lines =
        Arrays.asList(
            "this line of text is way too long and will be truncated",
            "this line will not be truncated");
    Assert.assertEquals(
        Arrays.asList(
            "this line of text is way too long and will be t...",
            "this line will not be truncated"),
        AnsiLoggerWithFooter.truncateToMaxWidth(lines));
}
|
/**
 * Creates a trash checkpoint by delegating to the configured trash policy.
 *
 * @throws IOException if the checkpoint cannot be created
 */
public void checkpoint() throws IOException {
    trashPolicy.createCheckpoint();
}
|
// Restarting the emptier with a new interval must pick up the new configuration.
// NOTE(review): the inline comments say "2 times" / "1 time" deletion while the
// expected counts passed to verifyAuditableTrashEmptier are 3 and 2 — presumably
// the expected value includes one extra checkpoint/initial run; confirm against
// verifyAuditableTrashEmptier's semantics.
@Test
public void testTrashRestarts() throws Exception {
    Configuration conf = new Configuration();
    conf.setClass("fs.trash.classname",
        AuditableTrashPolicy.class,
        TrashPolicy.class);
    conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
    conf.set(FS_TRASH_INTERVAL_KEY, "50"); // in milliseconds for test
    Trash trash = new Trash(conf);
    // create 5 checkpoints
    for(int i=0; i<5; i++) {
        trash.checkpoint();
    }
    // Run the trash emptier for 120ms, it should run
    // 2 times deletion as the interval is 50ms.
    // Verify the checkpoints number when shutting down the emptier.
    verifyAuditableTrashEmptier(trash, 120, 3);
    // reconfigure the interval to 100 ms
    conf.set(FS_TRASH_INTERVAL_KEY, "100");
    Trash trashNew = new Trash(conf);
    // Run the trash emptier for 120ms, it should run
    // 1 time deletion.
    verifyAuditableTrashEmptier(trashNew, 120, 2);
}
|
/**
 * Applies an updated Hystrix rule: resets the cached Hystrix properties, and when
 * the rule's concurrency limit or isolation strategy differs from the cached
 * handle, removes the stale Hystrix command(s) so they are rebuilt with the new
 * settings. Finally the new handle is cached under the rule's key.
 */
@Override
public void handlerRule(final RuleData ruleData) {
    HystrixPropertiesFactory.reset();
    Optional.ofNullable(ruleData.getHandle()).ifPresent(rule -> {
        HystrixHandle hystrixHandle = GsonUtils.getInstance().fromJson(rule, HystrixHandle.class);
        String key = CacheKeyUtils.INST.getKey(ruleData);
        Optional.ofNullable(CACHED_HANDLE.get().obtainHandle(key)).ifPresent(hystrixHandleCache -> {
            // Concurrency limit changed: drop the old command keyed by the new handle's key.
            if (hystrixHandleCache.getMaxConcurrentRequests() != hystrixHandle.getMaxConcurrentRequests()) {
                String commandKey = hystrixHandle.getCommandKey();
                Command command = new HystrixCommand(HystrixBuilder.build(hystrixHandle), null, null, null);
                command.removeCommandKey(commandKey);
            }
            // fix ISSUE #3820, in same rule, change isolation strategy, can't circuit breaker
            if (hystrixHandleCache.getExecutionIsolationStrategy() != hystrixHandle.getExecutionIsolationStrategy()) {
                Command command = new HystrixCommand(HystrixBuilder.build(hystrixHandleCache), null, null, null);
                if (StringUtils.hasText(hystrixHandle.getCommandKey())) {
                    command.removeCommandKey(hystrixHandle.getCommandKey());
                } else {
                    // delete all old Commands of the specified group
                    command.cleanCommand();
                }
            }
        });
        CACHED_HANDLE.get().cachedHandle(key, hystrixHandle);
    });
}
|
// Handling a rule must leave the Hystrix properties factory able to serve command properties.
@Test
public void testHandlerRUle() {
    hystrixPluginDataHandler.handlerRule(mock(RuleData.class));
    assertNotNull(HystrixPropertiesFactory.getCommandProperties(mock(HystrixCommandKey.class), mock(Setter.class)));
}
|
/**
 * Deletes a combination activity. The activity must exist and must not be in the
 * enabled state; otherwise a service exception is thrown and the transaction rolls back.
 *
 * @param id id of the activity to delete
 */
@Override
@Transactional(rollbackFor = Exception.class)
public void deleteCombinationActivity(Long id) {
    // validate the activity exists
    CombinationActivityDO activity = validateCombinationActivityExists(id);
    // validate the status: enabled activities must be closed/ended before deletion
    if (CommonStatusEnum.isEnable(activity.getStatus())) {
        throw exception(COMBINATION_ACTIVITY_DELETE_FAIL_STATUS_NOT_CLOSED_OR_END);
    }
    // delete the record
    combinationActivityMapper.deleteById(id);
}
|
// Deleting a non-existent activity id must raise COMBINATION_ACTIVITY_NOT_EXISTS.
@Test
public void testDeleteCombinationActivity_notExists() {
    // prepare parameters
    Long id = randomLongId();
    // call and assert the expected service exception
    assertServiceException(() -> combinationActivityService.deleteCombinationActivity(id), COMBINATION_ACTIVITY_NOT_EXISTS);
}
|
/**
 * Builds an error response mirroring the request's structure: every marker's
 * topic-partition is mapped to the error derived from the given throwable,
 * keyed by producer id.
 */
@Override
public WriteTxnMarkersResponse getErrorResponse(int throttleTimeMs, Throwable e) {
    final Errors mappedError = Errors.forException(e);
    final Map<Long, Map<TopicPartition, Errors>> errorsByProducer = new HashMap<>(data.markers().size());
    for (WritableTxnMarker marker : data.markers()) {
        final Map<TopicPartition, Errors> partitionErrors = new HashMap<>();
        for (WritableTxnMarkerTopic topic : marker.topics()) {
            for (Integer partitionIndex : topic.partitionIndexes()) {
                partitionErrors.put(new TopicPartition(topic.name(), partitionIndex), mappedError);
            }
        }
        errorsByProducer.put(marker.producerId(), partitionErrors);
    }
    return new WriteTxnMarkersResponse(errorsByProducer);
}
|
/** Every request version maps an exception onto the same per-partition error set. */
@Test
public void testGetErrorResponse() {
    WriteTxnMarkersRequest.Builder builder = new WriteTxnMarkersRequest.Builder(ApiKeys.WRITE_TXN_MARKERS.latestVersion(), markers);
    for (short version : ApiKeys.WRITE_TXN_MARKERS.allVersions()) {
        WriteTxnMarkersRequest request = builder.build(version);
        WriteTxnMarkersResponse errorResponse =
            request.getErrorResponse(throttleTimeMs, Errors.UNKNOWN_PRODUCER_ID.exception());
        assertEquals(Collections.singletonMap(
            TOPIC_PARTITION, Errors.UNKNOWN_PRODUCER_ID), errorResponse.errorsByProducerId().get(PRODUCER_ID));
        assertEquals(Collections.singletonMap(Errors.UNKNOWN_PRODUCER_ID, 1), errorResponse.errorCounts());
        // Write txn marker has no throttle time defined in response.
        assertEquals(0, errorResponse.throttleTimeMs());
    }
}
|
/**
 * Returns {@code true} iff a column with the given name exists in the
 * HEADERS namespace of this schema.
 */
public boolean isHeaderColumn(final ColumnName columnName) {
    return findColumnMatching(withName(columnName).and(withNamespace(HEADERS)))
        .isPresent();
}
|
/** Only the header column matches; keys, values and pseudo columns do not. */
@Test
public void shouldMatchHeaderColumnName() {
    assertThat(SOME_SCHEMA.isHeaderColumn(H0), is(true));
    assertThat(SOME_SCHEMA.isHeaderColumn(ROWPARTITION_NAME), is(false));
    assertThat(SOME_SCHEMA.isHeaderColumn(K0), is(false));
    assertThat(SOME_SCHEMA.isHeaderColumn(F0), is(false));
}
|
/** Returns this component's name. */
@Override
public String getName() {
    return name;
}
|
/** An over-long name is truncated to 500 chars total, ending in "..." (by the builder). */
@Test
public void keep_500_first_characters_of_name() {
    String veryLongString = repeat("a", 3_000);
    ComponentImpl underTest = buildSimpleComponent(FILE, "file")
        .setName(veryLongString)
        .build();
    // 497 kept characters plus the three-character ellipsis = 500.
    String expectedName = repeat("a", 500 - 3) + "...";
    assertThat(underTest.getName()).isEqualTo(expectedName);
}
|
/**
 * Reads every columnar batch from the given parquet file.
 *
 * @param filePath       location of the parquet file
 * @param physicalSchema schema used to decode the file
 * @param hadoopConf     Hadoop configuration for file access
 * @return all batches read from the file, in file order
 */
public static List<ColumnarBatch> readParquetFile(String filePath, StructType physicalSchema, Configuration hadoopConf) {
    try (Timer ignored = Tracers.watchScope(Tracers.get(), EXTERNAL,
            "DeltaLakeParquetHandler.readParquetFileAndGetColumnarBatch")) {
        io.delta.kernel.defaults.internal.parquet.ParquetFileReader batchReader =
                new io.delta.kernel.defaults.internal.parquet.ParquetFileReader(hadoopConf);
        CloseableIterator<ColumnarBatch> currentFileReader = batchReader.read(filePath, physicalSchema, Optional.empty());
        List<ColumnarBatch> result = Lists.newArrayList();
        try {
            while (currentFileReader != null && currentFileReader.hasNext()) {
                result.add(currentFileReader.next());
            }
        } finally {
            // Close the iterator even if iteration throws, to avoid leaking file handles.
            Utils.closeCloseables(currentFileReader);
        }
        return result;
    }
}
|
/**
 * Verifies weight-based eviction of the checkpoint cache: the max weight only
 * fits one entry, so loading a second key evicts the first.
 */
@Test
public void testCheckpointCache() throws ExecutionException {
    LoadingCache<Pair<String, StructType>, List<ColumnarBatch>> checkpointCache = CacheBuilder.newBuilder()
        .expireAfterWrite(3600, TimeUnit.SECONDS)
        .weigher((key, value) ->
            Math.toIntExact(SizeEstimator.estimate(key) + SizeEstimator.estimate(value)))
        .maximumWeight(1024 * 2)
        .concurrencyLevel(1)
        .build(new CacheLoader<>() {
            @NotNull
            @Override
            public List<ColumnarBatch> load(@NotNull Pair<String, StructType> pair) {
                return DeltaLakeParquetHandler.readParquetFile(pair.first, pair.second, hdfsConfiguration);
            }
        });
    List<ColumnarBatch> columnarBatches = Lists.newArrayList();
    // Stub the parquet reader so no real file access happens during the test.
    new MockUp<DeltaLakeParquetHandler>() {
        @Mock
        public List<ColumnarBatch> readParquetFile(@NotNull String path, @NotNull StructType schema,
                                                   @NotNull Configuration hdfsConfiguration) {
            return columnarBatches;
        }
    };
    List<StructField> fields = ImmutableList.of(
        new StructField("col1", IntegerType.INTEGER, true),
        new StructField("col2", StringType.STRING, true)
    );
    StructType deltaType = new io.delta.kernel.types.StructType(fields);
    String location1 = "hdfs://127.0.0.1:9000/delta_lake/00000000000000000030.checkpoint.parquet.1";
    Pair<String, StructType> pair1 = Pair.create(location1, deltaType);
    checkpointCache.get(pair1);
    Assert.assertEquals(1, checkpointCache.size());
    String location2 = "hdfs://127.0.0.1:9000/delta_lake/00000000000000000030.checkpoint.parquet.2";
    Pair<String, StructType> pair2 = Pair.create(location2, deltaType);
    checkpointCache.get(pair2);
    // Loading pair2 pushes the cache past its weight budget, evicting pair1.
    Assert.assertEquals(1, checkpointCache.size());
    Assert.assertFalse(checkpointCache.asMap().containsKey(pair1));
    Assert.assertTrue(checkpointCache.asMap().containsKey(pair2));
}
|
/**
 * Replaces {@code ${key}} placeholders in the expression using the given map.
 *
 * @param expression the text containing placeholders
 * @param params     placeholder values keyed by property name
 * @return the expression with every resolvable placeholder substituted
 */
public static String replaceProperty(String expression, Map<String, String> params) {
    // Adapt the raw map to a Configuration and delegate to the main overload.
    InmemoryConfiguration configuration = new InmemoryConfiguration(params);
    return replaceProperty(expression, configuration);
}
|
/** Placeholders resolve from the map when present, and are left untouched otherwise. */
@Test
void testReplaceProperty() throws Exception {
    String s = ConfigUtils.replaceProperty("1${a.b.c}2${a.b.c}3", Collections.singletonMap("a.b.c", "ABC"));
    assertEquals("1ABC2ABC3", s);
    // With no mapping available the placeholders survive verbatim.
    s = ConfigUtils.replaceProperty("1${a.b.c}2${a.b.c}3", Collections.<String, String>emptyMap());
    assertEquals("1${a.b.c}2${a.b.c}3", s);
}
|
/**
 * Resolves the migrations directory: an explicit non-empty override in the
 * config wins; otherwise the directory is derived from the config file path.
 *
 * @param configFilePath path to the migrations config file
 * @param config         migration configuration possibly carrying an override
 * @return the resolved migrations directory
 */
public static String getMigrationsDir(
    final String configFilePath,
    final MigrationConfig config
) {
    final String override = config.getString(MigrationConfig.KSQL_MIGRATIONS_DIR_OVERRIDE);
    if (override == null || override.isEmpty()) {
        return getMigrationsDirFromConfigFile(configFilePath);
    }
    return override;
}
|
/** An empty override falls back to the directory derived from the config file location. */
@Test
public void shouldDefaultMigrationsDirBasedOnConfigPath() {
    // Given:
    when(config.getString(MigrationConfig.KSQL_MIGRATIONS_DIR_OVERRIDE)).thenReturn("");
    // When / Then:
    assertThat(MigrationsDirectoryUtil.getMigrationsDir(migrationsConfigPath, config),
        is(Paths.get(testDir, MigrationsDirectoryUtil.MIGRATIONS_DIR).toString()));
}
|
/**
 * Returns a {@link ResolvingDecoder} for the (readSchema, fileSchema) pair,
 * configured to read from the given decoder.
 *
 * <p>Resolvers are cached per thread in a two-level map keyed first by the read
 * schema, then by the file schema. The inner map is a {@code WeakHashMap}, so
 * entries are dropped once the file schema becomes unreachable.
 */
@VisibleForTesting
static ResolvingDecoder resolve(Decoder decoder, Schema readSchema, Schema fileSchema)
    throws IOException {
  Map<Schema, Map<Schema, ResolvingDecoder>> cache = DECODER_CACHES.get();
  Map<Schema, ResolvingDecoder> fileSchemaToResolver =
      cache.computeIfAbsent(readSchema, k -> new WeakHashMap<>());
  ResolvingDecoder resolver =
      fileSchemaToResolver.computeIfAbsent(fileSchema, schema -> newResolver(readSchema, schema));
  // Re-point the cached resolver at the caller's decoder before handing it out.
  resolver.configure(decoder);
  return resolver;
}
|
/**
 * Exercises the per-schema decoder cache: equal-but-distinct schema instances get
 * their own entries, and entries disappear as their schemas become unreachable
 * (weak references), which the trailing null-assignments verify.
 */
@SuppressWarnings("UnusedAssignment") // the unused assignments are necessary for this test
@Test
public void testDecoderCachingReadSchemaSameAsFileSchema() throws Exception {
    Decoder dummyDecoder = DecoderFactory.get().binaryDecoder(new byte[] {}, null);
    Schema fileSchema = avroSchema();
    ResolvingDecoder resolvingDecoder =
        DecoderResolver.resolve(dummyDecoder, fileSchema, fileSchema);
    assertThat(DecoderResolver.DECODER_CACHES.get()).hasSize(1);
    assertThat(DecoderResolver.DECODER_CACHES.get().get(fileSchema)).hasSize(1);
    checkCached(fileSchema, fileSchema);
    // Equal but new one: identity-based caching yields a separate resolver.
    Schema fileSchema1 = avroSchema();
    assertThat(fileSchema1).isEqualTo(fileSchema);
    ResolvingDecoder resolvingDecoder1 =
        DecoderResolver.resolve(dummyDecoder, fileSchema1, fileSchema1);
    assertThat(resolvingDecoder1).isNotSameAs(resolvingDecoder);
    assertThat(DecoderResolver.DECODER_CACHES.get()).hasSize(2);
    assertThat(DecoderResolver.DECODER_CACHES.get().get(fileSchema1)).hasSize(1);
    checkCached(fileSchema1, fileSchema1);
    // A structurally different schema also gets its own entry.
    Schema fileSchema2 = avroSchema("manifest_path", "manifest_length");
    ResolvingDecoder resolvingDecoder2 =
        DecoderResolver.resolve(dummyDecoder, fileSchema2, fileSchema2);
    assertThat(resolvingDecoder2).isNotSameAs(resolvingDecoder);
    assertThat(DecoderResolver.DECODER_CACHES.get()).hasSize(3);
    assertThat(DecoderResolver.DECODER_CACHES.get().get(fileSchema2)).hasSize(1);
    checkCached(fileSchema2, fileSchema2);
    checkCachedSize(3);
    // Dropping each strong reference lets the weak cache entry be collected.
    fileSchema = null;
    checkCachedSize(2);
    fileSchema1 = null;
    checkCachedSize(1);
    fileSchema2 = null;
    checkCachedSize(0);
}
|
/** Emits every stored post from the in-memory map as a reactive stream. */
Flux<Post> findAll() {
    return Flux.fromIterable(this.data.values());
}
|
/**
 * Streams both seeded posts in order and completes.
 * NOTE(review): assumes the backing map preserves insertion order — confirm fixture.
 */
@Test
public void testGetAllPosts() {
    StepVerifier.create(posts.findAll())
        .consumeNextWith(p -> assertTrue(p.getTitle().equals("post one")))
        .consumeNextWith(p -> assertTrue(p.getTitle().equals("post two")))
        .expectComplete()
        .verify();
}
|
/**
 * Returns the main (non-side) inputs of the applied transform: every input whose
 * tag is not registered as an additional (side) input.
 *
 * @throws IllegalArgumentException if the transform has inputs but none of them
 *     is a main input
 */
public static Collection<PValue> nonAdditionalInputs(AppliedPTransform<?, ?, ?> application) {
    ImmutableList.Builder<PValue> mainInputs = ImmutableList.builder();
    PTransform<?, ?> transform = application.getTransform();
    for (Map.Entry<TupleTag<?>, PCollection<?>> input : application.getInputs().entrySet()) {
        if (!transform.getAdditionalInputs().containsKey(input.getKey())) {
            mainInputs.add(input.getValue());
        }
    }
    // Build once; the original built the list twice (for the check and the return).
    ImmutableList<PValue> built = mainInputs.build();
    checkArgument(
        !built.isEmpty() || application.getInputs().isEmpty(),
        "Expected at least one main input if any inputs exist");
    return built;
}
|
/** A transform whose inputs are all side inputs must be rejected. */
@Test
public void nonAdditionalInputsWithOnlyAdditionalInputsThrows() {
    Map<TupleTag<?>, PCollection<?>> additionalInputs = new HashMap<>();
    additionalInputs.put(new TupleTag<String>() {}, pipeline.apply(Create.of("1, 2", "3")));
    additionalInputs.put(new TupleTag<Long>() {}, pipeline.apply(GenerateSequence.from(3L)));
    AppliedPTransform<PInput, POutput, TestTransform> transform =
        AppliedPTransform.of(
            "additional-only",
            additionalInputs,
            Collections.emptyMap(),
            new TestTransform((Map) additionalInputs),
            ResourceHints.create(),
            pipeline);
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("at least one");
    TransformInputs.nonAdditionalInputs(transform);
}
|
/**
 * Checks that {@code value} is greater than zero and returns it.
 * Delegates to the long overload; the narrowing cast is lossless because the
 * input started out as an int.
 */
public static int gt0(int value, String name) {
    long checked = gt0((long) value, name);
    return (int) checked;
}
|
/** A strictly positive value passes the check and is returned unchanged. */
@Test
public void checkGTZeroGreater() {
    // JUnit's assertEquals takes (expected, actual); the original had them swapped.
    assertEquals(120, Check.gt0(120, "test"));
}
|
/** Hands out a fresh record builder bound to this schema instance. */
@Override
public GenericRecordBuilder newRecordBuilder() {
    ProtobufNativeRecordBuilderImpl builder = new ProtobufNativeRecordBuilderImpl(this);
    return builder;
}
|
/** A record written via the generic schema decodes correctly through the class-based schema. */
@Test
public void testClazzBasedReaderByClazzGenericWriterSchema() {
    genericmessage = genericProtobufNativeSchema.newRecordBuilder().set("stringField", STRING_FIELD_VLUE).set("doubleField", DOUBLE_FIELD_VLUE).build();
    byte[] messageBytes = genericProtobufNativeSchema.encode(genericmessage);
    message = clazzBasedProtobufNativeSchema.decode(messageBytes);
    assertEquals(message.getStringField(), STRING_FIELD_VLUE);
    assertEquals(message.getDoubleField(), DOUBLE_FIELD_VLUE);
}
|
/**
 * Sets a complex (non-primitive) property on the wrapped object via its setter.
 * Emits a warning and returns silently when no property descriptor or setter
 * exists, or when the sanity check on parameter types fails.
 *
 * @param name            the property name (decapitalized before lookup)
 * @param complexProperty the value to inject
 */
public void setComplexProperty(String name, Object complexProperty) {
    String dName = Introspector.decapitalize(name);
    PropertyDescriptor propertyDescriptor = getPropertyDescriptor(dName);
    if (propertyDescriptor == null) {
        addWarn("Could not find PropertyDescriptor for [" + name + "] in "
            + objClass.getName());
        return;
    }
    Method setter = propertyDescriptor.getWriteMethod();
    if (setter == null) {
        // Fixed message grammar ("Not setter method" -> "No setter method").
        addWarn("No setter method for property [" + name + "] in "
            + obj.getClass().getName());
        return;
    }
    Class<?>[] paramTypes = setter.getParameterTypes();
    if (!isSanityCheckSuccessful(name, setter, paramTypes, complexProperty)) {
        return;
    }
    try {
        invokeMethodWithSingleParameterOnThisObject(setter, complexProperty);
    } catch (Exception e) {
        // Fixed message: report the component being set, not the parent twice.
        addError("Could not set component " + complexProperty + " for parent component "
            + obj, e);
    }
}
|
/** Setting a complex property routes the value through the target's setter. */
@Test
public void testSetComplexProperty() {
    Door door = new Door();
    setter.setComplexProperty("door", door);
    assertEquals(door, house.getDoor());
}
|
/**
 * Sends every deliverable request and returns how many were actually delivered.
 * Requests with a blank recipient, or whose notification cannot be formatted,
 * are skipped. Does nothing when the set is empty or SMTP is not configured.
 *
 * @param deliveries the requests to send
 * @return the number of successfully delivered emails
 */
public int deliverAll(Set<EmailDeliveryRequest> deliveries) {
    if (deliveries.isEmpty() || !isActivated()) {
        LOG.debug(SMTP_HOST_NOT_CONFIGURED_DEBUG_MSG);
        return 0;
    }
    int delivered = 0;
    for (EmailDeliveryRequest request : deliveries) {
        if (request.recipientEmail().isBlank()) {
            continue;
        }
        EmailMessage emailMessage = format(request.notification());
        if (emailMessage != null) {
            emailMessage.setTo(request.recipientEmail());
            if (deliver(emailMessage)) {
                delivered++;
            }
        }
    }
    return delivered;
}
|
/** An empty delivery set short-circuits: no settings read, no mail sent. */
@Test
public void deliverAll_has_no_effect_if_set_is_empty() {
    EmailSmtpConfiguration emailSettings = mock(EmailSmtpConfiguration.class);
    EmailNotificationChannel emailNotificationChannel = new EmailNotificationChannel(emailSettings, server, null, null);
    int count = emailNotificationChannel.deliverAll(Collections.emptySet());
    assertThat(count).isZero();
    verifyNoInteractions(emailSettings);
    assertThat(smtpServer.getMessages()).isEmpty();
}
|
/**
 * Marks a workflow as deleted and publishes a job event that triggers asynchronous
 * cleanup of its data (versions, instances, etc.).
 *
 * <p>Preconditions enforced here: the id must not be an inline foreach workflow,
 * and the workflow must have no queued or running instances.
 *
 * @param workflowId the workflow to delete
 * @param author     the user requesting the deletion (recorded in the timeline)
 * @return the timeline event describing the deletion
 * @throws MaestroUnprocessableEntityException for inline foreach workflow ids
 * @throws MaestroNotFoundException if the workflow does not exist or cannot be deleted
 */
public TimelineEvent deleteWorkflow(String workflowId, User author) {
    if (IdHelper.isInlineWorkflowId(workflowId)) {
        throw new MaestroUnprocessableEntityException(
            "Cannot delete an inline foreach workflow [%s], please delete its parent concrete workflow instead.",
            workflowId);
    }
    // Sum the non-terminal (queued/running) instance counts across all indexes.
    final int nonTerminalCnt = Arrays.stream(getIndexedInstanceCount(workflowId, false)).sum();
    Checks.checkTrue(
        nonTerminalCnt == 0,
        "Cannot delete the workflow [%s] "
            + "because there are still [%s] number of queued or running workflow instances."
            + "Please delete it after taking care of them, e.g. stopping all of them.",
        workflowId,
        nonTerminalCnt);
    TimelineEvent info =
        TimelineLogEvent.info(
            "The workflow is deleted by [%s]. "
                + "All associated workflow data (e.g. versions and instances) will be deleted shortly.",
            author.getName());
    final String infoString = toJson(info);
    Long res =
        withRetryableStatement(
            DELETE_MAESTRO_WORKFLOW_QUERY,
            stmt -> {
                int idx = 0;
                stmt.setString(++idx, workflowId);
                stmt.setString(++idx, workflowId);
                stmt.setString(++idx, infoString);
                try (ResultSet result =
                    stmt.executeQuery()) { // unnecessary, to avoid PMD false positive
                    if (result.next()) {
                        long internalId = result.getLong(1);
                        // A second row would mean a concurrent deletion is already in flight.
                        Checks.checkTrue(
                            !result.next(),
                            "Aborting the deletion as there is already a deletion task in progress for workflow [%s]",
                            workflowId);
                        publisher.publishOrThrow(
                            DeleteWorkflowJobEvent.create(workflowId, internalId, author),
                            "Failed to publish maestro delete job event for workflow: " + workflowId);
                        return internalId;
                    }
                    // No row: the query matched nothing to delete.
                    return null;
                }
            });
    if (res == null) {
        throw new MaestroNotFoundException(
            "No workflow is deleted because workflow [%s] is non-existing or has queued or running instances.",
            workflowId);
    }
    LOG.info(
        "User [{}] deleted workflow [{}] with a unique internalId [{}]. Send a delete job event to remove data",
        author.getName(),
        workflowId,
        res);
    return info;
}
|
/** Deleting a workflow publishes a delete job event and makes the definition unreadable. */
@Test
public void testDeleteWorkflow() throws Exception {
    WorkflowDefinition wfd = loadWorkflow(TEST_WORKFLOW_ID1);
    workflowDao.addWorkflowDefinition(wfd, wfd.getPropertiesSnapshot().extractProperties());
    WorkflowDefinition def = workflowDao.getWorkflowDefinition(TEST_WORKFLOW_ID1, "latest");
    assertNotNull(wfd.getInternalId());
    assertNotNull(def.getInternalId());
    assertEquals(wfd.getInternalId(), def.getInternalId());
    assertEquals(wfd.getWorkflow(), def.getWorkflow());
    workflowDao.deleteWorkflow(TEST_WORKFLOW_ID1, User.create("tester"));
    verify(publisher, times(1)).publishOrThrow(any(DeleteWorkflowJobEvent.class), any());
    reset(publisher);
    AssertHelper.assertThrows(
        "The workflow should have been deleted",
        MaestroNotFoundException.class,
        "has not been created yet or has been deleted",
        () -> workflowDao.getWorkflowDefinition(TEST_WORKFLOW_ID1, "latest"));
}
|
/**
 * Loads plugin jars, explodes them, instantiates their entry points,
 * and registers the resulting server plugins in the repository.
 */
@Override
public void start() {
    Collection<ServerPluginInfo> pluginInfos = pluginJarLoader.loadPlugins();
    logInstalledPlugins(pluginInfos);
    Collection<ExplodedPlugin> exploded = extractPlugins(pluginInfos);
    Map<String, Plugin> pluginInstancesByKey = pluginClassLoader.load(exploded);
    Map<String, PluginType> pluginTypesByKey = getTypesByKey(pluginInfos);
    pluginRepository.addPlugins(createServerPlugins(exploded, pluginInstancesByKey, pluginTypesByKey));
}
|
/** Starting the loader registers both plugins with jar metadata, instance, and a deploy log line. */
@Test
public void load_plugins() throws IOException {
    ServerPluginInfo p1 = newPluginInfo("p1");
    ServerPluginInfo p2 = newPluginInfo("p2");
    when(jarLoader.loadPlugins()).thenReturn(Arrays.asList(p1, p2));
    when(jarExploder.explode(p1)).thenReturn(new ExplodedPlugin(p1, "p1", new File("p1Exploded.jar"), Collections.singletonList(new File("libP1.jar"))));
    when(jarExploder.explode(p2)).thenReturn(new ExplodedPlugin(p2, "p2", new File("p2Exploded.jar"), Collections.singletonList(new File("libP2.jar"))));
    Map<String, Plugin> instances = ImmutableMap.of("p1", mock(Plugin.class), "p2", mock(Plugin.class));
    when(pluginClassLoader.load(anyList())).thenReturn(instances);
    underTest.start();
    assertEquals(2, pluginRepository.getPlugins().size());
    assertEquals(p1, pluginRepository.getPlugin("p1").getPluginInfo());
    assertEquals(newFileAndMd5(p1.getNonNullJarFile()).getFile(), pluginRepository.getPlugin("p1").getJar().getFile());
    assertEquals(newFileAndMd5(p1.getNonNullJarFile()).getMd5(), pluginRepository.getPlugin("p1").getJar().getMd5());
    assertEquals(instances.get("p1"), pluginRepository.getPlugin("p1").getInstance());
    assertEquals(p2, pluginRepository.getPlugin("p2").getPluginInfo());
    assertEquals(newFileAndMd5(p2.getNonNullJarFile()).getFile(), pluginRepository.getPlugin("p2").getJar().getFile());
    assertEquals(newFileAndMd5(p2.getNonNullJarFile()).getMd5(), pluginRepository.getPlugin("p2").getJar().getMd5());
    assertEquals(instances.get("p2"), pluginRepository.getPlugin("p2").getInstance());
    // Every deployed plugin must have produced a "Deploy name / version / build" log line.
    assertThat(pluginRepository.getPlugins()).extracting(ServerPlugin::getPluginInfo)
        .allMatch(p -> logTester.logs().contains(String.format("Deploy %s / %s / %s", p.getName(), p.getVersion(), p.getImplementationBuild())));
}
|
/**
 * Returns {@code true} when the graph has a cycle that is reachable from (or
 * contains) one of the starting nodes.
 *
 * <p>First collects all nodes that sit on any cycle, then checks whether a
 * starting node is itself in a cycle, and finally walks outgoing edges from
 * each starting node looking for a path into the detected cycle set.
 */
public boolean hasCycle() {
    Set<Node> looped = findCycles();
    if (looped.isEmpty()) {
        return false;
    }
    Set<Node> checkingSet = new HashSet<>(looped);
    checkingSet.retainAll(startingNodes);
    if (!checkingSet.isEmpty()) {
        // a starting node is part of the loop
        return true;
    }
    // Depth-first walk from each starting node; hitting any looped node means
    // the cycle is reachable. (Termination: any walk that could revisit nodes
    // must pass through a looped node first and return.)
    Deque<Node> processingQueue = new ArrayDeque<>();
    for (Node n : startingNodes) {
        processingQueue.push(n);
        do {
            Node current = processingQueue.pop();
            if (looped.contains(current)) {
                // there is a path leading from a starting node to the detected loop
                return true;
            }
            for (Edge e : current.outgoing) {
                processingQueue.push(e.to);
            }
        } while (!processingQueue.isEmpty());
    }
    return false;
}
|
/** A graph with a b<->d cycle reachable from the starting node "a" reports a cycle. */
@Test
void hasCycle() {
    CallGraph callGraph = new CallGraph();
    callGraph.addEdge("a", "b");
    callGraph.addEdge("a", "c");
    callGraph.addEdge("b", "d");
    callGraph.addEdge("c", "d");
    callGraph.addEdge("c", "b");
    callGraph.addEdge("d", "b");
    callGraph.addStarting(new CallGraph.Node("a"));
    assertTrue(callGraph.hasCycle());
}
|
/** Returns whether the remote string looks like an SSH git URL; null is never one. */
static boolean isSshUrl(@Nullable String remote) {
    if (remote == null) {
        return false;
    }
    return SSH_URL_PATTERN.matcher(remote).matches();
}
|
/** ssh:// and scp-style user@host:path remotes match; other protocols do not. */
@Test
public void testSshUrlChecker() {
    Assert.assertTrue(GitUtils.isSshUrl("ssh://some-host/some-path"));
    Assert.assertTrue(GitUtils.isSshUrl("ssh://some-host/some-path/more"));
    Assert.assertTrue(GitUtils.isSshUrl("ssh://some-host:port/some-path/more"));
    Assert.assertTrue(GitUtils.isSshUrl("ssh://user@some-host/some-path/more"));
    Assert.assertTrue(GitUtils.isSshUrl("ssh://user@some-host:port/some-path/more"));
    Assert.assertTrue(GitUtils.isSshUrl("user@some-host:some-path"));
    Assert.assertTrue(GitUtils.isSshUrl("user@some-host:some-path/more"));
    Assert.assertTrue(!GitUtils.isSshUrl("proto://user@some-host/some-path"));
    Assert.assertTrue(!GitUtils.isSshUrl("proto://user@some-host:port/some-path"));
    Assert.assertTrue(!GitUtils.isSshUrl("proto://user@some-host/some-path/more"));
    Assert.assertTrue(!GitUtils.isSshUrl("proto://user@some-host:port/some-path/more"));
    Assert.assertTrue(!GitUtils.isSshUrl("https://user@some-host/some-path"));
    Assert.assertTrue(!GitUtils.isSshUrl("https://user@some-host:port/some-path"));
    Assert.assertTrue(!GitUtils.isSshUrl("https://user@some-host/some-path/more"));
    Assert.assertTrue(!GitUtils.isSshUrl("https://user@some-host:port/some-path/more"));
}
|
/**
 * Returns whether the row group can be dropped based on the column dictionaries.
 *
 * @param pred         the filter predicate to evaluate (must not be null)
 * @param columns      column chunk metadata for the row group (must not be null)
 * @param dictionaries dictionary pages for the columns
 * @return true if the predicate proves no row can match
 */
public static boolean canDrop(
    FilterPredicate pred, List<ColumnChunkMetaData> columns, DictionaryPageReadStore dictionaries) {
    // Fixed message typo: "cannnot" -> "cannot".
    Objects.requireNonNull(pred, "pred cannot be null");
    Objects.requireNonNull(columns, "columns cannot be null");
    return pred.accept(new DictionaryFilter(columns, dictionaries));
}
|
/** AND of contains-predicates is droppable when either side is provably false by dictionary. */
@Test
public void testContainsAnd() throws Exception {
    BinaryColumn col = binaryColumn("binary_field");
    // both evaluate to false (no upper-case letters are in the dictionary)
    Operators.Contains<Binary> B = contains(eq(col, Binary.fromString("B")));
    Operators.Contains<Binary> C = contains(eq(col, Binary.fromString("C")));
    // both evaluate to true (all lower-case letters are in the dictionary)
    Operators.Contains<Binary> x = contains(eq(col, Binary.fromString("x")));
    Operators.Contains<Binary> y = contains(eq(col, Binary.fromString("y")));
    assertTrue("Should drop when either predicate must be false", canDrop(and(B, y), ccmd, dictionaries));
    assertTrue("Should drop when either predicate must be false", canDrop(and(x, C), ccmd, dictionaries));
    assertTrue("Should drop when either predicate must be false", canDrop(and(B, C), ccmd, dictionaries));
    assertFalse("Should not drop when either predicate could be true", canDrop(and(x, y), ccmd, dictionaries));
}
|
/**
 * Builds the WebSocket reader URI for the given topic.
 *
 * <p>V2 topics use the {@code /ws/v2/reader/...} path without a cluster segment;
 * V1 topics use {@code /ws/reader/...} including the cluster. The start message
 * id is passed either as the literal "latest"/"earliest" or as a base64-encoded
 * serialized MessageId.
 */
@SuppressWarnings("deprecation")
@VisibleForTesting
public String getWebSocketReadUri(String topic) {
    // Strip a single trailing slash from the service URL, if present.
    String serviceURLWithoutTrailingSlash = serviceURL.substring(0,
        serviceURL.endsWith("/") ? serviceURL.length() - 1 : serviceURL.length());
    TopicName topicName = TopicName.get(topic);
    String wsTopic;
    if (topicName.isV2()) {
        wsTopic = String.format("%s/%s/%s/%s", topicName.getDomain(), topicName.getTenant(),
            topicName.getNamespacePortion(), topicName.getLocalName());
    } else {
        wsTopic = String.format("%s/%s/%s/%s/%s", topicName.getDomain(), topicName.getTenant(),
            topicName.getCluster(), topicName.getNamespacePortion(), topicName.getLocalName());
    }
    String msgIdQueryParam;
    if ("latest".equals(startMessageId) || "earliest".equals(startMessageId)) {
        msgIdQueryParam = startMessageId;
    } else {
        // Explicit message ids are serialized and base64-encoded for the query string.
        MessageId msgId = parseMessageId(startMessageId);
        msgIdQueryParam = Base64.getEncoder().encodeToString(msgId.toByteArray());
    }
    String uriFormat = "%s/ws" + (topicName.isV2() ? "/v2/" : "/") + "reader/%s?messageId=%s";
    return String.format(uriFormat, serviceURLWithoutTrailingSlash, wsTopic, msgIdQueryParam);
}
|
/** V1 topics get the /ws/reader path with cluster; V2 topics get /ws/v2/reader without. */
@Test(dataProvider = "startMessageIds")
public void testGetWebSocketReadUri(String msgId, String msgIdQueryParam) throws Exception {
    CmdRead cmdRead = new CmdRead();
    cmdRead.updateConfig(null, null, "ws://localhost:8080/");
    // Inject the start message id directly since there is no public setter.
    Field startMessageIdField = CmdRead.class.getDeclaredField("startMessageId");
    startMessageIdField.setAccessible(true);
    startMessageIdField.set(cmdRead, msgId);
    String topicNameV1 = "persistent://public/cluster/default/t1";
    assertEquals(cmdRead.getWebSocketReadUri(topicNameV1),
        "ws://localhost:8080/ws/reader/persistent/public/cluster/default/t1?messageId=" + msgIdQueryParam);
    String topicNameV2 = "persistent://public/default/t2";
    assertEquals(cmdRead.getWebSocketReadUri(topicNameV2),
        "ws://localhost:8080/ws/v2/reader/persistent/public/default/t2?messageId=" + msgIdQueryParam);
}
|
/**
 * ANDs the Iceberg conversion of every Spark filter together, starting from
 * alwaysTrue. Fails fast on any filter that cannot be converted.
 */
public static Expression convert(Filter[] filters) {
    Expression result = Expressions.alwaysTrue();
    for (Filter filter : filters) {
        Expression converted = convert(filter);
        Preconditions.checkArgument(converted != null, "Cannot convert filter to Iceberg: %s", filter);
        result = Expressions.and(result, converted);
    }
    return result;
}
|
/** A LocalDateTime filter converts to the same expression as its epoch-micros equivalent. */
@Test
public void testLocalDateTimeFilterConversion() {
    LocalDateTime ldt = LocalDateTime.parse("2018-10-18T00:00:57");
    long epochMicros =
        ChronoUnit.MICROS.between(LocalDateTime.ofInstant(Instant.EPOCH, ZoneId.of("UTC")), ldt);
    Expression instantExpression = SparkFilters.convert(GreaterThan.apply("x", ldt));
    Expression rawExpression = Expressions.greaterThan("x", epochMicros);
    assertThat(instantExpression.toString())
        .as("Generated Instant expression should be correct")
        .isEqualTo(rawExpression.toString());
}
|
/**
 * Triggers a RESET lifecycle event for a previously removed data node so it can
 * rejoin the cluster.
 *
 * @throws IllegalArgumentException if the node was not in REMOVED state
 * @throws NodeNotFoundException    if no node with the given id exists
 */
@Override
public DataNodeDto resetNode(String nodeId) throws NodeNotFoundException {
    final DataNodeDto node = nodeService.byNodeId(nodeId);
    // Only nodes that were explicitly removed are allowed to rejoin.
    if (node.getDataNodeStatus() != DataNodeStatus.REMOVED) {
        throw new IllegalArgumentException("Only previously removed data nodes can rejoin the cluster.");
    }
    // Broadcast the trigger so the data node reacts to it.
    clusterEventBus.post(DataNodeLifecycleEvent.create(node.getNodeId(), DataNodeLifecycleTrigger.RESET));
    return node;
}
|
/** Resetting a removed node publishes the matching RESET lifecycle event. */
@Test
public void resetNodePublishesClusterEvent() throws NodeNotFoundException {
    final String testNodeId = "node";
    nodeService.registerServer(buildTestNode(testNodeId, DataNodeStatus.REMOVED));
    classUnderTest.resetNode(testNodeId);
    verify(clusterEventBus).post(DataNodeLifecycleEvent.create(testNodeId, DataNodeLifecycleTrigger.RESET));
}
|
/**
 * Convenience overload: builds a storage cloud configuration from the given
 * properties with the boolean flag defaulted to false.
 * NOTE(review): the flag's meaning is defined by the two-arg overload — confirm there.
 */
public static CloudConfiguration buildCloudConfigurationForStorage(Map<String, String> properties) {
    return buildCloudConfigurationForStorage(properties, false);
}
|
/** GCP properties produce a GCP-typed configuration with the expected string form. */
@Test
public void testGCPCloudConfiguration() {
    Map<String, String> map = new HashMap<String, String>() {
        {
            put(CloudConfigurationConstants.GCP_GCS_SERVICE_ACCOUNT_PRIVATE_KEY, "XX");
            put(CloudConfigurationConstants.GCP_GCS_SERVICE_ACCOUNT_PRIVATE_KEY_ID, "XX");
            put(CloudConfigurationConstants.GCP_GCS_SERVICE_ACCOUNT_EMAIL, "XX");
            put(CloudConfigurationConstants.GCP_GCS_SERVICE_ACCOUNT_IMPERSONATION_SERVICE_ACCOUNT, "XX");
            put(CloudConfigurationConstants.GCP_GCS_USE_COMPUTE_ENGINE_SERVICE_ACCOUNT, "XX");
        }
    };
    CloudConfiguration cc = CloudConfigurationFactory.buildCloudConfigurationForStorage(map);
    Assert.assertEquals(cc.getCloudType(), CloudType.GCP);
    // Exercise the conversion paths for smoke coverage (no assertions on their output).
    TCloudConfiguration tc = new TCloudConfiguration();
    cc.toThrift(tc);
    Configuration conf = new Configuration();
    cc.applyToConfiguration(conf);
    cc.toFileStoreInfo();
    Assert.assertEquals(cc.toConfString(),
        "GCPCloudConfiguration{resources='', jars='', hdpuser='', " +
            "cred=GCPCloudCredential{useComputeEngineServiceAccount=false, " +
            "serviceAccountEmail='XX', serviceAccountPrivateKeyId='XX', serviceAccountPrivateKey='XX', " +
            "impersonationServiceAccount='XX'}}");
}
|
/**
 * Parses the given text to transform it to the desired target type.
 * Strips surrounding markdown code fences (``` or ```json) before parsing.
 *
 * @param text The LLM output in string format.
 * @return The parsed output in the desired target type.
 * @throws RuntimeException wrapping the JSON processing failure when the text cannot be parsed.
 */
// Javadoc moved above @Override: a doc comment placed after an annotation is
// not associated with the declaration by the javadoc tool.
@Override
public T convert(@NonNull String text) {
    try {
        // Remove leading and trailing whitespace
        text = text.trim();
        // Check for and remove triple backticks and "json" identifier
        if (text.startsWith("```") && text.endsWith("```")) {
            // Remove the first line if it contains "```json"
            String[] lines = text.split("\n", 2);
            if (lines[0].trim().equalsIgnoreCase("```json")) {
                text = lines.length > 1 ? lines[1] : "";
            }
            else {
                text = text.substring(3); // Remove leading ```
            }
            // Remove trailing ```
            text = text.substring(0, text.length() - 3);
            // Trim again to remove any potential whitespace
            text = text.trim();
        }
        return (T) this.objectMapper.readValue(text, this.typeRef);
    }
    catch (JsonProcessingException e) {
        // Parameterized logging avoids eager string concatenation.
        logger.error("Could not parse the given text to the desired target type: {} into {}", text, this.typeRef);
        throw new RuntimeException(e);
    }
}
|
/** Plain JSON (no code fences) converts into the target bean. */
@Test
public void convertClassType() {
    var converter = new BeanOutputConverter<>(TestClass.class);
    var testClass = converter.convert("{ \"someString\": \"some value\" }");
    assertThat(testClass.getSomeString()).isEqualTo("some value");
}
|
/**
 * Looks up the latest schema registered for the topic's key or value subject.
 *
 * <p>Returns empty when no subject is registered, or when the schema id cannot
 * be resolved (which can happen with misconfigured schema contexts).
 */
@Override
public Optional<SchemaDescription> getSchema(String topic, Target type) {
    String subject = schemaSubject(topic, type);
    return getSchemaBySubject(subject)
        .flatMap(schemaMetadata ->
            //schema can be not-found, when schema contexts configured improperly
            getSchemaById(schemaMetadata.getId())
                .map(parsedSchema ->
                    new SchemaDescription(
                        convertSchema(schemaMetadata, parsedSchema),
                        Map.of(
                            "subject", subject,
                            "schemaId", schemaMetadata.getId(),
                            "latestVersion", schemaMetadata.getVersion(),
                            "type", schemaMetadata.getSchemaType() // AVRO / PROTOBUF / JSON
                        )
                    )));
}
|
/** No registered subject yields an empty schema description for both key and value. */
@Test
void returnsEmptyDescriptorIfSchemaNotRegisteredInSR() {
    String topic = "test";
    assertThat(serde.getSchema(topic, Serde.Target.KEY)).isEmpty();
    assertThat(serde.getSchema(topic, Serde.Target.VALUE)).isEmpty();
}
|
/** Returns the embedded Debezium MongoDB connector configuration. */
@Override
public MongoDbConnectorEmbeddedDebeziumConfiguration getConfiguration() {
    return this.configuration;
}
|
/** Endpoint parameters take precedence over URI values in the resulting configuration. */
@Test
void testIfConnectorEndpointCreatedWithConfig() throws Exception {
    final Map<String, Object> params = new HashMap<>();
    params.put("offsetStorageFileName", "/offset_test_file");
    params.put("mongodbConnectionString", "mongodb://localhost:27017/?replicaSet=rs0");
    params.put("mongodbUser", "dbz");
    params.put("mongodbPassword", "pwd");
    params.put("topicPrefix", "test");
    params.put("schemaHistoryInternalFileFilename", "/db_history_file_test");
    final String remaining = "test_name";
    final String uri = "debezium?name=test_name&offsetStorageFileName=/test&"
        + "databaseHostname=localhost&databaseServerId=1234&databaseUser=dbz&databasePassword=pwd&"
        + "databaseServerName=test&schemaHistoryInternalFileFilename=/test";
    try (final DebeziumComponent debeziumComponent = new DebeziumMongodbComponent(new DefaultCamelContext())) {
        debeziumComponent.start();
        final DebeziumEndpoint debeziumEndpoint = debeziumComponent.createEndpoint(uri, remaining, params);
        assertNotNull(debeziumEndpoint);
        // test for config
        final MongoDbConnectorEmbeddedDebeziumConfiguration configuration
            = (MongoDbConnectorEmbeddedDebeziumConfiguration) debeziumEndpoint.getConfiguration();
        assertEquals("test_name", configuration.getName());
        assertEquals("/offset_test_file", configuration.getOffsetStorageFileName());
        assertEquals("mongodb://localhost:27017/?replicaSet=rs0", configuration.getMongodbConnectionString());
        assertEquals("dbz", configuration.getMongodbUser());
        assertEquals("pwd", configuration.getMongodbPassword());
        assertEquals("test", configuration.getTopicPrefix());
        assertEquals("/db_history_file_test", configuration.getSchemaHistoryInternalFileFilename());
    }
}
|
/**
 * Blocks (in bounded waits) until the channel becomes writable or inactive,
 * flushing pending writes on each iteration so back-pressure can drain.
 *
 * <p>The condition is awaited with a timeout rather than a signal-only wait, so
 * the writability state is re-checked at least every DEFAULT_TIMEOUT_MILLISECONDS.
 */
@SuppressWarnings("ResultOfMethodCallIgnored")
@SneakyThrows(InterruptedException.class)
public void doAwait(final ChannelHandlerContext context) {
    while (!context.channel().isWritable() && context.channel().isActive()) {
        context.flush();
        lock.lock();
        try {
            condition.await(DEFAULT_TIMEOUT_MILLISECONDS, TimeUnit.MILLISECONDS);
        } finally {
            lock.unlock();
        }
    }
}
|
/** While the channel stays unwritable-but-active, doAwait keeps waiting instead of returning. */
@Test
void assertDoAwait() throws NoSuchFieldException, IllegalAccessException {
    when(channel.isWritable()).thenReturn(false);
    when(channel.isActive()).thenReturn(true);
    when(channelHandlerContext.channel()).thenReturn(channel);
    ExecutorService executorService = Executors.newFixedThreadPool(1);
    executorService.submit(() -> resourceLock.doAwait(channelHandlerContext));
    Awaitility.await().pollDelay(200L, TimeUnit.MILLISECONDS).until(() -> true);
    // Swap in a fresh condition so the background waiter cannot be signalled afterwards.
    Plugins.getMemberAccessor().set(ResourceLock.class.getDeclaredField("condition"), resourceLock, new ReentrantLock().newCondition());
    verify(resourceLock, times(1)).doAwait(channelHandlerContext);
}
|
/** Creates matchers from the varargs identifiers by delegating to the Set-based factory. */
@PublicAPI(usage = ACCESS)
public static PackageMatchers of(String... packageIdentifiers) {
    ImmutableSet<String> identifiers = ImmutableSet.copyOf(packageIdentifiers);
    return of(identifiers);
}
|
/** A package string matches when ANY of the supplied identifiers matches it. */
@Test
public void matches_any_package() {
    assertThat(PackageMatchers.of("..match..", "..other.."))
        .accepts("foo.match.bar")
        .accepts("foo.other.bar")
        .accepts("foo.match.other.bar")
        .rejects("foo.bar")
        .rejects("matc.hother");
}
|
/**
 * Returns the estimated percentage (0-100) of total memory segments currently
 * requested. An empty pool reports zero usage.
 */
public int getEstimatedRequestedSegmentsUsage() {
    int total = getTotalNumberOfMemorySegments();
    // Guard against division by zero when the pool holds no segments at all.
    if (total == 0) {
        return 0;
    }
    long requested = getEstimatedNumberOfRequestedMemorySegments();
    return Math.toIntExact(100L * requested / total);
}
|
/** A pool with zero segments reports zero usage instead of dividing by zero. */
@Test
void testEmptyPoolSegmentsUsage() throws IOException {
    try (CloseableRegistry closeableRegistry = new CloseableRegistry()) {
        NetworkBufferPool globalPool = new NetworkBufferPool(0, 128);
        closeableRegistry.registerCloseable(globalPool::destroy);
        assertThat(globalPool.getEstimatedRequestedSegmentsUsage()).isZero();
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.