focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
@VisibleForTesting
public static BlockListAsLongs encode(
    final Collection<? extends Replica> replicas) {
  // Fold every replica into a builder capped at the default IPC data length,
  // then materialize the encoded block list.
  final BlockListAsLongs.Builder listBuilder = builder(IPC_MAXIMUM_DATA_LENGTH_DEFAULT);
  replicas.forEach(listBuilder::add);
  return listBuilder.build();
}
|
// Verifies that the DN-side protocol translator picks the block-report wire
// format based on the NN's advertised capabilities: buffer-based ("new style")
// when STORAGE_BLOCK_REPORT_BUFFERS is set, longs-based ("old style") otherwise.
@Test
public void testDatanodeDetect() throws ServiceException, IOException {
  final AtomicReference<BlockReportRequestProto> request =
      new AtomicReference<>();
  // just capture the outgoing PB
  DatanodeProtocolPB mockProxy = mock(DatanodeProtocolPB.class);
  doAnswer(new Answer<BlockReportResponseProto>() {
    public BlockReportResponseProto answer(InvocationOnMock invocation) {
      Object[] args = invocation.getArguments();
      request.set((BlockReportRequestProto) args[1]);
      return BlockReportResponseProto.newBuilder().build();
    }
  }).when(mockProxy).blockReport(any(), any(BlockReportRequestProto.class));
  @SuppressWarnings("resource")
  DatanodeProtocolClientSideTranslatorPB nn =
      new DatanodeProtocolClientSideTranslatorPB(mockProxy);
  DatanodeRegistration reg = DFSTestUtil.getLocalDatanodeRegistration();
  NamespaceInfo nsInfo = new NamespaceInfo(1, "cluster", "bp", 1);
  reg.setNamespaceInfo(nsInfo);
  Replica r = new FinalizedReplica(new Block(1, 2, 3), null, null);
  BlockListAsLongs bbl = BlockListAsLongs.encode(Collections.singleton(r));
  DatanodeStorage storage = new DatanodeStorage("s1");
  StorageBlockReport[] sbr = { new StorageBlockReport(storage, bbl) };
  // check DN sends new-style BR
  request.set(null);
  nsInfo.setCapabilities(Capability.STORAGE_BLOCK_REPORT_BUFFERS.getMask());
  nn.blockReport(reg, "pool", sbr,
      new BlockReportContext(1, 0, System.nanoTime(), 0L));
  BlockReportRequestProto proto = request.get();
  assertNotNull(proto);
  // New style: blocks travel in the buffers field, not the longs list.
  assertTrue(proto.getReports(0).getBlocksList().isEmpty());
  assertFalse(proto.getReports(0).getBlocksBuffersList().isEmpty());
  // back up to prior version and check DN sends old-style BR
  request.set(null);
  nsInfo.setCapabilities(Capability.UNKNOWN.getMask());
  BlockListAsLongs blockList = getBlockList(r);
  StorageBlockReport[] obp = new StorageBlockReport[] {
      new StorageBlockReport(new DatanodeStorage("s1"), blockList) };
  nn.blockReport(reg, "pool", obp,
      new BlockReportContext(1, 0, System.nanoTime(), 0L));
  proto = request.get();
  assertNotNull(proto);
  // Old style: longs list populated, buffers empty.
  assertFalse(proto.getReports(0).getBlocksList().isEmpty());
  assertTrue(proto.getReports(0).getBlocksBuffersList().isEmpty());
}
|
@Override
protected JsonObject convert(final JsonObject data) {
    // Unwrap only the PLUGIN section of the full configuration payload.
    final String pluginGroupKey = ConfigGroupEnum.PLUGIN.name();
    return data.getAsJsonObject(pluginGroupKey);
}
|
@Test
public void testConvert() {
    // convert() should return exactly the object stored under the PLUGIN key.
    final JsonObject expected = new JsonObject();
    final JsonObject payload = new JsonObject();
    payload.add(ConfigGroupEnum.PLUGIN.name(), expected);
    assertThat(mockPluginDataRefresh.convert(payload), is(expected));
}
|
/**
 * Performs a rolling update of the ZooKeeper pods, restarting only pods for
 * which {@code podRestart} returns a non-empty list of reasons. Missing and
 * non-ready pods are processed first, and the current ZooKeeper leader is
 * always restarted last to minimise leader elections.
 *
 * @param reconciliation   reconciliation marker used for contextual logging
 * @param replicas         expected number of ZooKeeper replicas
 * @param selectorLabels   label selector identifying the ZooKeeper pods
 * @param podRestart       maps a pod to its restart reasons (null/empty => no restart)
 * @param coTlsPemIdentity TLS identity used when querying ZooKeeper for its leader
 * @return future that completes once the (possibly empty) rolling update is done
 */
public Future<Void> maybeRollingUpdate(Reconciliation reconciliation, int replicas, Labels selectorLabels, Function<Pod, List<String>> podRestart, TlsPemIdentity coTlsPemIdentity) {
    String namespace = reconciliation.namespace();
    // We prepare the list of expected Pods. This is needed as we need to account for pods which might be missing.
    // We need to wait for them before rolling any running pods to avoid problems.
    List<String> expectedPodNames = new ArrayList<>();
    for (int i = 0; i < replicas; i++) {
        expectedPodNames.add(KafkaResources.zookeeperPodName(reconciliation.name(), i));
    }
    return podOperator.listAsync(namespace, selectorLabels)
            .compose(pods -> {
                // Build a roll context entry for every expected pod, whether it exists or not.
                ZookeeperClusterRollContext clusterRollContext = new ZookeeperClusterRollContext();
                for (String podName : expectedPodNames) {
                    Pod pod = pods.stream().filter(p -> podName.equals(p.getMetadata().getName())).findFirst().orElse(null);
                    if (pod != null) {
                        List<String> restartReasons = podRestart.apply(pod);
                        final boolean ready = podOperator.isReady(namespace, pod.getMetadata().getName());
                        // NOTE(review): third argument appears to flag "pod exists" — confirm against ZookeeperPodContext.
                        ZookeeperPodContext podContext = new ZookeeperPodContext(podName, restartReasons, true, ready);
                        if (restartReasons != null && !restartReasons.isEmpty()) {
                            LOGGER.debugCr(reconciliation, "Pod {} should be rolled due to {}", podContext.getPodName(), restartReasons);
                        } else {
                            LOGGER.debugCr(reconciliation, "Pod {} does not need to be rolled", podContext.getPodName());
                        }
                        clusterRollContext.add(podContext);
                    } else {
                        // Pod does not exist, but we still add it to the roll context because we should not roll
                        // any other pods before it is ready
                        LOGGER.debugCr(reconciliation, "Pod {} does not exist and cannot be rolled", podName);
                        ZookeeperPodContext podContext = new ZookeeperPodContext(podName, null, false, false);
                        clusterRollContext.add(podContext);
                    }
                }
                if (clusterRollContext.requiresRestart()) {
                    return Future.succeededFuture(clusterRollContext);
                } else {
                    // Null signals "nothing to roll" to the next stage.
                    return Future.succeededFuture(null);
                }
            }).compose(clusterRollContext -> {
                if (clusterRollContext != null) {
                    Promise<Void> promise = Promise.promise();
                    Future<String> leaderFuture = leaderFinder.findZookeeperLeader(reconciliation, clusterRollContext.podNames(), coTlsPemIdentity);
                    leaderFuture.compose(leader -> {
                        LOGGER.debugCr(reconciliation, "Zookeeper leader is " + (ZookeeperLeaderFinder.UNKNOWN_LEADER.equals(leader) ? "unknown" : "pod " + leader));
                        Future<Void> fut = Future.succeededFuture();
                        // Then roll each non-leader pod => the leader is rolled last
                        for (ZookeeperPodContext podContext : clusterRollContext.getPodContextsWithNonExistingAndNonReadyFirst()) {
                            if (podContext.requiresRestart() && !podContext.getPodName().equals(leader)) {
                                LOGGER.debugCr(reconciliation, "Pod {} needs to be restarted", podContext.getPodName());
                                // roll the pod and wait until it is ready
                                // this prevents rolling into faulty state (note: this applies just for ZK pods)
                                fut = fut.compose(ignore -> restartPod(reconciliation, podContext.getPodName(), podContext.reasonsToRestart));
                            } else {
                                if (podContext.requiresRestart()) {
                                    LOGGER.debugCr(reconciliation, "Deferring restart of leader {}", podContext.getPodName());
                                } else {
                                    LOGGER.debugCr(reconciliation, "Pod {} does not need to be restarted", podContext.getPodName());
                                }
                                // Even when a pod is not restarted we wait for its readiness
                                // before moving on, so the quorum stays healthy throughout.
                                fut = fut.compose(ignore -> podOperator.readiness(reconciliation, reconciliation.namespace(), podContext.getPodName(), READINESS_POLLING_INTERVAL_MS, operationTimeoutMs));
                            }
                        }
                        // Check if we have a leader and if it needs rolling
                        if (ZookeeperLeaderFinder.UNKNOWN_LEADER.equals(leader) || clusterRollContext.get(leader) == null || !clusterRollContext.get(leader).requiresRestart()) {
                            return fut;
                        } else {
                            // Roll the leader pod
                            return fut.compose(ar -> {
                                // the leader is rolled as the last
                                LOGGER.debugCr(reconciliation, "Restarting leader pod (previously deferred) {}", leader);
                                return restartPod(reconciliation, leader, clusterRollContext.get(leader).reasonsToRestart);
                            });
                        }
                    }).onComplete(promise);
                    return promise.future();
                } else {
                    return Future.succeededFuture();
                }
            });
}
|
// Verifies the rolling order: the non-ready follower first, then the ready
// follower, and the leader last.
@Test
public void testNonReadyPodsAreRestartedFirst(VertxTestContext context) {
    final String leaderPodReady = "name-zookeeper-2";
    final String followerPodReady = "name-zookeeper-0";
    final String followerPodNonReady = "name-zookeeper-1";
    PodOperator podOperator = mock(PodOperator.class);
    when(podOperator.isReady(any(), eq(followerPodReady))).thenReturn(true);
    when(podOperator.isReady(any(), eq(followerPodNonReady))).thenReturn(false);
    when(podOperator.isReady(any(), eq(leaderPodReady))).thenReturn(true);
    when(podOperator.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(PODS));
    when(podOperator.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    ZookeeperLeaderFinder leaderFinder = mock(ZookeeperLeaderFinder.class);
    when(leaderFinder.findZookeeperLeader(any(), any(), any())).thenReturn(Future.succeededFuture(leaderPodReady));
    // NOTE(review): 300_00L is 30000; the underscore placement suggests 300_000 may
    // have been intended — confirm the expected operation timeout.
    MockZooKeeperRoller roller = new MockZooKeeperRoller(podOperator, leaderFinder, 300_00L);
    // Every pod is flagged for restart.
    Function<Pod, List<String>> shouldRoll = pod -> List.of("Pod was manually annotated to be rolled");
    roller.maybeRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, 3, DUMMY_SELECTOR, shouldRoll, DUMMY_IDENTITY)
            .onComplete(context.succeeding(v -> context.verify(() -> {
                assertThat(roller.podRestarts.size(), is(3));
                assertThat(roller.podRestarts, contains(followerPodNonReady, followerPodReady, leaderPodReady));
                context.completeNow();
            })));
}
|
/**
 * Convenience overload: parses a NetFlow v9 packet with a fresh, empty
 * template cache and no template-consumer callback.
 */
public static NetFlowV9Packet parsePacket(ByteBuf bb, NetFlowV9FieldTypeRegistry typeRegistry) {
    return parsePacket(bb, typeRegistry, Maps.newHashMap(), null);
}
|
// A packet with flow records but no usable templates must raise
// EmptyTemplateException rather than being silently mis-parsed.
@Test
public void testParseIncomplete() throws Exception {
    final byte[] b = Resources.toByteArray(Resources.getResource("netflow-data/netflow-v9-3_incomplete.dat"));
    assertThatExceptionOfType(EmptyTemplateException.class)
            .isThrownBy(() -> NetFlowV9Parser.parsePacket(Unpooled.wrappedBuffer(b), typeRegistry));
}
|
/**
 * Returns the clients currently connected to the given cluster node,
 * as reported by the Redis {@code CLIENT LIST} command.
 *
 * @param node cluster node to query
 * @return parsed client info, one entry per connected client
 */
@Override
public List<RedisClientInfo> getClientList(RedisClusterNode node) {
    RedisClient entry = getEntry(node);
    // CLIENT LIST yields one formatted text line per connected client.
    RFuture<List<String>> f = executorService.readAsync(entry, StringCodec.INSTANCE, RedisCommands.CLIENT_LIST);
    List<String> list = syncFuture(f);
    // Idiomatic toArray(new String[0]) — simpler and at least as fast as presizing.
    return CONVERTER.convert(list.toArray(new String[0]));
}
|
// Smoke test against a live cluster: the first master should report a
// non-trivial number of connected clients (>10 with the test fixture's pool).
@Test
public void testGetClientList() {
    RedisClusterNode master = getFirstMaster();
    List<RedisClientInfo> list = connection.getClientList(master);
    assertThat(list.size()).isGreaterThan(10);
}
|
/**
 * Merges {@code newColStats} into {@code aggregateColStats} for timestamp
 * columns: widens low/high bounds, sums null counts, and merges the NDV and
 * KLL histogram estimators. The aggregate object is mutated in place.
 */
@Override
public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) {
    LOG.debug("Merging statistics: [aggregateColStats:{}, newColStats: {}]", aggregateColStats, newColStats);
    TimestampColumnStatsDataInspector aggregateData = timestampInspectorFromStats(aggregateColStats);
    TimestampColumnStatsDataInspector newData = timestampInspectorFromStats(newColStats);
    // Null from mergeLowValue/mergeHighValue means "no value available"; keep the existing bound.
    Timestamp lowValue = mergeLowValue(getLowValue(aggregateData), getLowValue(newData));
    if (lowValue != null) {
        aggregateData.setLowValue(lowValue);
    }
    Timestamp highValue = mergeHighValue(getHighValue(aggregateData), getHighValue(newData));
    if (highValue != null) {
        aggregateData.setHighValue(highValue);
    }
    aggregateData.setNumNulls(mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
    // The estimator list is mutated by mergeNumDistinctValueEstimator; element 0
    // afterwards holds the merged estimator, which becomes the new aggregate.
    NumDistinctValueEstimator oldNDVEst = aggregateData.getNdvEstimator();
    NumDistinctValueEstimator newNDVEst = newData.getNdvEstimator();
    List<NumDistinctValueEstimator> ndvEstimatorsList = Arrays.asList(oldNDVEst, newNDVEst);
    aggregateData.setNumDVs(mergeNumDistinctValueEstimator(aggregateColStats.getColName(),
        ndvEstimatorsList, aggregateData.getNumDVs(), newData.getNumDVs()));
    aggregateData.setNdvEstimator(ndvEstimatorsList.get(0));
    KllHistogramEstimator oldKllEst = aggregateData.getHistogramEstimator();
    KllHistogramEstimator newKllEst = newData.getHistogramEstimator();
    aggregateData.setHistogramEstimator(mergeHistogramEstimator(aggregateColStats.getColName(), oldKllEst, newKllEst));
    aggregateColStats.getStatsData().setTimestampStats(aggregateData);
}
|
// Merges three stats objects (TS_2, TS_3, then a two-sample TS_1 object) and
// checks the cumulative bounds, null count, NDV, and sketch contents.
@Test
public void testMergeNonNullValues() {
    ColumnStatisticsObj aggrObj = createColumnStatisticsObj(new ColStatsBuilder<>(Timestamp.class)
        .low(TS_2)
        .high(TS_2)
        .numNulls(2)
        .numDVs(1)
        .hll(TS_2.getSecondsSinceEpoch())
        .kll(TS_2.getSecondsSinceEpoch())
        .build());
    ColumnStatisticsObj newObj = createColumnStatisticsObj(new ColStatsBuilder<>(Timestamp.class)
        .low(TS_3)
        .high(TS_3)
        .numNulls(3)
        .numDVs(1)
        .hll(TS_3.getSecondsSinceEpoch())
        .kll(TS_3.getSecondsSinceEpoch())
        .build());
    merger.merge(aggrObj, newObj);
    // Second merge adds TS_1 twice to exercise duplicate handling in the sketches.
    newObj = createColumnStatisticsObj(new ColStatsBuilder<>(Timestamp.class)
        .low(TS_1)
        .high(TS_1)
        .numNulls(1)
        .numDVs(1)
        .hll(TS_1.getSecondsSinceEpoch(), TS_1.getSecondsSinceEpoch())
        .kll(TS_1.getSecondsSinceEpoch(), TS_1.getSecondsSinceEpoch())
        .build());
    merger.merge(aggrObj, newObj);
    // Expected: low=TS_1, high=TS_3, nulls=2+3+1, three distinct values.
    ColumnStatisticsData expectedColumnStatisticsData = new ColStatsBuilder<>(Timestamp.class)
        .low(TS_1)
        .high(TS_3)
        .numNulls(6)
        .numDVs(3)
        .hll(TS_2.getSecondsSinceEpoch(), TS_3.getSecondsSinceEpoch(),
            TS_1.getSecondsSinceEpoch(), TS_1.getSecondsSinceEpoch())
        .kll(TS_2.getSecondsSinceEpoch(), TS_3.getSecondsSinceEpoch(),
            TS_1.getSecondsSinceEpoch(), TS_1.getSecondsSinceEpoch())
        .build();
    assertEquals(expectedColumnStatisticsData, aggrObj.getStatsData());
}
|
/**
 * Creates the command executor matching the given MySQL command packet type.
 * Each case casts {@code commandPacket} to the concrete packet type that the
 * dispatcher guarantees for that command; unknown types fall through to
 * {@link MySQLUnsupportedCommandExecutor}.
 *
 * @throws SQLException if executor construction fails
 */
@SuppressWarnings("DataFlowIssue")
public static CommandExecutor newInstance(final MySQLCommandPacketType commandPacketType, final CommandPacket commandPacket, final ConnectionSession connectionSession) throws SQLException {
    // Log the SQL text only when the packet actually carries SQL.
    if (commandPacket instanceof SQLReceivedPacket) {
        log.debug("Execute packet type: {}, sql: {}", commandPacketType, ((SQLReceivedPacket) commandPacket).getSQL());
    } else {
        log.debug("Execute packet type: {}", commandPacketType);
    }
    switch (commandPacketType) {
        case COM_QUIT:
            return new MySQLComQuitExecutor();
        case COM_INIT_DB:
            return new MySQLComInitDbExecutor((MySQLComInitDbPacket) commandPacket, connectionSession);
        case COM_FIELD_LIST:
            return new MySQLComFieldListPacketExecutor((MySQLComFieldListPacket) commandPacket, connectionSession);
        case COM_QUERY:
            return new MySQLComQueryPacketExecutor((MySQLComQueryPacket) commandPacket, connectionSession);
        case COM_PING:
            return new MySQLComPingExecutor(connectionSession);
        case COM_STMT_PREPARE:
            return new MySQLComStmtPrepareExecutor((MySQLComStmtPreparePacket) commandPacket, connectionSession);
        case COM_STMT_EXECUTE:
            return new MySQLComStmtExecuteExecutor((MySQLComStmtExecutePacket) commandPacket, connectionSession);
        case COM_STMT_SEND_LONG_DATA:
            return new MySQLComStmtSendLongDataExecutor((MySQLComStmtSendLongDataPacket) commandPacket, connectionSession);
        case COM_STMT_RESET:
            return new MySQLComStmtResetExecutor((MySQLComStmtResetPacket) commandPacket, connectionSession);
        case COM_STMT_CLOSE:
            return new MySQLComStmtCloseExecutor((MySQLComStmtClosePacket) commandPacket, connectionSession);
        case COM_SET_OPTION:
            return new MySQLComSetOptionExecutor((MySQLComSetOptionPacket) commandPacket, connectionSession);
        case COM_RESET_CONNECTION:
            return new MySQLComResetConnectionExecutor(connectionSession);
        default:
            return new MySQLUnsupportedCommandExecutor(commandPacketType);
    }
}
|
// COM_STMT_SEND_LONG_DATA must map to MySQLComStmtSendLongDataExecutor.
@Test
void assertNewInstanceWithComStmtSendLongData() throws SQLException {
    assertThat(MySQLCommandExecutorFactory.newInstance(MySQLCommandPacketType.COM_STMT_SEND_LONG_DATA, mock(MySQLComStmtSendLongDataPacket.class), connectionSession),
        instanceOf(MySQLComStmtSendLongDataExecutor.class));
}
|
/**
 * Merges {@code newColStats} into {@code aggregateColStats} for date columns:
 * widens low/high bounds, sums null counts, and merges the NDV and KLL
 * histogram estimators. The aggregate object is mutated in place.
 */
@Override
public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) {
    LOG.debug("Merging statistics: [aggregateColStats:{}, newColStats: {}]", aggregateColStats, newColStats);
    DateColumnStatsDataInspector aggregateData = dateInspectorFromStats(aggregateColStats);
    DateColumnStatsDataInspector newData = dateInspectorFromStats(newColStats);
    // Null from mergeLowValue/mergeHighValue means "no value available"; keep the existing bound.
    Date lowValue = mergeLowValue(getLowValue(aggregateData), getLowValue(newData));
    if (lowValue != null) {
        aggregateData.setLowValue(lowValue);
    }
    Date highValue = mergeHighValue(getHighValue(aggregateData), getHighValue(newData));
    if (highValue != null) {
        aggregateData.setHighValue(highValue);
    }
    aggregateData.setNumNulls(mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
    // The estimator list is mutated by mergeNumDistinctValueEstimator; element 0
    // afterwards holds the merged estimator, which becomes the new aggregate.
    NumDistinctValueEstimator oldNDVEst = aggregateData.getNdvEstimator();
    NumDistinctValueEstimator newNDVEst = newData.getNdvEstimator();
    List<NumDistinctValueEstimator> ndvEstimatorsList = Arrays.asList(oldNDVEst, newNDVEst);
    aggregateData.setNumDVs(mergeNumDistinctValueEstimator(aggregateColStats.getColName(),
        ndvEstimatorsList, aggregateData.getNumDVs(), newData.getNumDVs()));
    aggregateData.setNdvEstimator(ndvEstimatorsList.get(0));
    KllHistogramEstimator oldKllEst = aggregateData.getHistogramEstimator();
    KllHistogramEstimator newKllEst = newData.getHistogramEstimator();
    aggregateData.setHistogramEstimator(mergeHistogramEstimator(aggregateColStats.getColName(), oldKllEst, newKllEst));
    aggregateColStats.getStatsData().setDateStats(aggregateData);
}
|
// Merging a stats object with no low/high values and zero DVs must leave the
// aggregate's bounds, NDV and sketches untouched while still summing nulls.
@Test
public void testMergeNonNullWithNullValues() {
    ColumnStatisticsObj aggrObj = createColumnStatisticsObj(new ColStatsBuilder<>(Date.class)
        .low(DATE_1)
        .high(DATE_3)
        .numNulls(4)
        .numDVs(2)
        .hll(DATE_1.getDaysSinceEpoch(), DATE_3.getDaysSinceEpoch(), DATE_3.getDaysSinceEpoch())
        .kll(DATE_1.getDaysSinceEpoch(), DATE_3.getDaysSinceEpoch(), DATE_3.getDaysSinceEpoch())
        .build());
    // Incoming object has only a null count (e.g. an all-null partition).
    ColumnStatisticsObj newObj = createColumnStatisticsObj(new ColStatsBuilder<>(Date.class)
        .low(null)
        .high(null)
        .numNulls(2)
        .numDVs(0)
        .build());
    merger.merge(aggrObj, newObj);
    ColumnStatisticsData expectedColumnStatisticsData = new ColStatsBuilder<>(Date.class)
        .low(DATE_1)
        .high(DATE_3)
        .numNulls(6)
        .numDVs(2)
        .hll(DATE_1.getDaysSinceEpoch(), DATE_3.getDaysSinceEpoch(), DATE_3.getDaysSinceEpoch())
        .kll(DATE_1.getDaysSinceEpoch(), DATE_3.getDaysSinceEpoch(), DATE_3.getDaysSinceEpoch())
        .build();
    assertEquals(expectedColumnStatisticsData, aggrObj.getStatsData());
}
|
/**
 * Walks the report component tree and evaluates the configured formulas,
 * storing the resulting measures via the measure repository.
 */
public void execute() {
    new PathAwareCrawler<>(
        FormulaExecutorComponentVisitor.newBuilder(metricRepository, measureRepository).buildFor(formulas))
        .visit(treeRootHolder.getReportTreeRoot());
}
|
// One original block plus three inner duplicates => 4 duplicated blocks.
@Test
public void compute_duplicated_blocks_one_for_original_one_for_each_InnerDuplicate() {
    TextBlock original = new TextBlock(1, 1);
    duplicationRepository.addDuplication(FILE_1_REF, original, new TextBlock(2, 2), new TextBlock(3, 3), new TextBlock(2, 3));
    underTest.execute();
    assertRawMeasureValue(FILE_1_REF, DUPLICATED_BLOCKS_KEY, 4);
}
|
/**
 * Puts {@code value} into {@code target} only when the value is non-null.
 * A null key is rejected unconditionally.
 *
 * @param target map to insert into; mutated in place
 * @param key    key to insert under, must not be null
 * @param value  value to insert; ignored when null
 * @throws NullPointerException if {@code key} is null
 */
public static <K, V> void putIfValNoNull(Map<K, V> target, K key, V value) {
    // Generified signature (was raw Map) — erasure-identical, so existing callers compile unchanged.
    Objects.requireNonNull(key, "key");
    if (value != null) {
        target.put(key, value);
    }
}
|
// A null value must be dropped; the literal string "null" is a real value and is stored.
@Test
void testPutIfValNoNull() {
    Map<Object, Object> map = new HashMap<>();
    MapUtil.putIfValNoNull(map, "key-1", null);
    assertTrue(map.isEmpty());
    MapUtil.putIfValNoNull(map, "key-1", "null");
    assertTrue(map.containsKey("key-1"));
}
|
@Override
public void startServerSentEvents(DeviceId deviceId, String eventsUrl) {
    // Subscribe to the device's SSE stream: each event is forwarded to the
    // registered listeners; stream errors are logged but not rethrown.
    this.getServerSentEvents(
            deviceId,
            eventsUrl,
            event -> sendEvent(event, deviceId),
            error -> log.error("Unable to handle {} SSEvent from {}. {}",
                    eventsUrl, deviceId, error));
}
|
// Both listeners receive every event; listener1 cancels the stream after
// event id 8, so each listener ends up seeing 9 events (ids apparently 0..8
// — confirm against the test event source).
@Test
public void testStartServerSentEvents() {
    AtomicInteger listener1Count = new AtomicInteger();
    AtomicInteger listener2Count = new AtomicInteger();
    RestSBEventListener listener1 = event -> {
        System.out.println("Event on Lsnr1: " + event);
        listener1Count.incrementAndGet();
        if (Integer.parseInt(event.getId()) == 8) {
            controller.cancelServerSentEvents(device1.deviceId());
        }
    };
    RestSBEventListener listener2 = event -> {
        listener2Count.incrementAndGet();
        System.out.println("Event on Lsnr2: " + event);
    };
    controller.addListener(listener1);
    controller.addListener(listener2);
    controller.startServerSentEvents(device1.deviceId(), "/testme/server-sent-events");
    controller.removeListener(listener1);
    controller.removeListener(listener2);
    assertEquals(9, listener1Count.get());
    assertEquals(9, listener2Count.get());
}
|
/**
 * Runs all cleanups for the job: the prioritized cleanups strictly one after
 * another (each with retries), then the regular cleanups concurrently. The
 * returned future completes when every cleanup has finished.
 */
@Override
public CompletableFuture<Void> cleanupAsync(JobID jobId) {
    mainThreadExecutor.assertRunningInMainThread();
    CompletableFuture<Void> cleanupFuture = FutureUtils.completedVoidFuture();
    // Chain prioritized cleanups sequentially: each starts only after the previous one succeeded.
    for (CleanupWithLabel<T> cleanupWithLabel : prioritizedCleanup) {
        cleanupFuture =
            cleanupFuture.thenCompose(
                ignoredValue ->
                    withRetry(
                        jobId,
                        cleanupWithLabel.getLabel(),
                        cleanupWithLabel.getCleanup()));
    }
    // Regular cleanups are kicked off together; completeAll waits for all of
    // them even if some fail.
    return cleanupFuture.thenCompose(
        ignoredValue ->
            FutureUtils.completeAll(
                regularCleanup.stream()
                    .map(
                        cleanupWithLabel ->
                            withRetry(
                                jobId,
                                cleanupWithLabel.getLabel(),
                                cleanupWithLabel.getCleanup()))
                    .collect(Collectors.toList())));
}
|
// Two regular cleanups run concurrently; the overall future completes only
// after BOTH finish, and a failure in the first surfaces as the final error
// with the expected cause chain.
@Test
void testConcurrentCleanupWithExceptionFirst() {
    final SingleCallCleanup cleanup0 = SingleCallCleanup.withoutCompletionOnCleanup();
    final SingleCallCleanup cleanup1 = SingleCallCleanup.withoutCompletionOnCleanup();
    final CompletableFuture<Void> cleanupResult =
        createTestInstanceBuilder()
            .withRegularCleanup("Reg #0", cleanup0)
            .withRegularCleanup("Reg #1", cleanup1)
            .build()
            .cleanupAsync(JOB_ID);
    // Both cleanups were started concurrently for the same job.
    assertThat(cleanupResult).isNotCompleted();
    assertThat(cleanup0).extracting(SingleCallCleanup::getProcessedJobId).isEqualTo(JOB_ID);
    assertThat(cleanup1).extracting(SingleCallCleanup::getProcessedJobId).isEqualTo(JOB_ID);
    final RuntimeException expectedException = new RuntimeException("Expected exception");
    cleanup0.completeCleanupExceptionally(expectedException);
    // Failure of one cleanup must not short-circuit the other.
    assertThat(cleanupResult).isNotCompleted();
    cleanup1.completeCleanup();
    assertThatFuture(cleanupResult)
        .eventuallyFailsWith(ExecutionException.class)
        .extracting(FlinkAssertions::chainOfCauses, STREAM_THROWABLE)
        .hasExactlyElementsOfTypes(
            ExecutionException.class,
            FutureUtils.RetryException.class,
            CompletionException.class,
            expectedException.getClass())
        .last()
        .isEqualTo(expectedException);
}
|
/**
 * Merges a source table schema with derived columns, watermark specs and an
 * optional primary key, applying the per-feature merging strategies.
 */
public Schema mergeTables(
        Map<FeatureOption, MergingStrategy> mergingStrategies,
        Schema sourceSchema,
        List<SqlNode> derivedColumns,
        List<SqlWatermark> derivedWatermarkSpecs,
        SqlTableConstraint derivedPrimaryKey) {
    // Seed the builder with the source schema, then layer derived parts on top.
    final SchemaBuilder builder = new SchemaBuilder(
            mergingStrategies,
            sourceSchema,
            (FlinkTypeFactory) validator.getTypeFactory(),
            dataTypeFactory,
            validator,
            escapeExpression);
    builder.appendDerivedColumns(mergingStrategies, derivedColumns);
    builder.appendDerivedWatermarks(mergingStrategies, derivedWatermarkSpecs);
    builder.appendDerivedPrimaryKey(derivedPrimaryKey);
    return builder.build();
}
|
// With WATERMARKS=EXCLUDING, the derived watermark replaces the source one
// entirely instead of conflicting with it.
@Test
void mergeExcludingWatermarksDuplicate() {
    Schema sourceSchema =
        Schema.newBuilder()
            .column("one", DataTypes.INT())
            .column("timestamp", DataTypes.TIMESTAMP())
            .watermark("timestamp", "timestamp - INTERVAL '5' SECOND")
            .build();
    List<SqlWatermark> derivedWatermarkSpecs =
        Collections.singletonList(
            new SqlWatermark(
                SqlParserPos.ZERO,
                identifier("timestamp"),
                boundedStrategy("timestamp", "10")));
    Map<FeatureOption, MergingStrategy> mergingStrategies = getDefaultMergingStrategies();
    mergingStrategies.put(FeatureOption.WATERMARKS, MergingStrategy.EXCLUDING);
    Schema mergedSchema =
        util.mergeTables(
            mergingStrategies,
            sourceSchema,
            Collections.emptyList(),
            derivedWatermarkSpecs,
            null);
    // Only the derived (10-second) watermark survives, with the column escaped.
    Schema expectedSchema =
        Schema.newBuilder()
            .column("one", DataTypes.INT())
            .column("timestamp", DataTypes.TIMESTAMP())
            .watermark("timestamp", "`timestamp` - INTERVAL '10' SECOND")
            .build();
    assertThat(mergedSchema).isEqualTo(expectedSchema);
}
|
/**
 * Fetches {@code connectionSize} connections for the given data source,
 * delegating to the internal implementation.
 *
 * @throws SQLException if the requested number of connections cannot be obtained
 */
@Override
public List<Connection> getConnections(final String databaseName, final String dataSourceName, final int connectionOffset, final int connectionSize,
                                       final ConnectionMode connectionMode) throws SQLException {
    return getConnections0(databaseName, dataSourceName, connectionOffset, connectionSize, connectionMode);
}
|
// A failing data source must surface as an SQLException whose message names
// the requested size, released count, tuning hints, and the root cause.
@Test
void assertGetConnectionsWhenConnectionCreateFailed() {
    SQLException ex = assertThrows(SQLException.class, () -> databaseConnectionManager.getConnections(DefaultDatabase.LOGIC_NAME, "invalid_ds", 0, 3, ConnectionMode.CONNECTION_STRICTLY));
    assertThat(ex.getMessage(), is("Can not get 3 connections one time, partition succeed connection(0) have released. "
        + "Please consider increasing the 'maxPoolSize' of the data sources or decreasing the 'max-connections-size-per-query' in properties." + System.lineSeparator()
        + "More details: java.sql.SQLException: Mock invalid data source"));
}
|
/**
 * Synchronously queries full external statistics for the given columns.
 * Returns an empty list when the table has vanished (statistics queries run
 * unlocked, so the table may be dropped concurrently).
 *
 * @throws AnalysisException if the statistics query cannot be analyzed
 */
public List<TStatisticData> queryStatisticSync(ConnectContext context, String tableUUID, Table table,
                                               List<String> columnNames) throws AnalysisException {
    if (table == null) {
        // Statistical information query is an unlocked operation,
        // so it is possible for the table to be deleted while the code is running
        return Collections.emptyList();
    }
    // Resolve each requested column to the type used in the statistics query.
    List<Type> columnTypes = Lists.newArrayList();
    for (String colName : columnNames) {
        columnTypes.add(StatisticUtils.getQueryStatisticsColumnType(table, colName));
    }
    String sql = StatisticSQLBuilder.buildQueryExternalFullStatisticsSQL(tableUUID, columnNames, columnTypes);
    return executeStatisticDQL(context, sql);
}
|
// Querying statistics for columns that don't exist on the table ("foo", "bar")
// must fail analysis with a SemanticException.
@Test
public void testEmpty() throws Exception {
    StatisticExecutor statisticExecutor = new StatisticExecutor();
    Database db = GlobalStateMgr.getCurrentState().getDb("test");
    OlapTable olapTable = (OlapTable) db.getTable("t0");
    GlobalStateMgr.getCurrentState().getAnalyzeMgr().addBasicStatsMeta(new BasicStatsMeta(db.getId(), olapTable.getId(), null,
        StatsConstants.AnalyzeType.FULL,
        LocalDateTime.of(2020, 1, 1, 1, 1, 1),
        Maps.newHashMap()));
    Assert.assertThrows(SemanticException.class,
        () -> statisticExecutor.queryStatisticSync(
            StatisticUtils.buildConnectContext(), db.getId(), olapTable.getId(), Lists.newArrayList("foo", "bar")));
}
|
/**
 * Builds the request-target for the WebSocket upgrade request: the full URI
 * when absolute form is configured, otherwise the raw path (defaulting to "/")
 * plus the raw query when present.
 */
protected String upgradeUrl(URI wsURL) {
    // Absolute form: send the URI exactly as given.
    if (absoluteUpgradeUrl) {
        return wsURL.toString();
    }
    // Origin form: raw (still-encoded) path, never empty.
    final String rawPath = wsURL.getRawPath();
    final String effectivePath = rawPath == null || rawPath.isEmpty() ? "/" : rawPath;
    final String rawQuery = wsURL.getRawQuery();
    if (rawQuery == null || rawQuery.isEmpty()) {
        return effectivePath;
    }
    return effectivePath + '?' + rawQuery;
}
|
// The upgrade request must carry the raw (percent-encoded) path, not a
// decoded one.
@Test
@SuppressWarnings("deprecation")
public void testUpgradeUrl() {
    URI uri = URI.create("ws://localhost:9999/path%20with%20ws");
    WebSocketClientHandshaker handshaker = newHandshaker(uri);
    FullHttpRequest request = handshaker.newHandshakeRequest();
    try {
        assertEquals("/path%20with%20ws", request.getUri());
    } finally {
        // Release the reference-counted request to avoid a buffer leak.
        request.release();
    }
}
|
/**
 * Creates {@code createTopic} on the brokers that host {@code sampleTopic},
 * unless it already exists there. Returns false on any lookup or creation
 * failure; "topic not found" for the topic being created is expected and
 * tolerated.
 */
@Override
public boolean createTopicOnTopicBrokerIfNotExist(String createTopic, String sampleTopic, int wQueueNum,
                                                  int rQueueNum, boolean examineTopic, int retryCheckCount) {
    // Look up the current route of the topic to create. A "topic not exist"
    // error is the normal case here; any other error aborts.
    TopicRouteData curTopicRouteData = new TopicRouteData();
    try {
        curTopicRouteData = this.getTopicRouteDataDirectlyFromNameServer(createTopic);
    } catch (Exception e) {
        if (!TopicRouteHelper.isTopicNotExistError(e)) {
            log.error("get cur topic route {} failed.", createTopic, e);
            return false;
        }
    }
    // The sample topic defines which brokers the new topic should live on.
    TopicRouteData sampleTopicRouteData = null;
    try {
        sampleTopicRouteData = this.getTopicRouteDataDirectlyFromNameServer(sampleTopic);
    } catch (Exception e) {
        log.error("create topic {} failed.", createTopic, e);
        return false;
    }
    if (sampleTopicRouteData == null || sampleTopicRouteData.getBrokerDatas().isEmpty()) {
        return false;
    }
    try {
        return this.createTopicOnBroker(createTopic, wQueueNum, rQueueNum, curTopicRouteData.getBrokerDatas(),
            sampleTopicRouteData.getBrokerDatas(), examineTopic, retryCheckCount);
    } catch (Exception e) {
        log.error("create topic {} failed.", createTopic, e);
    }
    return false;
}
|
// The new topic (route lookup first fails with TOPIC_NOT_EXIST) must be
// created on both brokers hosting the sample topic, with the requested
// read/write queue counts.
@Test
public void testCreateTopic() throws Exception {
    when(mqClientAPIExt.getTopicRouteInfoFromNameServer(eq("createTopic"), anyLong()))
        .thenThrow(new MQClientException(ResponseCode.TOPIC_NOT_EXIST, ""))
        .thenReturn(createTopicRouteData(1));
    when(mqClientAPIExt.getTopicRouteInfoFromNameServer(eq("sampleTopic"), anyLong()))
        .thenReturn(createTopicRouteData(2));
    ArgumentCaptor<String> addrArgumentCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<TopicConfig> topicConfigArgumentCaptor = ArgumentCaptor.forClass(TopicConfig.class);
    doNothing().when(mqClientAPIExt).createTopic(addrArgumentCaptor.capture(), anyString(), topicConfigArgumentCaptor.capture(), anyLong());
    assertTrue(defaultAdminService.createTopicOnTopicBrokerIfNotExist(
        "createTopic",
        "sampleTopic",
        7,
        8,
        true,
        1
    ));
    // One createTopic call per broker in the sample topic's route.
    assertEquals(2, addrArgumentCaptor.getAllValues().size());
    Set<String> createAddr = new HashSet<>(addrArgumentCaptor.getAllValues());
    assertTrue(createAddr.contains("127.0.0.1:10911"));
    assertTrue(createAddr.contains("127.0.0.2:10911"));
    assertEquals("createTopic", topicConfigArgumentCaptor.getValue().getTopicName());
    assertEquals(7, topicConfigArgumentCaptor.getValue().getWriteQueueNums());
    assertEquals(8, topicConfigArgumentCaptor.getValue().getReadQueueNums());
}
|
/**
 * Builds a {@code Statement} from the parse tree, first resolving the data
 * sources referenced by it.
 */
public Statement buildStatement(final ParserRuleContext parseTree) {
    return build(Optional.of(getSources(parseTree)), parseTree);
}
|
// HEADERS columns must be ARRAY<STRUCT<KEY STRING, VALUE BYTES>>; a BIGINT
// HEADERS column is rejected with a descriptive error.
@Test
public void shouldRejectIncorrectlyTypedHeadersColumns() {
    // Given:
    final SingleStatementContext stmt
        = givenQuery("CREATE STREAM INPUT (K BIGINT HEADERS) WITH (kafka_topic='input',value_format='JSON');");
    // When:
    final KsqlException e = assertThrows(KsqlException.class, () -> {
        builder.buildStatement(stmt);
    });
    // Then:
    assertThat(e.getMessage(), is("Invalid type for HEADERS column: expected ARRAY<STRUCT<`KEY` STRING, `VALUE` BYTES>>, got BIGINT"));
}
|
/**
 * Updates the text of an existing comment on behalf of {@code memberId}.
 * Authorization is delegated to {@code Comment.update}, which receives the
 * acting member's id alongside the new text.
 */
public void patchCommentById(
    final Long memberId,
    final Long commentId,
    final CommentPatchRequest request
) {
    Comment comment = findComment(commentId);
    comment.update(request.comment(), memberId);
}
|
// (Korean test name, roughly: "a non-author cannot edit the comment".)
// Saving a comment and patching it as a different member (id 2) must raise
// CommentWriterNotEqualsException.
@Test
void ๊ธ์ด์ด๊ฐ_์๋๋ผ๋ฉด_๋๊ธ์_์์ ํ์ง_๋ชปํ๋ค() {
    // given: persist a comment fixture (helper name is mojibake for a Korean
    // "create comment" factory — confirm against the test fixtures).
    commentRepository.save(๋๊ธ_์์ฑ());
    String text = "edit";
    CommentPatchRequest req = new CommentPatchRequest(text);
    // when & then: member 2 is not the writer of comment 1.
    assertThatThrownBy(() -> commentService.patchCommentById(2L, 1L, req))
        .isInstanceOf(CommentWriterNotEqualsException.class);
}
|
/**
 * Composes a supplier with a result handler: the returned supplier obtains
 * the value from {@code supplier} and feeds it through {@code resultHandler}.
 */
public static <T, R> Supplier<R> andThen(Supplier<T> supplier, Function<T, R> resultHandler) {
    return () -> {
        final T intermediate = supplier.get();
        return resultHandler.apply(intermediate);
    };
}
|
// The composed supplier must return the handler's result, not the raw value.
@Test
public void shouldChainSupplierAndResultHandler() {
    Supplier<String> supplier = () -> "BLA";
    Supplier<String> supplierWithRecovery = SupplierUtils.andThen(supplier, result -> "Bla");
    String result = supplierWithRecovery.get();
    assertThat(result).isEqualTo("Bla");
}
|
/**
 * Starts the gRPC server once the Spring application context has been
 * refreshed and all beans are available.
 */
@Override
public void onApplicationEvent(@NotNull final ContextRefreshedEvent event) {
    startGrpcServer();
}
|
// Smoke test: starting the server on context refresh must not throw.
// NOTE(review): this test has no assertions — it only fails on exception;
// consider asserting the server state after startup.
@Test
public void testOnApplicationEvent() {
    GrpcServerBuilder testGrpcServerBuilder = () -> ServerBuilder.forPort(8088);
    GrpcServerRunner testGrpcServerRunner = new GrpcServerRunner(testGrpcServerBuilder, testGrpcClientEventListener);
    testGrpcServerRunner.onApplicationEvent(testEvent);
}
|
/**
 * Parses the CLI args, creates an admin client from them, and runs the
 * log-dirs command; the client is closed via try-with-resources.
 */
private static void execute(String... args) throws Exception {
    LogDirsCommandOptions options = new LogDirsCommandOptions(args);
    try (Admin adminClient = createAdminClient(options)) {
        execute(options, adminClient);
    }
}
|
// A duplicated broker id in --broker-list ("1,1") must be deduplicated: the
// JSON output (3rd line of stdout) contains exactly one broker entry.
@Test
@SuppressWarnings("unchecked")
public void shouldNotThrowWhenDuplicatedBrokers() throws JsonProcessingException {
    Node broker = new Node(1, "hostname", 9092);
    try (MockAdminClient adminClient = new MockAdminClient(Collections.singletonList(broker), broker)) {
        String standardOutput = execute(fromArgsToOptions("--bootstrap-server", "EMPTY", "--broker-list", "1,1", "--describe"), adminClient);
        String[] standardOutputLines = standardOutput.split("\n");
        assertEquals(3, standardOutputLines.length);
        Map<String, Object> information = new ObjectMapper().readValue(standardOutputLines[2], HashMap.class);
        List<Object> brokersInformation = (List<Object>) information.get("brokers");
        Integer brokerId = (Integer) ((HashMap<String, Object>) brokersInformation.get(0)).get("broker");
        assertEquals(1, brokersInformation.size());
        assertEquals(1, brokerId);
    }
}
|
/**
 * Static factory for an ALERT-category event; {@code data} and
 * {@code description} may be null.
 */
public static Event createAlert(String name, @Nullable String data, @Nullable String description) {
    return new Event(name, Category.ALERT, data, description);
}
|
// Event equality is based on name and category only — the null data and
// description do not participate.
@Test
public void same_name_and_category_make_equal_events() {
    Event source = Event.createAlert(SOME_NAME, null, null);
    assertThat(source)
        .isEqualTo(Event.createAlert(SOME_NAME, null, null))
        .isEqualTo(source)
        .isNotNull();
}
|
/**
 * Returns the storage class for the given path. For buckets the value comes
 * from a per-bucket host preference (or null when unset); for objects it is
 * read from the object's attributes, defaulting to STANDARD because S3 omits
 * the header for Standard-class objects.
 *
 * @throws BackgroundException if the object's attributes cannot be read
 */
@Override
public String getClass(final Path file) throws BackgroundException {
    if(containerService.isContainer(file)) {
        final String key = String.format("s3.storageclass.%s", containerService.getContainer(file).getName());
        // Read the preference once instead of constructing HostPreferences twice.
        final String preference = new HostPreferences(session.getHost()).getProperty(key);
        if(StringUtils.isNotBlank(preference)) {
            return preference;
        }
        return null;
    }
    // HEAD request provides storage class information of the object.
    // S3 returns this header for all objects except for Standard storage class objects.
    final String redundancy = new S3AttributesFinderFeature(session, acl).find(file).getStorageClass();
    if(StringUtils.isBlank(redundancy)) {
        return S3Object.STORAGE_CLASS_STANDARD;
    }
    return redundancy;
}
|
// Requesting the storage class of a non-existent object must fail with
// NotfoundException (raised by the underlying attributes lookup).
@Test(expected = NotfoundException.class)
public void testNotFound() throws Exception {
    final Path container = new Path("versioning-test-eu-central-1-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory));
    final Path test = new Path(container, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final S3StorageClassFeature feature = new S3StorageClassFeature(session, new S3AccessControlListFeature(session));
    feature.getClass(test);
}
|
/**
 * Constant-folds {@code subtract} for two LARGEINT operands using
 * arbitrary-precision {@code BigInteger} arithmetic (no overflow).
 */
@ConstantFunction(name = "subtract", argTypes = {LARGEINT, LARGEINT}, returnType = LARGEINT, isMonotonic = true)
public static ConstantOperator subtractLargeInt(ConstantOperator first, ConstantOperator second) {
    return ConstantOperator.createLargeInt(first.getLargeInt().subtract(second.getLargeInt()));
}
|
// 100 - 100 must constant-fold to LARGEINT 0.
@Test
public void subtractLargeInt() {
    assertEquals("0",
            ScalarOperatorFunctions.subtractLargeInt(O_LI_100, O_LI_100).getLargeInt().toString());
}
|
/**
 * Parses the flow-control parameters for the given resource from the request.
 * <p>
 * Every rule carrying a param item contributes one parsed value placed at the
 * index recorded in its {@code GatewayParamFlowItem}; rules without a param
 * item share a single trailing slot filled with the generated default
 * constant. An empty array is returned when the arguments are invalid, when
 * no rules exist for the resource, or when the param-carrying rules do not
 * uniformly satisfy the predicate.
 *
 * @param resource      resource name (route id or custom API name)
 * @param request       request object to extract values from
 * @param rulePredicate predicate every param-carrying rule must satisfy
 * @return the parameter array, or an empty array when not applicable
 */
public Object[] parseParameterFor(String resource, T request, Predicate<GatewayFlowRule> rulePredicate) {
    if (StringUtil.isEmpty(resource) || request == null || rulePredicate == null) {
        return new Object[0];
    }
    Set<GatewayFlowRule> gatewayRules = new HashSet<>();
    Set<Boolean> predSet = new HashSet<>();
    boolean hasNonParamRule = false;
    for (GatewayFlowRule rule : GatewayRuleManager.getRulesForResource(resource)) {
        if (rule.getParamItem() != null) {
            gatewayRules.add(rule);
            predSet.add(rulePredicate.test(rule));
        } else {
            hasNonParamRule = true;
        }
    }
    // No rules configured for this resource at all.
    if (!hasNonParamRule && gatewayRules.isEmpty()) {
        return new Object[0];
    }
    // Mixed or failing predicate results mean the rules belong to a different
    // resource mode; bail out rather than parse a partial set.
    if (predSet.size() > 1 || predSet.contains(false)) {
        return new Object[0];
    }
    // Reserve one extra trailing slot for the shared non-param (default) rule.
    int size = hasNonParamRule ? gatewayRules.size() + 1 : gatewayRules.size();
    Object[] arr = new Object[size];
    for (GatewayFlowRule rule : gatewayRules) {
        GatewayParamFlowItem paramItem = rule.getParamItem();
        // Each item carries its own slot index into the result array.
        int idx = paramItem.getIndex();
        String param = parseInternal(paramItem, request);
        arr[idx] = param;
    }
    if (hasNonParamRule) {
        arr[size - 1] = SentinelGatewayConstants.GATEWAY_DEFAULT_PARAM;
    }
    return arr;
}
|
/**
 * End-to-end parse test: five param-carrying route rules (client IP, header,
 * URL param, host, cookie), one rule without a param item and one custom-API
 * rule are loaded. The parsed array must hold each mocked value at its rule's
 * item index, with the generated default constant in the trailing slot.
 */
@Test
public void testParseParametersWithItems() {
    RequestItemParser<Object> itemParser = mock(RequestItemParser.class);
    GatewayParamParser<Object> paramParser = new GatewayParamParser<>(itemParser);
    // Create a fake request.
    Object request = new Object();
    // Prepare gateway rules.
    Set<GatewayFlowRule> rules = new HashSet<>();
    final String routeId1 = "my_test_route_A";
    final String api1 = "my_test_route_B";
    final String headerName = "X-Sentinel-Flag";
    final String paramName = "p";
    final String cookieName = "myCookie";
    // Rule without a param item: contributes the trailing default-constant slot.
    GatewayFlowRule routeRuleNoParam = new GatewayFlowRule(routeId1)
        .setCount(10)
        .setIntervalSec(10);
    GatewayFlowRule routeRule1 = new GatewayFlowRule(routeId1)
        .setCount(2)
        .setIntervalSec(2)
        .setBurst(2)
        .setParamItem(new GatewayParamFlowItem()
            .setParseStrategy(SentinelGatewayConstants.PARAM_PARSE_STRATEGY_CLIENT_IP)
        );
    GatewayFlowRule routeRule2 = new GatewayFlowRule(routeId1)
        .setCount(10)
        .setIntervalSec(1)
        .setControlBehavior(RuleConstant.CONTROL_BEHAVIOR_RATE_LIMITER)
        .setMaxQueueingTimeoutMs(600)
        .setParamItem(new GatewayParamFlowItem()
            .setParseStrategy(SentinelGatewayConstants.PARAM_PARSE_STRATEGY_HEADER)
            .setFieldName(headerName)
        );
    GatewayFlowRule routeRule3 = new GatewayFlowRule(routeId1)
        .setCount(20)
        .setIntervalSec(1)
        .setBurst(5)
        .setParamItem(new GatewayParamFlowItem()
            .setParseStrategy(SentinelGatewayConstants.PARAM_PARSE_STRATEGY_URL_PARAM)
            .setFieldName(paramName)
        );
    GatewayFlowRule routeRule4 = new GatewayFlowRule(routeId1)
        .setCount(120)
        .setIntervalSec(10)
        .setBurst(30)
        .setParamItem(new GatewayParamFlowItem()
            .setParseStrategy(SentinelGatewayConstants.PARAM_PARSE_STRATEGY_HOST)
        );
    GatewayFlowRule routeRule5 = new GatewayFlowRule(routeId1)
        .setCount(50)
        .setIntervalSec(30)
        .setParamItem(new GatewayParamFlowItem()
            .setParseStrategy(SentinelGatewayConstants.PARAM_PARSE_STRATEGY_COOKIE)
            .setFieldName(cookieName)
        );
    // Rule registered under a custom API name rather than a route id.
    GatewayFlowRule apiRule1 = new GatewayFlowRule(api1)
        .setResourceMode(SentinelGatewayConstants.RESOURCE_MODE_CUSTOM_API_NAME)
        .setCount(5)
        .setIntervalSec(1)
        .setParamItem(new GatewayParamFlowItem()
            .setParseStrategy(SentinelGatewayConstants.PARAM_PARSE_STRATEGY_URL_PARAM)
            .setFieldName(paramName)
        );
    rules.add(routeRule1);
    rules.add(routeRule2);
    rules.add(routeRule3);
    rules.add(routeRule4);
    rules.add(routeRule5);
    rules.add(routeRuleNoParam);
    rules.add(apiRule1);
    GatewayRuleManager.loadRules(rules);
    // Mock the request item values the parser should pick up.
    final String expectedHost = "hello.test.sentinel";
    final String expectedAddress = "66.77.88.99";
    final String expectedHeaderValue1 = "Sentinel";
    final String expectedUrlParamValue1 = "17";
    final String expectedCookieValue1 = "Sentinel-Foo";
    mockClientHostAddress(itemParser, expectedAddress);
    Map<String, String> expectedHeaders = new HashMap<String, String>() {{
        put(headerName, expectedHeaderValue1); put("Host", expectedHost);
    }};
    mockHeaders(itemParser, expectedHeaders);
    mockSingleUrlParam(itemParser, paramName, expectedUrlParamValue1);
    mockSingleCookie(itemParser, cookieName, expectedCookieValue1);
    Object[] params = paramParser.parseParameterFor(routeId1, request, routeIdPredicate);
    // Param length should be 6 (5 with parameters, 1 normal flow with generated constant)
    assertThat(params.length).isEqualTo(6);
    assertThat(params[routeRule1.getParamItem().getIndex()]).isEqualTo(expectedAddress);
    assertThat(params[routeRule2.getParamItem().getIndex()]).isEqualTo(expectedHeaderValue1);
    assertThat(params[routeRule3.getParamItem().getIndex()]).isEqualTo(expectedUrlParamValue1);
    assertThat(params[routeRule4.getParamItem().getIndex()]).isEqualTo(expectedHost);
    assertThat(params[routeRule5.getParamItem().getIndex()]).isEqualTo(expectedCookieValue1);
    assertThat(params[params.length - 1]).isEqualTo(SentinelGatewayConstants.GATEWAY_DEFAULT_PARAM);
    // Wrong predicate for the API resource must yield an empty result.
    assertThat(paramParser.parseParameterFor(api1, request, routeIdPredicate).length).isZero();
    String expectedUrlParamValue2 = "fs";
    mockSingleUrlParam(itemParser, paramName, expectedUrlParamValue2);
    params = paramParser.parseParameterFor(api1, request, apiNamePredicate);
    assertThat(params.length).isEqualTo(1);
    assertThat(params[apiRule1.getParamItem().getIndex()]).isEqualTo(expectedUrlParamValue2);
}
|
/**
 * Strips DWG/MTEXT inline formatting codes from the given string, returning
 * plain text. In order it removes: underline/overstrike/strike-through
 * toggles, semicolon-terminated markers, new-line markers (replaced with
 * {@code \n}), stacked-fraction markup (rewritten as {@code num/den}) and
 * surrounding curly braces; finally escape characters are dropped and escaped
 * backslashes unescaped.
 * <p>
 * NOTE(review): the regex pattern strings ({@code underlineStrikeThrough},
 * {@code endMarks}, {@code newLine}, {@code stackFrac}, {@code curlyBraces},
 * {@code escapeChars}) are fields declared elsewhere in this class; each loop
 * skips matches whose leading capture ends with a backslash, i.e. markers
 * that are escaped in the source text.
 *
 * @param dwgString raw DWG text with inline formatting
 * @return the cleaned plain-text string
 */
public String cleanupDwgString(String dwgString) {
    String cleanString = dwgString;
    StringBuilder sb = new StringBuilder();
    //Strip off start/stop underline/overstrike/strike throughs
    Matcher m = Pattern.compile(underlineStrikeThrough).matcher(cleanString);
    while (m.find()) {
        if (! m.group(1).endsWith("\\")) {
            m.appendReplacement(sb, "");
        }
    }
    m.appendTail(sb);
    cleanString = sb.toString();
    //Strip off semi-colon ended markers
    m = Pattern.compile(endMarks).matcher(cleanString);
    sb.setLength(0);
    while (m.find()) {
        if (! m.group(1).endsWith("\\")) {
            m.appendReplacement(sb, "");
        }
    }
    m.appendTail(sb);
    cleanString = sb.toString();
    //new line marker \\P replace with actual new line
    m = Pattern.compile(newLine).matcher(cleanString);
    sb.setLength(0);
    while (m.find()) {
        if (m.group(1).endsWith("P")) {
            m.appendReplacement(sb, "\n");
        }
    }
    m.appendTail(sb);
    cleanString = sb.toString();
    //stacking fractions
    m = Pattern.compile(stackFrac).matcher(cleanString);
    sb.setLength(0);
    while (m.find()) {
        // group(1) non-null would mean the marker is escaped; only rewrite real fractions
        if (m.group(1) == null) {
            m.appendReplacement(sb, m.group(2) + "/" + m.group(3));
        }
    }
    m.appendTail(sb);
    cleanString = sb.toString();
    //strip brackets around text, make sure they aren't escaped
    m = Pattern.compile(curlyBraces).matcher(cleanString);
    sb.setLength(0);
    while (m.find()) {
        if (m.group(1) == null) {
            m.appendReplacement(sb, "");
        }
    }
    m.appendTail(sb);
    cleanString = sb.toString();
    //now get rid of escape characters
    cleanString = cleanString.replaceAll(escapeChars, "");
    //now unescape backslash
    cleanString = cleanString.replaceAll("(\\\\\\\\)", "\\\\");
    return cleanString;
}
|
// Underline (\L) and strike-through (\K/\k) toggles must be stripped while
// escaped curly braces (\{ and \}) are preserved as literal braces.
@Test
public void testUnderlineEtc() {
    String formatted = "l \\L open cu\\lrly bra\\Kck\\ket \\{ and a close " +
        "\\} right?";
    DWGReadFormatRemover dwgReadFormatter = new DWGReadFormatRemover();
    String expected = "l open curly bracket { and a close } right?";
    assertEquals(expected, dwgReadFormatter.cleanupDwgString(formatted));
}
|
/**
 * Creates a {@code FieldGetter} for the given reflective field, delegating
 * type and element-type inference to {@code newGetter} with the field's
 * declared type and its value extractor.
 */
public static Getter newFieldGetter(Object object, Getter parent, Field field, String modifier) throws Exception {
    return newGetter(object, parent, modifier, field.getType(), field::get,
            (t, et) -> new FieldGetter(parent, field, modifier, t, et));
}
|
// When extracting "[any]" through a null array field whose parent is a
// non-empty multi-result, the getter must infer the element return type
// (Integer) instead of the raw array type.
@Test
public void newFieldGetter_whenExtractingFromNull_Array_FieldAndParentIsNonEmptyMultiResult_thenInferReturnType()
        throws Exception {
    OuterObject object = new OuterObject("name", InnerObject.nullInner("inner"));
    Getter parentGetter = GetterFactory.newFieldGetter(object, null, innersArrayField, "[any]");
    Getter innerObjectNameGetter = GetterFactory.newFieldGetter(object, parentGetter, innerAttributesArrayField, "[any]");
    Class<?> returnType = innerObjectNameGetter.getReturnType();
    assertEquals(Integer.class, returnType);
}
|
/**
 * @return the configured MQTT server address
 */
public String getMqttServer() {
    return mqttServer;
}
|
// A default (unconfigured) meta must survive an XML round-trip unchanged,
// with the MQTT server defaulting to the empty string rather than null.
@Test
public void testSaveDefaultEmptyConnection() {
    MQTTConsumerMeta roundTrippedMeta = fromXml( meta.getXML() );
    assertThat( roundTrippedMeta, equalTo( meta ) );
    assertTrue( roundTrippedMeta.getMqttServer().isEmpty() );
}
|
/**
 * Handles a wheel revolution measurement. When a previous sample exists it
 * derives elapsed time, total distance, trip distance, speed and wheel
 * cadence, and notifies {@code onDistanceChanged}.
 *
 * @param device             source device
 * @param wheelRevolutions   cumulative wheel revolution count
 * @param lastWheelEventTime event timestamp in 1/1024 s units (UINT16 counter,
 *                           wraps modulo 65536)
 */
@Override
public void onWheelMeasurementReceived(@NonNull final BluetoothDevice device, final long wheelRevolutions, final int lastWheelEventTime) {
    // Identical timestamp means a duplicate sample; nothing new to derive.
    if (mLastWheelEventTime == lastWheelEventTime)
        return;
    if (mLastWheelRevolutions >= 0) {
        // Circumference presumably in millimetres (values below divide by 1000 to get metres) — TODO confirm
        final float circumference = getWheelCircumference();
        float timeDifference;
        if (lastWheelEventTime < mLastWheelEventTime)
            // UINT16 roll-over: add the full modulus 65536 (not 65535) so that
            // e.g. 65535 -> 0 yields exactly one tick instead of zero.
            timeDifference = (65536 + lastWheelEventTime - mLastWheelEventTime) / 1024.0f; // [s]
        else
            timeDifference = (lastWheelEventTime - mLastWheelEventTime) / 1024.0f; // [s]
        final float distanceDifference = (wheelRevolutions - mLastWheelRevolutions) * circumference / 1000.0f; // [m]
        final float totalDistance = (float) wheelRevolutions * circumference / 1000.0f; // [m]
        final float distance = (float) (wheelRevolutions - mInitialWheelRevolutions) * circumference / 1000.0f; // [m]
        final float speed = distanceDifference / timeDifference; // [m/s]
        mWheelCadence = (wheelRevolutions - mLastWheelRevolutions) * 60.0f / timeDifference; // [revolutions/minute]
        // Notify listener about the new measurement
        onDistanceChanged(device, totalDistance, distance, speed);
    }
    mLastWheelRevolutions = wheelRevolutions;
    mLastWheelEventTime = lastWheelEventTime;
}
|
// Builds a 7-byte CSC packet (flags=0x01: wheel data present, UINT32
// revolutions, UINT16 event time) and verifies the parsed values are
// delivered to onWheelMeasurementReceived; invalid-data callback must not fire.
@Test
public void onWheelMeasurementReceived() {
    final DataReceivedCallback callback = new CyclingSpeedAndCadenceMeasurementDataCallback() {
        @Override
        public void onWheelMeasurementReceived(@NonNull final BluetoothDevice device, final long wheelRevolutions, final int lastWheelEventTime) {
            assertEquals("Wheel measurement", 12345, wheelRevolutions);
            assertEquals("Wheel last event time", 1000, lastWheelEventTime);
        }
        @Override
        public void onInvalidDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
            // Deliberately failing assertion: valid data must never land here.
            assertEquals("Correct CSC reported as invalid", 1, 2);
        }
        @Override
        public void onDistanceChanged(@NonNull final BluetoothDevice device, final float totalDistance, final float distance, final float speed) {
            // ignore
        }
        @Override
        public void onCrankDataChanged(@NonNull final BluetoothDevice device, final float crankCadence, final float gearRatio) {
            // ignore
        }
    };
    final MutableData data = new MutableData(new byte[7]);
    // Flags
    assertTrue(data.setByte(0x01, 0));
    // Wheel revolutions
    assertTrue(data.setValue(12345, Data.FORMAT_UINT32_LE, 1));
    assertTrue(data.setValue(1000, Data.FORMAT_UINT16_LE, 5));
    callback.onDataReceived(null, data);
}
|
/**
 * Offers the item to the outbox at the given ordinal.
 *
 * @return {@code true} if the item was accepted, {@code false} if the outbox
 *         refused it (caller must retry later)
 */
@CheckReturnValue
protected final boolean tryEmit(int ordinal, @Nonnull Object item) {
    return outbox.offer(ordinal, item);
}
|
// Emitting to a single ordinal must succeed and deliver the item to exactly
// that ordinal.
@Test
public void when_tryEmitTo1_then_emittedTo1() {
    // When
    boolean emitted = p.tryEmit(ORDINAL_1, MOCK_ITEM);
    // Then
    assertTrue(emitted);
    validateReceptionAtOrdinals(MOCK_ITEM, ORDINAL_1);
}
|
/**
 * @return the registered name of this transform function ({@code FUNCTION_NAME})
 */
@Override
public String getName() {
    return FUNCTION_NAME;
}
|
/**
 * Exercises mod(...) across int/long/float/double/string column pairs and a
 * deeply nested expression; expected values are computed with Java's {@code %}
 * after promoting both operands to double, matching the transform's semantics.
 */
@Test
public void testModuloTransformFunction() {
    ExpressionContext expression =
        RequestContextUtils.getExpression(String.format("mod(%s,%s)", INT_SV_COLUMN, LONG_SV_COLUMN));
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertTrue(transformFunction instanceof ModuloTransformFunction);
    Assert.assertEquals(transformFunction.getName(), ModuloTransformFunction.FUNCTION_NAME);
    double[] expectedValues = new double[NUM_ROWS];
    for (int i = 0; i < NUM_ROWS; i++) {
        expectedValues[i] = (double) _intSVValues[i] % (double) _longSVValues[i];
    }
    testTransformFunction(transformFunction, expectedValues);
    expression = RequestContextUtils.getExpression(String.format("mod(%s,%s)", LONG_SV_COLUMN, FLOAT_SV_COLUMN));
    transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertTrue(transformFunction instanceof ModuloTransformFunction);
    for (int i = 0; i < NUM_ROWS; i++) {
        expectedValues[i] = (double) _longSVValues[i] % (double) _floatSVValues[i];
    }
    testTransformFunction(transformFunction, expectedValues);
    expression =
        RequestContextUtils.getExpression(String.format("mod(%s,%s)", FLOAT_SV_COLUMN, DOUBLE_SV_COLUMN));
    transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertTrue(transformFunction instanceof ModuloTransformFunction);
    for (int i = 0; i < NUM_ROWS; i++) {
        expectedValues[i] = (double) _floatSVValues[i] % _doubleSVValues[i];
    }
    testTransformFunction(transformFunction, expectedValues);
    // String operands are parsed to double before applying the modulo.
    expression =
        RequestContextUtils.getExpression(String.format("mod(%s,%s)", DOUBLE_SV_COLUMN, STRING_SV_COLUMN));
    transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertTrue(transformFunction instanceof ModuloTransformFunction);
    for (int i = 0; i < NUM_ROWS; i++) {
        expectedValues[i] = _doubleSVValues[i] % Double.parseDouble(_stringSVValues[i]);
    }
    testTransformFunction(transformFunction, expectedValues);
    expression = RequestContextUtils.getExpression(String.format("mod(%s,%s)", STRING_SV_COLUMN, INT_SV_COLUMN));
    transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertTrue(transformFunction instanceof ModuloTransformFunction);
    for (int i = 0; i < NUM_ROWS; i++) {
        expectedValues[i] = Double.parseDouble(_stringSVValues[i]) % (double) _intSVValues[i];
    }
    testTransformFunction(transformFunction, expectedValues);
    // Nested mod expressions with literal operands must evaluate inside-out.
    expression = RequestContextUtils.getExpression(String
        .format("mod(mod(mod(mod(mod(12,%s),%s),mod(mod(%s,%s),0.34)),%s),%s)", STRING_SV_COLUMN, DOUBLE_SV_COLUMN,
            FLOAT_SV_COLUMN, LONG_SV_COLUMN, INT_SV_COLUMN, DOUBLE_SV_COLUMN));
    transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertTrue(transformFunction instanceof ModuloTransformFunction);
    for (int i = 0; i < NUM_ROWS; i++) {
        expectedValues[i] = (((((12d % Double.parseDouble(_stringSVValues[i])) % _doubleSVValues[i]) % (
            ((double) _floatSVValues[i] % (double) _longSVValues[i]) % 0.34)) % (double) _intSVValues[i])
            % _doubleSVValues[i]);
    }
    testTransformFunction(transformFunction, expectedValues);
}
|
/**
 * Submits the task to the delegate executor, marking the submission meter and
 * wrapping the callable so its execution is instrumented.
 */
@Nonnull
@Override
public <T> Future<T> submit(@Nonnull Callable<T> task) {
    submitted.mark();
    return delegate.submit(new InstrumentedCallable<>(task));
}
|
/**
 * Verifies metric lifecycle for a plain submit: before submission all meters
 * are zero; while the task runs only submitted/running are 1; after completion
 * running returns to zero and completed/duration record the execution. The
 * schedule-related meters must remain untouched throughout.
 */
@Test
public void testSubmitRunnable() throws Exception {
    assertThat(submitted.getCount()).isZero();
    assertThat(running.getCount()).isZero();
    assertThat(completed.getCount()).isZero();
    assertThat(duration.getCount()).isZero();
    assertThat(scheduledOnce.getCount()).isZero();
    assertThat(scheduledRepetitively.getCount()).isZero();
    assertThat(scheduledOverrun.getCount()).isZero();
    assertThat(percentOfPeriod.getCount()).isZero();
    Future<?> theFuture = instrumentedScheduledExecutor.submit(() -> {
        // Inside the task: submission and running are counted, completion not yet.
        assertThat(submitted.getCount()).isEqualTo(1);
        assertThat(running.getCount()).isEqualTo(1);
        assertThat(completed.getCount()).isZero();
        assertThat(duration.getCount()).isZero();
        assertThat(scheduledOnce.getCount()).isZero();
        assertThat(scheduledRepetitively.getCount()).isZero();
        assertThat(scheduledOverrun.getCount()).isZero();
        assertThat(percentOfPeriod.getCount()).isZero();
    });
    theFuture.get();
    assertThat(submitted.getCount()).isEqualTo(1);
    assertThat(running.getCount()).isZero();
    assertThat(completed.getCount()).isEqualTo(1);
    assertThat(duration.getCount()).isEqualTo(1);
    assertThat(duration.getSnapshot().size()).isEqualTo(1);
    assertThat(scheduledOnce.getCount()).isZero();
    assertThat(scheduledRepetitively.getCount()).isZero();
    assertThat(scheduledOverrun.getCount()).isZero();
    assertThat(percentOfPeriod.getCount()).isZero();
}
|
/**
 * Returns the table wrapper for the given Paimon table, converting the native
 * Paimon row type into nullable columns. Results are cached per identifier;
 * {@code null} is returned (and an error logged) when the native table does
 * not exist.
 *
 * @param dbName  database name
 * @param tblName table name
 * @return the cached or newly built {@code PaimonTable}, or {@code null} if absent
 */
@Override
public Table getTable(String dbName, String tblName) {
    Identifier identifier = new Identifier(dbName, tblName);
    // Single map lookup instead of containsKey + get.
    Table cached = tables.get(identifier);
    if (cached != null) {
        return cached;
    }
    org.apache.paimon.table.Table paimonNativeTable;
    try {
        paimonNativeTable = this.paimonNativeCatalog.getTable(identifier);
    } catch (Catalog.TableNotExistException e) {
        LOG.error("Paimon table {}.{} does not exist.", dbName, tblName, e);
        return null;
    }
    List<DataField> fields = paimonNativeTable.rowType().getFields();
    ArrayList<Column> fullSchema = new ArrayList<>(fields.size());
    for (DataField field : fields) {
        String fieldName = field.name();
        DataType type = field.type();
        Type fieldType = ColumnTypeConverter.fromPaimonType(type);
        // Paimon columns are mapped as nullable columns.
        Column column = new Column(fieldName, fieldType, true, field.description());
        fullSchema.add(column);
    }
    long createTime = this.getTableCreateTime(dbName, tblName);
    // Empty string when the native table carries no comment.
    String comment = paimonNativeTable.comment().orElse("");
    PaimonTable table = new PaimonTable(this.catalogName, dbName, tblName, fullSchema, paimonNativeTable, createTime);
    table.setComment(comment);
    tables.put(identifier, table);
    return table;
}
|
/**
 * Partition-prune test: with the remote-file metadata mocked to return a
 * single split, the Paimon scan rule must select exactly one partition.
 */
@Test
public void testPrunePaimonPartition() {
    new MockUp<MetadataMgr>() {
        @Mock
        public List<RemoteFileInfo> getRemoteFiles(Table table, GetRemoteFilesParams params) {
            return Lists.newArrayList(RemoteFileInfo.builder()
                .setFiles(Lists.newArrayList(PaimonRemoteFileDesc.createPamonRemoteFileDesc(
                    new PaimonSplitsInfo(null, Lists.newArrayList((Split) splits.get(0))))))
                .build());
        }
    };
    new MockUp<PaimonMetadata>() {
        @Mock
        public long getTableCreateTime(String dbName, String tblName) {
            return 0L;
        }
    };
    PaimonTable paimonTable = (PaimonTable) metadata.getTable("db1", "tbl1");
    ExternalScanPartitionPruneRule rule0 = ExternalScanPartitionPruneRule.PAIMON_SCAN;
    ColumnRefOperator colRef1 = new ColumnRefOperator(1, Type.INT, "f2", true);
    Column col1 = new Column("f2", Type.INT, true);
    ColumnRefOperator colRef2 = new ColumnRefOperator(2, Type.STRING, "dt", true);
    Column col2 = new Column("dt", Type.STRING, true);
    Map<ColumnRefOperator, Column> colRefToColumnMetaMap = new HashMap<>();
    Map<Column, ColumnRefOperator> columnMetaToColRefMap = new HashMap<>();
    // Map each column once (duplicate put calls with identical arguments removed).
    colRefToColumnMetaMap.put(colRef1, col1);
    columnMetaToColRefMap.put(col2, colRef2);
    OptExpression scan =
        new OptExpression(new LogicalPaimonScanOperator(paimonTable, colRefToColumnMetaMap, columnMetaToColRefMap,
            -1, null));
    rule0.transform(scan, new OptimizerContext(new Memo(), new ColumnRefFactory()));
    assertEquals(1, ((LogicalPaimonScanOperator) scan.getOp()).getScanOperatorPredicates()
        .getSelectedPartitionIds().size());
}
|
/**
 * Encodes one event onto the stream: either the interpolated {@code format}
 * template or, when no format is configured, the event data serialized as
 * JSON. The configured delimiter is appended and the result is written using
 * the configured {@code charset}.
 */
@Override
public void encode(Event event, OutputStream output) throws IOException {
    String outputString = (format == null
        ? JSON_MAPPER.writeValueAsString(event.getData())
        : StringInterpolation.evaluate(event, format))
        + delimiter;
    output.write(outputString.getBytes(charset));
}
|
// Encodes two events with default settings (JSON + default delimiter) and
// verifies delimiter placement: once at the midpoint (events serialize to the
// same length) and once at the very end of the stream.
@Test
public void testEncode() throws IOException {
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    Line line = new Line(new ConfigurationImpl(Collections.emptyMap()), null);
    Event e = new Event();
    e.setField("myfield1", "myvalue1");
    e.setField("myfield2", 42L);
    line.encode(e, outputStream);
    e.setField("myfield1", "myvalue2");
    e.setField("myfield2", 43L);
    line.encode(e, outputStream);
    String delimiter = Line.DEFAULT_DELIMITER;
    String resultingString = outputStream.toString();
    // first delimiter should occur at the halfway point of the string
    assertEquals(resultingString.indexOf(delimiter), (resultingString.length() / 2) - delimiter.length());
    // second delimiter should occur at end of string
    assertEquals(resultingString.lastIndexOf(delimiter), resultingString.length() - delimiter.length());
}
|
/**
 * Opens the current journal database, retrying up to {@code RETRY_TIME} times
 * with a sleep between attempts. If no journal databases exist yet, a fresh
 * one is created named after the replayed journal id + 1; otherwise the
 * last (highest-numbered) database becomes the current journal. Any
 * previously open journal database is closed first.
 *
 * @throws InterruptedException if interrupted while sleeping between retries
 * @throws JournalException     if the environment is closing or all retries fail
 */
@Override
public void open() throws InterruptedException, JournalException {
    // Open a new journal database or get last existing one as current journal database
    List<Long> dbNames;
    JournalException exception = null;
    for (int i = 0; i < RETRY_TIME; i++) {
        try {
            // sleep for retry
            if (i > 0) {
                Thread.sleep(SLEEP_INTERVAL_SEC * 1000L);
            }
            dbNames = bdbEnvironment.getDatabaseNamesWithPrefix(prefix);
            if (dbNames == null) { // bdb environment is closing
                throw new JournalException("fail to get dbNames while open bdbje journal. will exit");
            }
            String dbName;
            if (dbNames.isEmpty()) {
                /*
                 * This is the very first time to open. Usually, we will open a new database named "1".
                 * But when we start cluster with an image file copied from other cluster,
                 * here we should open database with name image max journal id + 1.
                 * (default GlobalStateMgr.getCurrentState().getReplayedJournalId() is 0)
                 */
                if (prefix.isEmpty()) {
                    dbName = getFullDatabaseName(GlobalStateMgr.getCurrentState().getReplayedJournalId() + 1);
                } else {
                    dbName = getFullDatabaseName(StarMgrServer.getCurrentState().getReplayId() + 1);
                }
                LOG.info("the very first time to open bdb, dbname is {}", dbName);
            } else {
                // get last database as current journal database
                dbName = getFullDatabaseName(dbNames.get(dbNames.size() - 1));
            }
            // Close any previously open journal before switching databases.
            if (currentJournalDB != null) {
                currentJournalDB.close();
            }
            currentJournalDB = bdbEnvironment.openDatabase(dbName);
            if (currentJournalDB == null) {
                LOG.warn("fail to open database {}. retried {} times", dbName, i);
                continue;
            }
            return;
        } catch (DatabaseException e) {
            // Remember the last failure so it can be rethrown after the final retry.
            String errMsg = String.format("catch exception after retried %d times", i + 1);
            LOG.warn(errMsg, e);
            exception = new JournalException(errMsg);
            exception.initCause(e);
        }
    }
    // failed after retry
    throw exception;
}
|
// A null database-name list signals a closing BDB environment: open() must
// fail fast with JournalException instead of retrying.
@Test(expected = JournalException.class)
public void testOpenGetNamesFails(@Mocked BDBEnvironment environment) throws Exception {
    new Expectations(environment) {
        {
            environment.getDatabaseNamesWithPrefix("");
            times = 1;
            result = null;
        }
    };
    BDBJEJournal journal = new BDBJEJournal(environment);
    journal.open();
    Assert.fail();
}
|
/**
 * @return the configured class loader
 */
public ClassLoader getClassLoader() {
    return classLoader;
}
|
// The copy constructor must produce a config equal to a fully configured
// source built from the full XML sample.
@Test
public void testCopyConstructor_withFullyConfiguredClientConfig() throws IOException {
    URL schemaResource = ClientConfigTest.class.getClassLoader().getResource("hazelcast-client-full.xml");
    ClientConfig expected = new XmlClientConfigBuilder(schemaResource).build();
    ClientConfig actual = new ClientConfig(expected);
    assertEquals(expected, actual);
}
|
/**
 * Atomically adds {@code delta} to the current value.
 *
 * @return the value before the addition
 */
public long getAndAdd(long delta) {
    return getAndAddVal(delta);
}
|
// getAndAdd must return the previous value (0) and leave the counter at 10.
@Test
public void testGetAndAdd() {
    PaddedAtomicLong counter = new PaddedAtomicLong();
    long value = counter.getAndAdd(10);
    assertEquals(0L, value);
    assertEquals(10, counter.get());
}
|
/**
 * Collects the locally-defined part of every group in this collection into a
 * new {@code PipelineGroups}; groups without a local part are skipped.
 *
 * @return the groups that originate from the local configuration
 */
public PipelineGroups getLocal() {
    final PipelineGroups result = new PipelineGroups();
    for (PipelineConfigs group : this) {
        final PipelineConfigs localPart = group.getLocal();
        if (localPart == null) {
            continue;
        }
        result.add(localPart);
    }
    return result;
}
|
// A merged configuration with one file-origin and one repo-origin group must
// report only the file-origin group as local.
@Test
public void shouldGetLocalPartsWhenOriginIsMixed() {
    PipelineConfigs localGroup = createGroup("defaultGroup", createPipelineConfig("pipeline1", "stage1"));
    localGroup.setOrigins(new FileConfigOrigin());
    PipelineConfigs remoteGroup = createGroup("defaultGroup", createPipelineConfig("pipeline2", "stage1"));
    remoteGroup.setOrigins(new RepoConfigOrigin());
    MergePipelineConfigs mergePipelineConfigs = new MergePipelineConfigs(localGroup, remoteGroup);
    PipelineGroups groups = new PipelineGroups(mergePipelineConfigs);
    assertThat(groups.getLocal().size(), is(1));
    assertThat(groups.getLocal(), hasItem(localGroup));
}
|
/**
 * Decides whether the remote file should be transferred based on a comparison
 * with the local copy: local-newer and equal files are skipped (directories
 * that compare equal are still accepted so their children are traversed);
 * remote-newer files are accepted. Falls through to {@code false} for an
 * unexpected comparison result or when the superclass already rejected the file.
 */
@Override
public boolean accept(final Path file, final Local local, final TransferStatus parent) throws BackgroundException {
    if(super.accept(file, local, parent)) {
        final Comparison comparison = this.comparison.compare(file, local, listener);
        switch(comparison) {
            case local:
                if(log.isInfoEnabled()) {
                    log.info(String.format("Skip file %s with comparison %s", file, comparison));
                }
                return false;
            case equal:
                // Equal directories are still accepted so children get compared.
                if(file.isDirectory()) {
                    return true;
                }
                if(log.isInfoEnabled()) {
                    log.info(String.format("Skip file %s with comparison %s", file, comparison));
                }
                return false;
            case remote:
                return true;
        }
        log.warn(String.format("Invalid comparison result %s", comparison));
    }
    return false;
}
|
// A directory that compares equal must still be accepted so that its children
// are traversed and compared individually.
@Test
public void testAcceptDirectory() throws Exception {
    final CompareFilter filter = new CompareFilter(new DisabledDownloadSymlinkResolver(),
        new NullSession(new Host(new TestProtocol())), new DownloadFilterOptions(new Host(new TestProtocol())), new DisabledProgressListener(),
        new DefaultComparePathFilter(
            new NullSession(new Host(new TestProtocol()))) {
            @Override
            public Comparison compare(final Path file, final Local local, final ProgressListener listener) {
                return Comparison.equal;
            }
        });
    assertTrue(
        filter.accept(new Path("/n", EnumSet.of(Path.Type.directory)), new NullLocal("/n"),
            new TransferStatus().exists(true)));
}
|
/**
 * Certificates are supported only when a cert manager exists and is connected;
 * the given address is not consulted.
 */
@Override
public boolean isSupport(URL address) {
    return dubboCertManager != null && dubboCertManager.isConnected();
}
|
// isSupport must track the cert manager's connection state: true when
// connected, false otherwise. The constructed manager is captured via
// mocked construction to control isConnected().
@Test
void testEnable() {
    AtomicReference<DubboCertManager> reference = new AtomicReference<>();
    try (MockedConstruction<DubboCertManager> construction =
             Mockito.mockConstruction(DubboCertManager.class, (mock, context) -> {
                 reference.set(mock);
             })) {
        FrameworkModel frameworkModel = new FrameworkModel();
        DubboCertProvider provider = new DubboCertProvider(frameworkModel);
        Mockito.when(reference.get().isConnected()).thenReturn(true);
        Assertions.assertTrue(provider.isSupport(null));
        Mockito.when(reference.get().isConnected()).thenReturn(false);
        Assertions.assertFalse(provider.isSupport(null));
        frameworkModel.destroy();
    }
}
|
/**
 * Renders a parsed config value back to its string form for the given type.
 * <p>
 * Scalar types use {@code toString()}; lists are joined with commas; classes
 * render as their fully-qualified name. A {@code null} value yields
 * {@code null}; a {@code null} type falls back to {@code toString()}.
 *
 * @param parsedValue the parsed value, may be {@code null}
 * @param type        the declared config type, may be {@code null}
 * @return the string representation, or {@code null} when the value is {@code null}
 * @throws IllegalStateException if the type is not a known enum constant
 */
public static String convertToString(Object parsedValue, Type type) {
    if (parsedValue == null) {
        return null;
    }
    if (type == null) {
        return parsedValue.toString();
    }
    switch (type) {
        case BOOLEAN:
        case SHORT:
        case INT:
        case LONG:
        case DOUBLE:
        case STRING:
        case PASSWORD:
            return parsedValue.toString();
        case LIST:
            List<?> valueList = (List<?>) parsedValue;
            return valueList.stream().map(Object::toString).collect(Collectors.joining(","));
        case CLASS:
            Class<?> clazz = (Class<?>) parsedValue;
            return clazz.getName();
        default:
            // Include the offending type so the failure is diagnosable.
            throw new IllegalStateException("Unknown type: " + type);
    }
}
|
// A nested class must render as its binary name (Outer$Nested) so it can be
// resolved again with Class.forName.
@Test
public void testConvertValueToStringNestedClass() throws ClassNotFoundException {
    String actual = ConfigDef.convertToString(NestedClass.class, Type.CLASS);
    assertEquals("org.apache.kafka.common.config.ConfigDefTest$NestedClass", actual);
    // Additionally validate that we can look up this class by this name
    assertEquals(NestedClass.class, Class.forName(actual));
}
|
/**
 * Requests a rescale whenever any vertex is assigned a different parallelism
 * in the proposed plan than it currently has — increases and decreases alike.
 *
 * @return {@code true} if at least one vertex's parallelism would change
 */
@Override
public boolean shouldRescale(
        VertexParallelism currentParallelism, VertexParallelism newParallelism) {
    for (JobVertexID vertexId : currentParallelism.getVertices()) {
        final int current = currentParallelism.getParallelism(vertexId);
        final int proposed = newParallelism.getParallelism(vertexId);
        if (proposed != current) {
            return true;
        }
    }
    return false;
}
|
// A decrease in parallelism (2 -> 1) must also trigger a rescale under the
// enforce-change controller.
@Test
void testAlwaysScaleDown() {
    final RescalingController rescalingController =
        new EnforceParallelismChangeRescalingController();
    assertThat(rescalingController.shouldRescale(forParallelism(2), forParallelism(1)))
        .isTrue();
}
|
/**
 * Parses MLSD reply lines (RFC 3659 facts) into child paths of the given
 * directory. Entries of type {@code dir}, {@code file} and OS-specific
 * symlinks are supported; {@code .} and {@code ..} are skipped. Size,
 * owner/group, permissions and timestamps are populated from the facts when
 * present.
 *
 * @param directory the directory being listed
 * @param replies   the raw MLSD reply lines
 * @return the parsed children
 * @throws FTPInvalidListException if no line could be parsed successfully
 */
@Override
public AttributedList<Path> read(final Path directory, final List<String> replies) throws FTPInvalidListException {
    final AttributedList<Path> children = new AttributedList<>();
    if(replies.isEmpty()) {
        return children;
    }
    // At least one entry successfully parsed
    boolean success = false;
    for(String line : replies) {
        final Map<String, Map<String, String>> file = this.parseFacts(line);
        if(null == file) {
            log.error(String.format("Error parsing line %s", line));
            continue;
        }
        for(Map.Entry<String, Map<String, String>> f : file.entrySet()) {
            final String name = f.getKey();
            // size -- Size in octets
            // modify -- Last modification time
            // create -- Creation time
            // type -- Entry type
            // unique -- Unique id of file/directory
            // perm -- File permissions, whether read, write, execute is allowed for the login id.
            // lang -- Language of the file name per IANA [11] registry.
            // media-type -- MIME media-type of file contents per IANA registry.
            // charset -- Character set per IANA registry (if not UTF-8)
            final Map<String, String> facts = f.getValue();
            if(!facts.containsKey("type")) {
                log.error(String.format("No type fact in line %s", line));
                continue;
            }
            final Path parsed;
            if("dir".equals(facts.get("type").toLowerCase(Locale.ROOT))) {
                parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.directory));
            }
            else if("file".equals(facts.get("type").toLowerCase(Locale.ROOT))) {
                parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file));
            }
            else if(facts.get("type").toLowerCase(Locale.ROOT).matches("os\\.unix=slink:.*")) {
                parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file, Path.Type.symboliclink));
                // Parse symbolic link target in Type=OS.unix=slink:/foobar;Perm=;Unique=keVO1+4G4; foobar
                final String[] type = facts.get("type").split(":");
                if(type.length == 2) {
                    final String target = type[1];
                    // Absolute targets are used as-is; relative ones resolve against the directory.
                    if(target.startsWith(String.valueOf(Path.DELIMITER))) {
                        parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(target), EnumSet.of(Path.Type.file)));
                    }
                    else {
                        parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(String.format("%s/%s", directory.getAbsolute(), target)), EnumSet.of(Path.Type.file)));
                    }
                }
                else {
                    log.warn(String.format("Missing symbolic link target for type %s in line %s", facts.get("type"), line));
                    continue;
                }
            }
            else {
                log.warn(String.format("Ignored type %s in line %s", facts.get("type"), line));
                continue;
            }
            if(!success) {
                // A directory entry matching the listed directory's own name is
                // likely a bogus server response, not a real child.
                if(parsed.isDirectory() && directory.getName().equals(name)) {
                    log.warn(String.format("Possibly bogus response line %s", line));
                }
                else {
                    success = true;
                }
            }
            if(name.equals(".") || name.equals("..")) {
                if(log.isDebugEnabled()) {
                    log.debug(String.format("Skip %s", name));
                }
                continue;
            }
            if(facts.containsKey("size")) {
                parsed.attributes().setSize(Long.parseLong(facts.get("size")));
            }
            if(facts.containsKey("unix.uid")) {
                parsed.attributes().setOwner(facts.get("unix.uid"));
            }
            if(facts.containsKey("unix.owner")) {
                parsed.attributes().setOwner(facts.get("unix.owner"));
            }
            if(facts.containsKey("unix.gid")) {
                parsed.attributes().setGroup(facts.get("unix.gid"));
            }
            if(facts.containsKey("unix.group")) {
                parsed.attributes().setGroup(facts.get("unix.group"));
            }
            if(facts.containsKey("unix.mode")) {
                parsed.attributes().setPermission(new Permission(facts.get("unix.mode")));
            }
            else if(facts.containsKey("perm")) {
                if(PreferencesFactory.get().getBoolean("ftp.parser.mlsd.perm.enable")) {
                    Permission.Action user = Permission.Action.none;
                    final String flags = facts.get("perm");
                    if(StringUtils.contains(flags, 'r') || StringUtils.contains(flags, 'l')) {
                        // RETR command may be applied to that object
                        // Listing commands, LIST, NLST, and MLSD may be applied
                        user = user.or(Permission.Action.read);
                    }
                    if(StringUtils.contains(flags, 'w') || StringUtils.contains(flags, 'm') || StringUtils.contains(flags, 'c')) {
                        user = user.or(Permission.Action.write);
                    }
                    if(StringUtils.contains(flags, 'e')) {
                        // CWD command naming the object should succeed
                        user = user.or(Permission.Action.execute);
                        if(parsed.isDirectory()) {
                            user = user.or(Permission.Action.read);
                        }
                    }
                    final Permission permission = new Permission(user, Permission.Action.none, Permission.Action.none);
                    parsed.attributes().setPermission(permission);
                }
            }
            if(facts.containsKey("modify")) {
                // Time values are always represented in UTC
                parsed.attributes().setModificationDate(this.parseTimestamp(facts.get("modify")));
            }
            if(facts.containsKey("create")) {
                // Time values are always represented in UTC
                parsed.attributes().setCreationDate(this.parseTimestamp(facts.get("create")));
            }
            children.add(parsed);
        }
    }
    if(!success) {
        throw new FTPInvalidListException(children);
    }
    return children;
}
|
// The "." and ".." entries must be dropped from the listing, leaving only the
// two real directories.
@Test
public void testSkipCurrentAndParentDir() throws Exception {
    Path directory = new Path("/", EnumSet.of(Path.Type.directory));
    String[] replies = new String[]{
        "type=dir;size=512;modify=20150115041252;create=20150115041212;perm=cdeflmp; .",
        "type=dir;size=512;modify=20150115041252;create=20150115041212;perm=cdeflmp; ..",
        "type=dir;size=512;modify=20150115041245;create=20150115041242;perm=cdeflmp; AVID",
        "type=dir;size=512;modify=20150115041252;create=20150115041250;perm=cdeflmp; QTS"
    };
    final AttributedList<Path> children = new FTPMlsdListResponseReader()
        .read(directory, Arrays.asList(replies));
    assertEquals(2, children.size());
}
|
/**
 * Visits a collection-element access (e.g. {@code array[i]}) and delegates to
 * {@code shuttleIfUpdate}, which rewrites the operator only if any child changed.
 */
@Override
public ScalarOperator visitCollectionElement(CollectionElementOperator collectionElementOp, Void context) {
    return shuttleIfUpdate(collectionElementOp);
}
|
@Test
void visitCollectionElement() {
    // Element access into the array literal [3] at index 0; with no child rewritten,
    // both shuttles must hand back an operator equal to the input.
    final ArrayOperator array =
            new ArrayOperator(ARRAY_TINYINT, true, Lists.newArrayList(ConstantOperator.createInt(3)));
    final CollectionElementOperator element =
            new CollectionElementOperator(STRING, array, ConstantOperator.createInt(0), false);
    assertEquals(element, shuttle.visitCollectionElement(element, null));
    assertEquals(element, shuttle2.visitCollectionElement(element, null));
}
|
/**
 * Converts an Avro record schema into the equivalent Parquet message type.
 *
 * @param avroSchema the Avro schema; must be of type RECORD
 * @return a Parquet {@link MessageType} named after the record's full name
 * @throws IllegalArgumentException if the schema is not a record
 */
public MessageType convert(Schema avroSchema) {
    // Only record schemas map to a Parquet message.
    if (avroSchema.getType() != Schema.Type.RECORD) {
        throw new IllegalArgumentException("Avro schema must be a record.");
    }
    final String name = avroSchema.getFullName();
    return new MessageType(name, convertFields(avroSchema.getFields(), ""));
}
|
@Test
public void testTimeMillisType() throws Exception {
    // int32 annotated TIME(MILLIS,true) must round-trip with Avro's time-millis logical type.
    Schema date = LogicalTypes.timeMillis().addToSchema(Schema.create(INT));
    Schema expected = Schema.createRecord(
        "myrecord", null, null, false, Arrays.asList(new Schema.Field("time", date, null, null)));
    testRoundTripConversion(
        expected, "message myrecord {\n" + " required int32 time (TIME(MILLIS,true));\n" + "}\n");
    // TIME_MILLIS is only valid on int32; every other physical type must be rejected.
    for (PrimitiveTypeName primitive :
        new PrimitiveTypeName[] {INT64, INT96, FLOAT, DOUBLE, BOOLEAN, BINARY, FIXED_LEN_BYTE_ARRAY}) {
      final PrimitiveType type;
      if (primitive == FIXED_LEN_BYTE_ARRAY) {
        type = new PrimitiveType(REQUIRED, primitive, 12, "test", TIME_MILLIS);
      } else {
        type = new PrimitiveType(REQUIRED, primitive, "test", TIME_MILLIS);
      }
      assertThrows(
          // fixed: message previously said TIME_MICROS although this test exercises TIME_MILLIS
          "Should not allow TIME_MILLIS with " + primitive,
          IllegalArgumentException.class,
          () -> new AvroSchemaConverter().convert(message(type)));
    }
}
|
/**
 * Determines whether the running JVM uses compressed ordinary object pointers.
 * Probes HotSpot first, then a generic object-layout check, and finally assumes
 * compressed oops (4-byte references) when neither probe is conclusive.
 */
public static boolean isCompressedOops() {
    // Probe the HotSpot-specific mechanism first.
    final Boolean hotSpot = isHotSpotCompressedOopsOrNull();
    if (hotSpot != null) {
        return hotSpot;
    }
    // Not HotSpot (or unavailable): fall back to the object-layout based probe.
    final Boolean objectLayout = isObjectLayoutCompressedOopsOrNull();
    if (objectLayout != null) {
        return objectLayout;
    }
    // Neither probe was conclusive: accept the compressed-oops default.
    getLogger(JVMUtil.class).info("Could not determine memory cost of reference; setting to default of 4 bytes.");
    return true;
}
|
@Test
public void testIsCompressedOops() {
    // Smoke test: the probe must run without throwing on the current JVM;
    // the actual boolean depends on the JVM configuration, so it is not asserted.
    JVMUtil.isCompressedOops();
}
|
/**
 * Clears the finished bit for the given trigger and its entire subtree, using the
 * contiguous index range [triggerIndex, firstIndexAfterSubtree) in the bit set.
 */
@Override
public void clearRecursively(ExecutableTriggerStateMachine trigger) {
    bitSet.clear(trigger.getTriggerIndex(), trigger.getFirstIndexAfterSubtree());
}
|
@Test
public void testClearRecursively() {
    // Delegates to the shared property-based check for the bit-set backed implementation.
    FinishedTriggersProperties.verifyClearRecursively(FinishedTriggersBitSet.emptyWithCapacity(1));
}
|
/**
 * Persists a new search on behalf of the calling user.
 *
 * @return 201 Created with the stored search as entity, or 500 when persistence
 *         yielded no usable result (null DTO or missing id)
 */
@POST
@ApiOperation(value = "Create a search query", response = SearchDTO.class, code = 201)
@AuditEvent(type = ViewsAuditEventTypes.SEARCH_CREATE)
@Consumes({MediaType.APPLICATION_JSON, SEARCH_FORMAT_V1})
@Produces({MediaType.APPLICATION_JSON, SEARCH_FORMAT_V1})
public Response createSearch(@ApiParam SearchDTO searchRequest, @Context SearchUser searchUser) {
    final Search saved = searchDomain.saveForUser(searchRequest.toSearch(), searchUser);
    final SearchDTO result = SearchDTO.fromSearch(saved);
    if (result != null && result.id() != null) {
        LOG.debug("Created new search object {}", result.id());
        return Response.created(URI.create(result.id())).entity(result).build();
    }
    return Response.serverError().build();
}
|
@Test
public void allowCreatingNewSearchWithoutId() {
    // A DTO without an id must still be persistable and answered with 201 Created.
    final SearchDTO request = SearchDTO.Builder.create().id(null).build();
    final SearchDomain domain = mock(SearchDomain.class);
    when(domain.saveForUser(any(), any())).thenReturn(request.toSearch());
    final SearchResource resource =
            new SearchResource(domain, searchExecutor, searchJobService, eventBus, clusterConfigService);
    final Response response = resource.createSearch(request, searchUser);
    Assertions.assertThat(response.getStatus()).isEqualTo(Response.Status.CREATED.getStatusCode());
}
|
/**
 * Picks one instance uniformly at random. The caller guarantees a non-empty list.
 */
@Override
protected ServiceInstance doChoose(String serviceName, List<ServiceInstance> instances) {
    return instances.get(ThreadLocalRandom.current().nextInt(instances.size()));
}
|
@Test
public void doChoose() {
    final RandomLoadbalancer randomLoadbalancer = new RandomLoadbalancer();
    final String serviceName = "random";
    final List<ServiceInstance> serviceInstances = Arrays.asList(
            CommonUtils.buildInstance(serviceName, 9999),
            CommonUtils.buildInstance(serviceName, 8888));
    // With only two instances, 500 random draws are overwhelmingly likely to
    // yield the same port twice in a row at least once.
    int previousPort = 0;
    boolean repeated = false;
    for (int i = 0; i < 500 && !repeated; i++) {
        final Optional<ServiceInstance> choose = randomLoadbalancer.choose(serviceName, serviceInstances);
        Assert.assertTrue(choose.isPresent());
        final int port = choose.get().getPort();
        repeated = port == previousPort;
        previousPort = port;
    }
    Assert.assertTrue(repeated);
}
|
@ApiOperation(value = "Get a single timer job", tags = { "Jobs" })
@ApiResponses(value = {
        @ApiResponse(code = 200, message = "Indicates the timer job exists and is returned."),
        @ApiResponse(code = 404, message = "Indicates the requested job does not exist.")
})
@GetMapping(value = "/cmmn-management/timer-jobs/{jobId}", produces = "application/json")
public JobResponse getTimerJob(@ApiParam(name = "jobId") @PathVariable String jobId) {
    // Look up the timer job and map it to its REST representation.
    return restResponseFactory.createTimerJobResponse(getTimerJobById(jobId));
}
|
@Test
@CmmnDeployment(resources = { "org/flowable/cmmn/rest/service/api/management/timerEventListenerCase.cmmn" })
public void testGetTimerJob() throws Exception {
    // Start a case whose timer event listener schedules a timer job.
    CaseInstance caseInstance = runtimeService.createCaseInstanceBuilder().caseDefinitionKey("testTimerExpression").start();
    Job timerJob = managementService.createTimerJobQuery().caseInstanceId(caseInstance.getId()).singleResult();
    assertThat(timerJob).isNotNull();
    CloseableHttpResponse response = executeRequest(
        new HttpGet(SERVER_URL_PREFIX + CmmnRestUrls.createRelativeResourceUrl(CmmnRestUrls.URL_TIMER_JOB, timerJob.getId())), HttpStatus.SC_OK);
    JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
    closeResponse(response);
    assertThat(responseNode).isNotNull();
    // Compare the response body field-by-field against the queried job;
    // extra fields in the response are ignored on purpose.
    assertThatJson(responseNode)
        .when(Option.IGNORING_EXTRA_FIELDS)
        .isEqualTo("{"
            + "id: '" + timerJob.getId() + "',"
            + "exceptionMessage: " + timerJob.getExceptionMessage() + ","
            + "planItemInstanceId: '" + timerJob.getSubScopeId() + "',"
            + "caseDefinitionId: '" + timerJob.getScopeDefinitionId() + "',"
            + "caseInstanceId: '" + timerJob.getScopeId() + "',"
            + "elementId: 'timerListener',"
            + "elementName: 'Timer listener',"
            + "handlerType: 'cmmn-trigger-timer',"
            + "retries: " + timerJob.getRetries() + ","
            + "dueDate: " + new TextNode(getISODateStringWithTZ(timerJob.getDuedate())) + ","
            + "tenantId: ''"
            + "}");
    // The self URL is checked separately since its prefix depends on the test server.
    assertThat(responseNode.path("url").asText(null))
        .endsWith(CmmnRestUrls.createRelativeResourceUrl(CmmnRestUrls.URL_TIMER_JOB, timerJob.getId()));
}
|
/**
 * Parses the configured {@code servers} string into MQTT server definitions.
 *
 * @return an immutable (possibly empty) list of server definitions
 * @throws RuntimeCamelException if the string is non-empty but does not match
 *         the expected {@code name:clientId@url} syntax
 */
public List<MqttServerDefinition> getServerDefinitionList() {
    // Nothing configured: nothing to parse.
    if (ObjectHelper.isEmpty(servers)) {
        return List.of();
    }
    if (!SERVER_DEF_PATTERN.matcher(servers).find()) {
        throw new RuntimeCamelException("Server definition list has invalid syntax: " + servers);
    }
    // Re-scan from the start and build one definition per match.
    // MatchResult does not support named groups: 1=server name, 2=client id, 3=url.
    return SERVER_DEF_PATTERN.matcher(servers).results()
            .map(result -> parseFromUrlString(result.group(1), result.group(2), result.group(3)))
            .toList();
}
|
@Test
public void checkEndpointUriServerDefsSharedClientId() {
    // Two server definitions in one endpoint URI; both should inherit the shared clientId.
    String uri
        = TahuConstants.EDGE_NODE_SCHEME
        + "://EndpointUri/ServerDefsSharedClientId?clientId=clientId2&username=user1&password=mysecretpassw0rd&keepAliveTimeout=45&servers=serverName1:tcp://localhost:1883,serverName2:tcp://localhost:1884";
    TahuDefaultEndpoint endpoint = TestSupport.resolveMandatoryEndpoint(context, uri, TahuDefaultEndpoint.class);
    assertThat(endpoint, is(notNullValue()));
    // Path segments map to groupId/edgeNode.
    assertThat(endpoint,
        allOf(hasProperty("groupId", is("EndpointUri")),
            hasProperty("edgeNode", is("ServerDefsSharedClientId"))));
    TahuConfiguration configuration = endpoint.getConfiguration();
    assertThat(configuration, is(notNullValue()));
    assertThat(configuration,
        allOf(hasProperty("clientId", is("clientId2")), hasProperty("checkClientIdLength", is(false)),
            hasProperty("username", is("user1")),
            hasProperty("password", is("mysecretpassw0rd")),
            hasProperty("keepAliveTimeout", is(45))));
    List<MqttServerDefinition> serverDefs = configuration.getServerDefinitionList();
    assertThat(serverDefs, hasSize(2));
    // NOTE(review): these assert hasProperty on the value returned by the getter itself
    // (e.g. getMqttServerName().getMqttServerName()) — presumably Tahu wraps names/urls
    // in value objects exposing a same-named property; confirm against the Tahu API.
    MqttServerDefinition serverDef = serverDefs.get(0);
    assertThat(serverDef.getMqttServerName(), hasProperty("mqttServerName", is("serverName1")));
    assertThat(serverDef.getMqttServerUrl(), hasProperty("mqttServerUrl", is("tcp://localhost:1883")));
    serverDef = serverDefs.get(1);
    assertThat(serverDef.getMqttServerName(), hasProperty("mqttServerName", is("serverName2")));
    assertThat(serverDef.getMqttServerUrl(), hasProperty("mqttServerUrl", is("tcp://localhost:1884")));
    // Connection options shared by both definitions; no NDEATH topic was configured.
    assertThat(serverDefs,
        hasItems(allOf(hasProperty("mqttClientId",
            hasProperty("mqttClientId", is("clientId2"))),
            hasProperty("username", is("user1")),
            hasProperty("password", is("mysecretpassw0rd")),
            hasProperty("keepAliveTimeout", is(45)),
            hasProperty("ndeathTopic", is(nullValue())))));
}
|
/**
 * Fills missing max_speed values using country-dependent defaults. The earlier
 * parsers (DefaultMaxSpeedParser, OSMMaxSpeedParser) run without the rural/urban
 * classification; now that urban density is encoded we can apply it here.
 */
public void fillMaxSpeed(Graph graph, EncodingManager em) {
    final EnumEncodedValue<UrbanDensity> urbanDensityEnc =
            em.getEnumEncodedValue(UrbanDensity.KEY, UrbanDensity.class);
    // Treat every non-rural edge as urban for the purpose of the speed defaults.
    fillMaxSpeed(graph, em, edge -> UrbanDensity.RURAL != edge.get(urbanDensityEnc));
}
|
@Test
public void testFwdOnly() {
    // Only maxspeed:forward is tagged: the forward direction keeps the explicit 50,
    // the reverse direction is filled with the country-dependent default (100 here).
    final ReaderWay way = new ReaderWay(0L);
    way.setTag("country", Country.DEU);
    way.setTag("highway", "primary");
    way.setTag("maxspeed:forward", "50");
    final EdgeIteratorState edge = createEdge(way);
    calc.fillMaxSpeed(graph, em);
    assertEquals(50, edge.get(maxSpeedEnc), 1);
    assertEquals(100, edge.getReverse(maxSpeedEnc), 1);
}
|
/**
 * Resolves the transfer items for a command-line action, expanding a glob pattern
 * ({@code *}/{@code ?}) against the local directory when the shell has not already
 * expanded it. Falls back to single-item resolution in every other case.
 */
@Override
public Set<TransferItem> find(final CommandLine input, final TerminalAction action, final Path remote) throws AccessDeniedException {
    if (input.getOptionValues(action.name()).length == 2) {
        final String pattern = input.getOptionValues(action.name())[1];
        // This only applies to a shell where the glob is not already expanded into multiple arguments
        if (StringUtils.containsAny(pattern, '*', '?')) {
            final Local directory = LocalFactory.get(FilenameUtils.getFullPath(pattern));
            if (directory.isDirectory()) {
                final Set<TransferItem> matched = new HashSet<>();
                final NullFilter<String> wildcard = new NullFilter<String>() {
                    @Override
                    public boolean accept(final String filename) {
                        return FilenameUtils.wildcardMatch(filename, PathNormalizer.name(pattern));
                    }
                };
                for (Local file : directory.list(wildcard)) {
                    matched.add(new TransferItem(new Path(remote, file.getName(), EnumSet.of(Path.Type.file)), file));
                }
                return matched;
            }
        }
    }
    // No glob, or the pattern's parent is not a directory: single-item lookup.
    return new SingleTransferItemFinder().find(input, action, remote);
}
|
@Test
public void testNoLocalInOptionsDownload() throws Exception {
    // With no local path given, the finder defaults to the working directory
    // combined with the remote file name.
    final CommandLine input = new PosixParser().parse(TerminalOptionsBuilder.options(),
            new String[]{"--download", "rackspace://cdn.cyberduck.ch/remote"});
    final Path remote = new Path("/cdn.cyberduck.ch/remote", EnumSet.of(Path.Type.file));
    final Set<TransferItem> found = new GlobTransferItemFinder().find(input, TerminalAction.download, remote);
    assertFalse(found.isEmpty());
    assertEquals(
            new TransferItem(remote, LocalFactory.get(System.getProperty("user.dir") + "/remote")),
            found.iterator().next());
}
|
/**
 * Lists the plain file names under the given path. The path may point into a jar
 * (e.g. {@code /lib/foo.jar!/cn/hutool}), in which case the entries of that jar
 * directory are listed instead.
 *
 * @param path directory path, or a jar-internal path; null yields an empty list
 * @return names of regular files (no directories) directly under the path
 * @throws IORuntimeException if the jar file cannot be read
 */
public static List<String> listFileNames(String path) throws IORuntimeException {
    if (path == null) {
        return new ArrayList<>(0);
    }
    int index = path.lastIndexOf(FileUtil.JAR_PATH_EXT);
    if (index < 0) {
        // Plain directory: collect regular file names, skipping subdirectories.
        final List<String> paths = new ArrayList<>();
        final File[] files = ls(path);
        for (File file : files) {
            if (file.isFile()) {
                paths.add(file.getName());
            }
        }
        return paths;
    }
    // Jar file: resolve to an absolute path first.
    path = getAbsolutePath(path);
    // Advance past ".jar" so the substring split separates jar path from entry path.
    index = index + FileUtil.JAR_FILE_EXT.length();
    JarFile jarFile = null;
    try {
        jarFile = new JarFile(path.substring(0, index));
        // Strip the leading '/' so entry paths like "jar!/cn/hutool/" still resolve.
        return ZipUtil.listFileNames(jarFile, StrUtil.removePrefix(path.substring(index + 1), "/"));
    } catch (IOException e) {
        throw new IORuntimeException(StrUtil.format("Can not read file path of [{}]", path), e);
    } finally {
        IoUtil.close(jarFile);
    }
}
|
@Test
@Disabled // requires a machine-local jar at d:/test; manual verification only
public void listFileNamesInJarTest() {
    // Lists entries inside a jar via the "jar!/path" syntax and dumps them to the console.
    final List<String> names = FileUtil.listFileNames("d:/test/hutool-core-5.1.0.jar!/cn/hutool/core/util ");
    for (final String name : names) {
        Console.log(name);
    }
}
|
/**
 * Normalizes a directory URI to its canonical string form (see the String overload).
 */
public static String normalizeDirectoryURI(URI dirURI) {
    return normalizeDirectoryURI(dirURI.toString());
}
|
@Test
public void testNormalizeDirectoryURI() {
    // A trailing slash is appended when missing; already-normalized input is unchanged.
    final String expected = "file:///path/to/dir/";
    assertEquals(expected, MinionTaskUtils.normalizeDirectoryURI("file:///path/to/dir"));
    assertEquals(expected, MinionTaskUtils.normalizeDirectoryURI("file:///path/to/dir/"));
}
|
/**
 * Returns the port the server channel is actually bound to (useful when bound to
 * an ephemeral port 0).
 */
@Override
public int getPort() {
    return ((InetSocketAddress) serverChannel.localAddress()).getPort();
}
|
@Test
void badRequest() throws IOException {
    // Send a malformed request (bare-LF line endings, no Host header); the server
    // is expected to close the connection without writing a response, so the read
    // hits end-of-stream.
    int port = server.getPort();
    String msg = "GET /status HTTP/1.1\n\n";
    InetSocketAddress sockAddr = new InetSocketAddress("127.0.0.1", port);
    try (Socket sock = new Socket()) {
        sock.connect(sockAddr);
        OutputStream out = sock.getOutputStream();
        out.write(msg.getBytes(StandardCharsets.UTF_8));
        out.flush();
        byte[] buf = new byte[2048];
        int bytesRead = sock.getInputStream().read(buf);
        // fixed: assertEquals takes (expected, actual); the arguments were swapped
        assertEquals(-1, bytesRead);
    }
}
|
/**
 * Removes the entry with the given name from this directory and notifies the file
 * that a link to it was removed. Reserved names ("." and "..") are rejected by
 * checkNotReserved with IllegalArgumentException.
 */
public void unlink(Name name) {
    DirectoryEntry entry = remove(checkNotReserved(name, "unlink"));
    entry.file().unlinked();
}
|
@Test
public void testUnlink_parentAndSelfNameFails() {
    // The reserved entries "." and ".." must never be unlinkable.
    try {
        dir.unlink(Name.simple("."));
        fail();
    } catch (IllegalArgumentException expected) {
    }
    try {
        dir.unlink(Name.simple(".."));
        fail();
    } catch (IllegalArgumentException expected) {
    }
}
|
/**
 * Parses game and spam chat messages to keep tracked item-charge counts in sync
 * and to fire break/activation notifications for charged items.
 */
@Subscribe
public void onChatMessage(ChatMessage event)
{
	if (event.getType() == ChatMessageType.GAMEMESSAGE || event.getType() == ChatMessageType.SPAM)
	{
		String message = Text.removeTags(event.getMessage());
		Matcher dodgyCheckMatcher = DODGY_CHECK_PATTERN.matcher(message);
		Matcher dodgyProtectMatcher = DODGY_PROTECT_PATTERN.matcher(message);
		Matcher dodgyBreakMatcher = DODGY_BREAK_PATTERN.matcher(message);
		Matcher bindingNecklaceCheckMatcher = BINDING_CHECK_PATTERN.matcher(message);
		Matcher bindingNecklaceUsedMatcher = BINDING_USED_PATTERN.matcher(message);
		Matcher ringOfForgingCheckMatcher = RING_OF_FORGING_CHECK_PATTERN.matcher(message);
		Matcher amuletOfChemistryCheckMatcher = AMULET_OF_CHEMISTRY_CHECK_PATTERN.matcher(message);
		Matcher amuletOfChemistryUsedMatcher = AMULET_OF_CHEMISTRY_USED_PATTERN.matcher(message);
		Matcher amuletOfChemistryBreakMatcher = AMULET_OF_CHEMISTRY_BREAK_PATTERN.matcher(message);
		Matcher amuletOfBountyCheckMatcher = AMULET_OF_BOUNTY_CHECK_PATTERN.matcher(message);
		Matcher amuletOfBountyUsedMatcher = AMULET_OF_BOUNTY_USED_PATTERN.matcher(message);
		Matcher chronicleAddMatcher = CHRONICLE_ADD_PATTERN.matcher(message);
		Matcher chronicleUseAndCheckMatcher = CHRONICLE_USE_AND_CHECK_PATTERN.matcher(message);
		Matcher slaughterActivateMatcher = BRACELET_OF_SLAUGHTER_ACTIVATE_PATTERN.matcher(message);
		Matcher slaughterCheckMatcher = BRACELET_OF_SLAUGHTER_CHECK_PATTERN.matcher(message);
		Matcher expeditiousActivateMatcher = EXPEDITIOUS_BRACELET_ACTIVATE_PATTERN.matcher(message);
		Matcher expeditiousCheckMatcher = EXPEDITIOUS_BRACELET_CHECK_PATTERN.matcher(message);
		Matcher bloodEssenceCheckMatcher = BLOOD_ESSENCE_CHECK_PATTERN.matcher(message);
		Matcher bloodEssenceExtractMatcher = BLOOD_ESSENCE_EXTRACT_PATTERN.matcher(message);
		Matcher braceletOfClayCheckMatcher = BRACELET_OF_CLAY_CHECK_PATTERN.matcher(message);
		if (message.contains(RING_OF_RECOIL_BREAK_MESSAGE))
		{
			notifier.notify(config.recoilNotification(), "Your Ring of Recoil has shattered");
		}
		else if (dodgyBreakMatcher.find())
		{
			notifier.notify(config.dodgyNotification(), "Your dodgy necklace has crumbled to dust.");
			updateDodgyNecklaceCharges(MAX_DODGY_CHARGES);
		}
		else if (dodgyCheckMatcher.find())
		{
			updateDodgyNecklaceCharges(Integer.parseInt(dodgyCheckMatcher.group(1)));
		}
		else if (dodgyProtectMatcher.find())
		{
			updateDodgyNecklaceCharges(Integer.parseInt(dodgyProtectMatcher.group(1)));
		}
		else if (amuletOfChemistryCheckMatcher.find())
		{
			updateAmuletOfChemistryCharges(Integer.parseInt(amuletOfChemistryCheckMatcher.group(1)));
		}
		else if (amuletOfChemistryUsedMatcher.find())
		{
			// The message spells out "one" instead of the numeral 1
			final String match = amuletOfChemistryUsedMatcher.group(1);
			int charges = 1;
			if (!match.equals("one"))
			{
				charges = Integer.parseInt(match);
			}
			updateAmuletOfChemistryCharges(charges);
		}
		else if (amuletOfChemistryBreakMatcher.find())
		{
			notifier.notify(config.amuletOfChemistryNotification(), "Your amulet of chemistry has crumbled to dust.");
			updateAmuletOfChemistryCharges(MAX_AMULET_OF_CHEMISTRY_CHARGES);
		}
		else if (amuletOfBountyCheckMatcher.find())
		{
			updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyCheckMatcher.group(1)));
		}
		else if (amuletOfBountyUsedMatcher.find())
		{
			updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyUsedMatcher.group(1)));
		}
		else if (message.equals(AMULET_OF_BOUNTY_BREAK_TEXT))
		{
			updateAmuletOfBountyCharges(MAX_AMULET_OF_BOUNTY_CHARGES);
		}
		else if (message.contains(BINDING_BREAK_TEXT))
		{
			notifier.notify(config.bindingNotification(), BINDING_BREAK_TEXT);
			// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
			updateBindingNecklaceCharges(MAX_BINDING_CHARGES + 1);
		}
		else if (bindingNecklaceUsedMatcher.find())
		{
			final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
			// fixed: getItemContainer can return null (as guarded elsewhere in this method) -
			// dereferencing it unguarded risked an NPE
			if (equipment != null && equipment.contains(ItemID.BINDING_NECKLACE))
			{
				updateBindingNecklaceCharges(getItemCharges(ItemChargeConfig.KEY_BINDING_NECKLACE) - 1);
			}
		}
		else if (bindingNecklaceCheckMatcher.find())
		{
			final String match = bindingNecklaceCheckMatcher.group(1);
			int charges = 1;
			if (!match.equals("one"))
			{
				charges = Integer.parseInt(match);
			}
			updateBindingNecklaceCharges(charges);
		}
		else if (ringOfForgingCheckMatcher.find())
		{
			final String match = ringOfForgingCheckMatcher.group(1);
			int charges = 1;
			if (!match.equals("one"))
			{
				charges = Integer.parseInt(match);
			}
			updateRingOfForgingCharges(charges);
		}
		else if (message.equals(RING_OF_FORGING_USED_TEXT) || message.equals(RING_OF_FORGING_VARROCK_PLATEBODY))
		{
			final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
			final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
			// Determine if the player smelted with a Ring of Forging equipped.
			if (equipment == null)
			{
				return;
			}
			// fixed: inventory can also be null; guard it before counting iron ore
			if (equipment.contains(ItemID.RING_OF_FORGING) && (message.equals(RING_OF_FORGING_USED_TEXT) || (inventory != null && inventory.count(ItemID.IRON_ORE) > 1)))
			{
				int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_RING_OF_FORGING) - 1, 0, MAX_RING_OF_FORGING_CHARGES);
				updateRingOfForgingCharges(charges);
			}
		}
		else if (message.equals(RING_OF_FORGING_BREAK_TEXT))
		{
			notifier.notify(config.ringOfForgingNotification(), "Your ring of forging has melted.");
			// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
			updateRingOfForgingCharges(MAX_RING_OF_FORGING_CHARGES + 1);
		}
		else if (chronicleAddMatcher.find())
		{
			final String match = chronicleAddMatcher.group(1);
			if (match.equals("one"))
			{
				setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
			}
			else
			{
				setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(match));
			}
		}
		else if (chronicleUseAndCheckMatcher.find())
		{
			setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(chronicleUseAndCheckMatcher.group(1)));
		}
		else if (message.equals(CHRONICLE_ONE_CHARGE_TEXT))
		{
			setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
		}
		else if (message.equals(CHRONICLE_EMPTY_TEXT) || message.equals(CHRONICLE_NO_CHARGES_TEXT))
		{
			setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 0);
		}
		else if (message.equals(CHRONICLE_FULL_TEXT))
		{
			setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1000);
		}
		else if (slaughterActivateMatcher.find())
		{
			// Group 1 is absent when the bracelet broke rather than reporting charges
			final String found = slaughterActivateMatcher.group(1);
			if (found == null)
			{
				updateBraceletOfSlaughterCharges(MAX_SLAYER_BRACELET_CHARGES);
				notifier.notify(config.slaughterNotification(), BRACELET_OF_SLAUGHTER_BREAK_TEXT);
			}
			else
			{
				updateBraceletOfSlaughterCharges(Integer.parseInt(found));
			}
		}
		else if (slaughterCheckMatcher.find())
		{
			updateBraceletOfSlaughterCharges(Integer.parseInt(slaughterCheckMatcher.group(1)));
		}
		else if (expeditiousActivateMatcher.find())
		{
			// Group 1 is absent when the bracelet broke rather than reporting charges
			final String found = expeditiousActivateMatcher.group(1);
			if (found == null)
			{
				updateExpeditiousBraceletCharges(MAX_SLAYER_BRACELET_CHARGES);
				notifier.notify(config.expeditiousNotification(), EXPEDITIOUS_BRACELET_BREAK_TEXT);
			}
			else
			{
				updateExpeditiousBraceletCharges(Integer.parseInt(found));
			}
		}
		else if (expeditiousCheckMatcher.find())
		{
			updateExpeditiousBraceletCharges(Integer.parseInt(expeditiousCheckMatcher.group(1)));
		}
		else if (bloodEssenceCheckMatcher.find())
		{
			updateBloodEssenceCharges(Integer.parseInt(bloodEssenceCheckMatcher.group(1)));
		}
		else if (bloodEssenceExtractMatcher.find())
		{
			updateBloodEssenceCharges(getItemCharges(ItemChargeConfig.KEY_BLOOD_ESSENCE) - Integer.parseInt(bloodEssenceExtractMatcher.group(1)));
		}
		else if (message.contains(BLOOD_ESSENCE_ACTIVATE_TEXT))
		{
			updateBloodEssenceCharges(MAX_BLOOD_ESSENCE_CHARGES);
		}
		else if (braceletOfClayCheckMatcher.find())
		{
			updateBraceletOfClayCharges(Integer.parseInt(braceletOfClayCheckMatcher.group(1)));
		}
		else if (message.equals(BRACELET_OF_CLAY_USE_TEXT) || message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN))
		{
			final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
			// Determine if the player mined with a Bracelet of Clay equipped.
			if (equipment != null && equipment.contains(ItemID.BRACELET_OF_CLAY))
			{
				final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
				// Charge is not used if only 1 inventory slot is available when mining in Prifddinas
				boolean ignore = inventory != null
					&& inventory.count() == 27
					&& message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN);
				if (!ignore)
				{
					int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_BRACELET_OF_CLAY) - 1, 0, MAX_BRACELET_OF_CLAY_CHARGES);
					updateBraceletOfClayCharges(charges);
				}
			}
		}
		else if (message.equals(BRACELET_OF_CLAY_BREAK_TEXT))
		{
			notifier.notify(config.braceletOfClayNotification(), "Your bracelet of clay has crumbled to dust");
			updateBraceletOfClayCharges(MAX_BRACELET_OF_CLAY_CHARGES);
		}
	}
}
|
@Test
public void testExpeditiousBreak()
{
    // The break message resets the expeditious bracelet back to its full 30 charges.
    ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.GAMEMESSAGE, "", BREAK_EXPEDITIOUS_BRACELET, "", 0);
    itemChargePlugin.onChatMessage(chatMessage);
    verify(configManager).setRSProfileConfiguration(ItemChargeConfig.GROUP, ItemChargeConfig.KEY_EXPEDITIOUS_BRACELET, 30);
}
|
/**
 * Reward/risk ratio: gross return divided by maximum drawdown.
 * Returns NaN when there is no drawdown (division would be undefined).
 */
@Override
public Num calculate(BarSeries series, Position position) {
    final Num maxDrawdown = maxDrawdownCriterion.calculate(series, position);
    return maxDrawdown.isZero()
            ? NaN.NaN
            : grossReturnCriterion.calculate(series, position).dividedBy(maxDrawdown);
}
|
@Test
public void rewardRiskRatioCriterionWithNoPositions() {
    // An empty trading record has zero drawdown, so the ratio is undefined (NaN).
    MockBarSeries series = new MockBarSeries(numFunction, 1, 2, 3, 6, 8, 20, 3);
    assertTrue(rrc.calculate(series, new BaseTradingRecord()).isNaN());
}
|
/**
 * Polls for message fragments, letting the handler steer consumption via the
 * returned {@link Action}: ABORT re-delivers the fragment next poll, BREAK stops
 * after the current fragment, COMMIT eagerly publishes the subscriber position.
 *
 * @param handler       callback invoked per fragment
 * @param fragmentLimit maximum fragments to deliver in this poll
 * @return number of fragments delivered (ABORTed fragments are not counted)
 */
public int controlledPoll(final ControlledFragmentHandler handler, final int fragmentLimit)
{
    if (isClosed)
    {
        return 0;
    }
    int fragmentsRead = 0;
    long initialPosition = subscriberPosition.get();
    // Map the stream position onto an offset within the current term buffer.
    int initialOffset = (int)initialPosition & termLengthMask;
    int offset = initialOffset;
    final UnsafeBuffer termBuffer = activeTermBuffer(initialPosition);
    final int capacity = termBuffer.capacity();
    final Header header = this.header;
    header.buffer(termBuffer);
    try
    {
        while (fragmentsRead < fragmentLimit && offset < capacity)
        {
            // Volatile read: a zero/negative length means the frame is not yet committed.
            final int length = frameLengthVolatile(termBuffer, offset);
            if (length <= 0)
            {
                break;
            }
            final int frameOffset = offset;
            final int alignedLength = BitUtil.align(length, FRAME_ALIGNMENT);
            offset += alignedLength;
            // Padding frames fill out the end of a term and carry no message data.
            if (isPaddingFrame(termBuffer, frameOffset))
            {
                continue;
            }
            ++fragmentsRead;
            header.offset(frameOffset);
            final Action action = handler.onFragment(
                termBuffer, frameOffset + HEADER_LENGTH, length - HEADER_LENGTH, header);
            if (ABORT == action)
            {
                // Rewind so the same fragment is delivered again on the next poll.
                --fragmentsRead;
                offset -= alignedLength;
                break;
            }
            if (BREAK == action)
            {
                break;
            }
            if (COMMIT == action)
            {
                // Publish progress immediately and restart accounting from here.
                initialPosition += (offset - initialOffset);
                initialOffset = offset;
                subscriberPosition.setOrdered(initialPosition);
            }
        }
    }
    catch (final Exception ex)
    {
        errorHandler.onError(ex);
    }
    finally
    {
        // Publish any uncommitted progress made since the last COMMIT.
        final long resultingPosition = initialPosition + (offset - initialOffset);
        if (resultingPosition > initialPosition)
        {
            subscriberPosition.setOrdered(resultingPosition);
        }
    }
    return fragmentsRead;
}
|
@Test
void shouldPollNoFragmentsToControlledFragmentHandler()
{
    // An image with no committed frames must report zero fragments, leave the
    // subscriber position untouched, and never invoke the handler.
    final Image image = createImage();
    final int fragmentsRead = image.controlledPoll(mockControlledFragmentHandler, Integer.MAX_VALUE);
    assertThat(fragmentsRead, is(0));
    verify(position, never()).setOrdered(anyLong());
    verify(mockControlledFragmentHandler, never()).onFragment(
        any(UnsafeBuffer.class), anyInt(), anyInt(), any(Header.class));
}
|
/**
 * Appends {@code length} bytes from {@code source} starting at {@code sourceIndex}
 * to the currently open segment, allocating a new segment first if required.
 *
 * @return this builder for chaining
 */
@Override
public BlockBuilder writeBytes(Slice source, int sourceIndex, int length)
{
    initializeNewSegmentIfRequired();
    openSliceOutput.writeBytes(source, sourceIndex, length);
    return this;
}
|
@Test
public void testWriteBytes()
{
    // Write 100 random-length prefixes of the alphabet and verify they read back intact.
    final String inputChars = "abcdefghijklmnopqrstuvwwxyz01234566789!@#$%^";
    final int entries = 100;
    final SegmentedSliceBlockBuilder blockBuilder = new SegmentedSliceBlockBuilder(entries, inputChars.length());
    final byte[] bytes = inputChars.getBytes(UTF_8);
    // All characters are single-byte in UTF-8, so byte offsets equal char offsets.
    assertEquals(bytes.length, inputChars.length());
    final Random random = new Random(0);
    final List<String> expected = new ArrayList<>();
    for (int i = 0; i < entries; i++) {
        final int valueLength = random.nextInt(bytes.length);
        VARCHAR.writeBytes(blockBuilder, bytes, 0, valueLength);
        expected.add(inputChars.substring(0, valueLength));
    }
    verifyBlockValues(blockBuilder, expected);
}
|
/**
 * Prepares an assertion that the actual long differs from an expected value by
 * MORE than {@code tolerance}. Fails the subject when the values are equal
 * within the tolerance.
 */
public TolerantLongComparison isNotWithin(long tolerance) {
    return new TolerantLongComparison() {
        @Override
        public void of(long expected) {
            Long actual = LongSubject.this.actual;
            // A null actual cannot be compared; report the parameters for diagnosis.
            checkNotNull(
                actual, "actual value cannot be null. tolerance=%s expected=%s", tolerance, expected);
            // Rejects negative tolerances up front.
            checkTolerance(tolerance);
            if (equalWithinTolerance(actual, expected, tolerance)) {
                failWithoutActual(
                    fact("expected not to be", Long.toString(expected)),
                    butWas(),
                    fact("within tolerance", Long.toString(tolerance)));
            }
        }
    };
}
|
@Test
public void isNotWithinOf() {
    // Cases that must FAIL: the difference is within the tolerance.
    assertThatIsNotWithinFails(20000L, 0L, 20000L);
    assertThatIsNotWithinFails(20000L, 1L, 20000L);
    assertThatIsNotWithinFails(20000L, 10000L, 20000L);
    assertThatIsNotWithinFails(20000L, 10000L, 30000L);
    assertThatIsNotWithinFails(Long.MIN_VALUE, 1L, Long.MIN_VALUE + 1);
    assertThatIsNotWithinFails(Long.MAX_VALUE, 1L, Long.MAX_VALUE - 1);
    assertThatIsNotWithinFails(Long.MAX_VALUE / 2, Long.MAX_VALUE, -Long.MAX_VALUE / 2);
    assertThatIsNotWithinFails(-Long.MAX_VALUE / 2, Long.MAX_VALUE, Long.MAX_VALUE / 2);
    // Cases that must PASS: the difference exceeds the tolerance.
    assertThat(20000L).isNotWithin(9999L).of(30000L);
    assertThat(20000L).isNotWithin(10000L).of(30001L);
    assertThat(Long.MIN_VALUE).isNotWithin(0L).of(Long.MAX_VALUE);
    assertThat(Long.MAX_VALUE).isNotWithin(0L).of(Long.MIN_VALUE);
    assertThat(Long.MIN_VALUE).isNotWithin(1L).of(Long.MIN_VALUE + 2);
    assertThat(Long.MAX_VALUE).isNotWithin(1L).of(Long.MAX_VALUE - 2);
    // Don't fall for rollover: a naive subtraction would overflow and look "close".
    assertThat(Long.MIN_VALUE).isNotWithin(1L).of(Long.MAX_VALUE);
    assertThat(Long.MAX_VALUE).isNotWithin(1L).of(Long.MIN_VALUE);
}
|
/**
 * Assigns the next correlation id to the request message, dispatches it to the
 * selected target, and returns the future that completes with the response.
 */
public ClientInvocationFuture invoke() {
    clientMessage.setCorrelationId(callIdSequence.next());
    invokeOnSelection();
    return clientInvocationFuture;
}
|
@Test
public void invokeOnPartitionOwnerRedirectsToRandom_WhenPartitionOwnerIsnull() throws Exception {
    // Target a partition id with no owner; the invocation must be redirected to a
    // random member instead of failing, and the empty map reports size 0.
    hazelcastFactory.newHazelcastInstance();
    final HazelcastClientInstanceImpl client = getHazelcastClientInstanceImpl(hazelcastFactory.newHazelcastClient());
    final ClientMessage request = MapSizeCodec.encodeRequest("test");
    final int ownerlessPartition = 4000;
    final ClientInvocation invocation = new ClientInvocation(client, request, "map", ownerlessPartition);
    assertEquals(0, MapSizeCodec.decodeResponse(invocation.invoke().get()));
}
|
/**
 * Resolves the runtime value for a rule condition (e.g. a header or query
 * parameter) from the current exchange via the parameter data factory.
 */
public String buildRealData(final ConditionData condition, final ServerWebExchange exchange) {
    return ParameterDataFactory.builderData(condition.getParamType(), condition.getParamName(), exchange);
}
|
@Test
public void testBuildRealDataHeaderBranch() {
    // HEADER param type resolves the condition value from the request headers.
    conditionData.setParamType(ParamTypeEnum.HEADER.getName());
    assertEquals("shenyuHeader", abstractMatchStrategy.buildRealData(conditionData, exchange));
}
|
/**
 * Translates this config into a fresh {@link OrcWriterOptions.Builder}.
 * Each caller receives its own builder instance for isolation.
 */
public OrcWriterOptions.Builder toOrcWriterOptionsBuilder()
{
    DefaultOrcWriterFlushPolicy flushPolicy = DefaultOrcWriterFlushPolicy.builder()
            .withStripeMinSize(stripeMinSize)
            .withStripeMaxSize(stripeMaxSize)
            .withStripeMaxRowCount(stripeMaxRowCount)
            .build();
    // Pass the compression level through only when it was explicitly configured.
    OptionalInt resolvedCompressionLevel = compressionLevel == DEFAULT_COMPRESSION_LEVEL
            ? OptionalInt.empty()
            : OptionalInt.of(compressionLevel);
    return OrcWriterOptions.builder()
            .withFlushPolicy(flushPolicy)
            .withRowGroupMaxRowCount(rowGroupMaxRowCount)
            .withDictionaryMaxMemory(dictionaryMaxMemory)
            .withMaxStringStatisticsLimit(stringStatisticsLimit)
            .withMaxCompressionBufferSize(maxCompressionBufferSize)
            .withStreamLayoutFactory(getStreamLayoutFactory(streamLayoutType))
            .withDwrfStripeCacheEnabled(isDwrfStripeCacheEnabled)
            .withDwrfStripeCacheMaxSize(dwrfStripeCacheMaxSize)
            .withDwrfStripeCacheMode(dwrfStripeCacheMode)
            .withCompressionLevel(resolvedCompressionLevel);
}
|
@Test
public void testWithNoOptionsSet()
{
    // A default-constructed config must produce a buildable options builder.
    OrcFileWriterConfig config = new OrcFileWriterConfig();
    // should succeed.
    config.toOrcWriterOptionsBuilder().build();
}
|
/**
 * Refreshes the resolver's caches with the nodes and node-edge-points extracted
 * from the given DCS context.
 */
@VisibleForTesting
protected void updateCache(DefaultTapiResolver resolver, DefaultContext context) {
    updateNodes(resolver, getNodes(context));
    updateNeps(resolver, getNeps(context));
}
|
@Test
public void testUpdateCacheWithoutSip() {
    DcsBasedTapiDataProducer dataProvider = new DcsBasedTapiDataProducer();
    DefaultTapiResolver mockResolver = EasyMock.createMock(DefaultTapiResolver.class);
    // Two nodes: node1 carries one NEP, node2 carries two.
    topology.addToNode(node1);
    topology.addToNode(node2);
    node1.addToOwnedNodeEdgePoint(nep11);
    node2.addToOwnedNodeEdgePoint(nep21);
    node2.addToOwnedNodeEdgePoint(nep22);
    List<TapiNodeRef> expectNodes = Arrays.asList(
        DcsBasedTapiObjectRefFactory.create(topology, node1).setDeviceId(did1),
        DcsBasedTapiObjectRefFactory.create(topology, node2).setDeviceId(did2)
    );
    List<TapiNepRef> expectNeps = Arrays.asList(
        DcsBasedTapiObjectRefFactory.create(topology, node1, nep11).setConnectPoint(cp11),
        DcsBasedTapiObjectRefFactory.create(topology, node2, nep21).setConnectPoint(cp21),
        DcsBasedTapiObjectRefFactory.create(topology, node2, nep22).setConnectPoint(cp22)
    );
    // The resolver must receive exactly one node-list and one nep-list update.
    mockResolver.addNodeRefList(expectNodes);
    expectLastCall().once();
    mockResolver.addNepRefList(expectNeps);
    expectLastCall().once();
    replay(mockResolver);
    dataProvider.updateCache(mockResolver, context);
    verify(mockResolver);
}
|
/**
 * Lists all integrations known to the cluster, either as bare names (when the
 * {@code name} flag is set) or as an ASCII table with NAME/PHASE/KIT/READY columns.
 *
 * @return 0 always (CLI success exit code)
 * @throws Exception if the Kubernetes client call fails
 */
public Integer doCall() throws Exception {
    List<Row> rows = new ArrayList<>();
    List<Integration> integrations = client(Integration.class).list().getItems();
    integrations
            .forEach(integration -> {
                Row row = new Row();
                row.name = integration.getMetadata().getName();
                row.ready = "0/1";
                if (integration.getStatus() != null) {
                    row.phase = integration.getStatus().getPhase();
                    if (integration.getStatus().getConditions() != null) {
                        // Constant-first equals() is null-safe: a condition with a
                        // null type or status must not abort the whole listing with an NPE.
                        row.ready
                                = integration.getStatus().getConditions().stream()
                                        .filter(c -> "Ready".equals(c.getType()))
                                        .anyMatch(c -> "True".equals(c.getStatus())) ? "1/1" : "0/1";
                    }
                    row.kit = integration.getStatus().getIntegrationKit() != null
                            ? integration.getStatus().getIntegrationKit().getName() : "";
                } else {
                    // No status reported yet (e.g. just created).
                    row.phase = "Unknown";
                }
                rows.add(row);
            });
    if (!rows.isEmpty()) {
        if (name) {
            // Names only, one per line.
            rows.forEach(r -> printer().println(r.name));
        } else {
            printer().println(AsciiTable.getTable(AsciiTable.NO_BORDERS, rows, Arrays.asList(
                    new Column().header("NAME").dataAlign(HorizontalAlign.LEFT)
                            .maxWidth(40, OverflowBehaviour.ELLIPSIS_RIGHT)
                            .with(r -> r.name),
                    new Column().header("PHASE").headerAlign(HorizontalAlign.LEFT)
                            .with(r -> r.phase),
                    new Column().header("KIT").headerAlign(HorizontalAlign.LEFT).with(r -> r.kit),
                    new Column().header("READY").dataAlign(HorizontalAlign.CENTER).with(r -> r.ready))));
        }
    }
    return 0;
}
|
@Test
public void shouldListIntegrationNames() throws Exception {
    // Register two integrations in the mock cluster.
    Integration foo = createIntegration("foo");
    Integration bar = createIntegration("bar");
    kubernetesClient.resources(Integration.class).resource(foo).create();
    kubernetesClient.resources(Integration.class).resource(bar).create();

    // With the name flag set, only the names are printed, one per line.
    IntegrationGet cmd = createCommand();
    cmd.name = true;
    cmd.doCall();

    Assertions.assertEquals("foo\nbar", printer.getOutput());
}
|
@Override
public boolean isSupported() {
    // This implementation reports support unconditionally; no runtime capability
    // probing is performed here.
    return true;
}
|
@Test
public void isSupported() {
    // A freshly constructed ZTE implementation must always report support.
    ZTEImpl implementation = new ZTEImpl(mApplication);
    Assert.assertTrue(implementation.isSupported());
}
|
@Override
public Collection<ThreadPoolPlugin> getAllPlugins() {
    // This implementation manages no plugins; return the shared immutable empty list.
    return Collections.emptyList();
}
|
@Test
public void testGetAllPlugins() {
    // An empty manager must expose an empty plugin-runtime collection.
    Assert.assertEquals(Collections.emptyList(), manager.getAllPluginRuntimes());
}
|
/**
 * Fetches the publisher agreement of the given user from the Eclipse API.
 *
 * @param user the user whose Eclipse person id identifies the agreement
 * @return the parsed agreement, or {@code null} when the user has no Eclipse
 *         person id or has not signed an agreement (the API answers 404)
 * @throws ErrorResultException if the request fails for any other reason
 */
public PublisherAgreement getPublisherAgreement(UserData user) {
    var eclipseToken = checkEclipseToken(user);
    var personId = user.getEclipsePersonId();
    if (StringUtils.isEmpty(personId)) {
        return null;
    }
    checkApiUrl();
    var urlTemplate = eclipseApiUrl + "openvsx/publisher_agreement/{personId}";
    var uriVariables = Map.of("personId", personId);
    var headers = new HttpHeaders();
    headers.setBearerAuth(eclipseToken.accessToken);
    headers.setAccept(Arrays.asList(MediaType.APPLICATION_JSON));
    var request = new HttpEntity<>(headers);
    try {
        var json = restTemplate.exchange(urlTemplate, HttpMethod.GET, request, String.class, uriVariables);
        return parseAgreementResponse(json);
    } catch (RestClientException exc) {
        HttpStatusCode status = HttpStatus.INTERNAL_SERVER_ERROR;
        if (exc instanceof HttpStatusCodeException statusExc) {
            status = statusExc.getStatusCode();
            // The endpoint yields 404 if the specified user has not signed a publisher agreement
            if (status == HttpStatus.NOT_FOUND) {
                return null;
            }
        }
        var url = UriComponentsBuilder.fromUriString(urlTemplate).build(uriVariables);
        // Parameterized logging avoids eager string concatenation and still records the stack trace.
        logger.error("Get request failed with URL: {}", url, exc);
        throw new ErrorResultException("Request for retrieving publisher agreement failed: " + exc.getMessage(),
                status);
    }
}
|
@Test
public void testGetPublisherAgreement() throws Exception {
    // Given a user with a known Eclipse person id...
    var testUser = mockUser();
    testUser.setEclipsePersonId("test");

    // ...and a stubbed agreement endpoint for that person id.
    var endpoint = "https://test.openvsx.eclipse.org/openvsx/publisher_agreement/{personId}";
    Mockito.when(restTemplate.exchange(eq(endpoint), eq(HttpMethod.GET), any(HttpEntity.class), eq(String.class), eq(Map.of("personId", "test"))))
            .thenReturn(mockAgreementResponse());

    var agreement = eclipse.getPublisherAgreement(testUser);

    // The mocked response must be parsed into an active agreement with all fields set.
    assertThat(agreement).isNotNull();
    assertThat(agreement.isActive).isEqualTo(true);
    assertThat(agreement.documentId).isEqualTo("abcd");
    assertThat(agreement.version).isEqualTo("1");
    assertThat(agreement.timestamp).isEqualTo(LocalDateTime.of(2020, 10, 9, 5, 10, 32));
}
|
/**
 * Parses a rate-limit match type from its textual form, e.g.
 * {@code "http_header=X-Custom"} or {@code "origin"}.
 * <p>
 * The delimiter is handled as a literal via indexOf/substring rather than the
 * regex semantics of {@link String#split(String)}; this keeps behavior
 * consistent with the {@code contains} check, survives a trailing delimiter
 * (no ArrayIndexOutOfBoundsException), and preserves matcher values that
 * themselves contain the delimiter.
 *
 * @param type textual match type, optionally followed by DELIMITER and a matcher
 * @return the parsed match type; matcher is {@code null} when none was given
 */
@Override
public MatchType convert(@NotNull String type) {
    int delimiterIndex = type.indexOf(DELIMITER);
    if (delimiterIndex >= 0) {
        String rateLimitType = type.substring(0, delimiterIndex);
        String matcher = type.substring(delimiterIndex + DELIMITER.length());
        return new MatchType(RateLimitType.valueOf(rateLimitType.toUpperCase()), matcher);
    }
    return new MatchType(RateLimitType.valueOf(type.toUpperCase()), null);
}
|
@Test
public void testConvertStringTypeHttpHeaderWithMatcher() {
    // "type=matcher" input must yield both the enum type and the matcher part.
    MatchType result = target.convert("http_header=customHeader");
    assertThat(result).isNotNull();
    assertThat(result.getType()).isEqualByComparingTo(RateLimitType.HTTP_HEADER);
    assertThat(result.getMatcher()).isEqualTo("customHeader");
}
|
@Override
public ResultSet getSuperTypes(final String catalog, final String schemaPattern, final String typeNamePattern) throws SQLException {
    // Map the logical catalog/schema to their actual names, delegate to the
    // underlying metadata, and wrap the result in the sharding-aware result set.
    return createDatabaseMetaDataResultSet(getDatabaseMetaData().getSuperTypes(getActualCatalog(catalog), getActualSchema(schemaPattern), typeNamePattern));
}
|
@Test
void assertGetSuperTypes() throws SQLException {
    // The wrapper must return a DatabaseMetaDataResultSet around the delegate's result.
    when(databaseMetaData.getSuperTypes("test", null, null)).thenReturn(resultSet);
    assertThat(shardingSphereDatabaseMetaData.getSuperTypes("test", null, null), instanceOf(DatabaseMetaDataResultSet.class));
}
|
@Override
public <T> @NonNull Schema schemaFor(TypeDescriptor<T> typeDescriptor) {
    // Type-descriptor overload delegates to the raw-Class based lookup.
    return schemaFor(typeDescriptor.getRawType());
}
|
@Test
public void testInnerStructSchemaWithSimpleTypedefs() {
    // Primitive typedefs resolve to their underlying types; no special handling needed.
    Schema innerSchema =
            defaultSchemaProvider.schemaFor(TypeDescriptor.of(TestThriftInnerStruct.class));
    assertNotNull(innerSchema);
    assertEquals(TypeName.STRING, innerSchema.getField("testNameTypedef").getType().getTypeName());
    assertEquals(TypeName.INT16, innerSchema.getField("testAgeTypedef").getType().getTypeName());
}
|
/**
 * Factory helper returning a fresh reject action.
 *
 * @return a new RejectAction instance
 */
public static MappingRuleAction createRejectAction() {
    return new RejectAction();
}
|
@Test
public void testRejectAction() {
    VariableContext context = new VariableContext();

    // Both the direct constructor and the factory helper must produce
    // actions whose execution results in a rejection.
    MappingRuleAction direct = new MappingRuleActions.RejectAction();
    MappingRuleAction viaFactory = MappingRuleActions.createRejectAction();

    assertRejectResult(direct.execute(context));
    assertRejectResult(viaFactory.execute(context));
}
|
/**
 * Creates a new Hazelcast instance, falling back to the default configuration
 * lookup when no config is supplied.
 */
public static HazelcastInstance newHazelcastInstance(Config config) {
    // Avoid reassigning the parameter; resolve the effective config into a local.
    final Config effectiveConfig = (config != null) ? config : Config.load();
    return newHazelcastInstance(
            effectiveConfig,
            effectiveConfig.getInstanceName(),
            new DefaultNodeContext()
    );
}
|
@Test
public void mobyNameGeneratedIfSystemPropertyEnabled() {
    // Enable moby naming before the instance is created.
    ClusterProperty.MOBY_NAMING_ENABLED.setSystemProperty("true");
    Config config = new Config();
    hazelcastInstance = HazelcastInstanceFactory.newHazelcastInstance(config);

    // The generated name must not fall back to the default "_hzInstance_" scheme.
    String instanceName = hazelcastInstance.getName();
    assertNotNull(instanceName);
    assertNotContains(instanceName, "_hzInstance_");
}
|
/**
 * Encodes a 10-byte Tor v2 onion-service address as a ".onion" hostname.
 *
 * @param onionAddrBytes exactly 10 bytes (80 bits) of v2 onion address
 * @return the base32 encoding of the bytes with the ".onion" suffix appended
 * @throws IllegalArgumentException if the input is not exactly 10 bytes long
 */
public static String encodeOnionUrlV2(byte[] onionAddrBytes) {
    // Include the offending length so callers can diagnose bad input,
    // instead of a message-less IllegalArgumentException.
    checkArgument(onionAddrBytes.length == 10,
            "v2 onion address must be exactly 10 bytes, got %s", onionAddrBytes.length);
    return BASE32.encode(onionAddrBytes) + ".onion";
}
|
@Test(expected = IllegalArgumentException.class)
public void encodeOnionUrlV3_badLength() {
    // NOTE(review): despite the "V3" in the name this exercises the *v2* encoder
    // with a 33-byte (v3-sized) address, which must be rejected since v2 requires
    // exactly 10 bytes. Consider renaming the test or targeting the v3 encoder.
    TorUtils.encodeOnionUrlV2(new byte[33]);
}
|
/**
 * Finds the sharding table configured for the given logic table name.
 *
 * @param logicTableName logic table name; may be null or empty
 * @return the matching sharding table, or empty when the name is blank or unknown
 */
public Optional<ShardingTable> findShardingTable(final String logicTableName) {
    if (Strings.isNullOrEmpty(logicTableName)) {
        return Optional.empty();
    }
    // Single map lookup instead of containsKey + get; ofNullable covers the miss case.
    return Optional.ofNullable(shardingTables.get(logicTableName));
}
|
@Test
void assertNotFindTableRule() {
    // A logic table that was never configured must yield an empty Optional.
    assertFalse(createMaximumShardingRule().findShardingTable("other_Table").isPresent());
}
|
/**
 * Converts a BigDecimal to a Timestamp, treating null as null and otherwise
 * reusing the integer conversion on the truncated long value.
 */
protected Timestamp convertBigNumberToTimestamp( BigDecimal bd ) {
    return bd == null ? null : convertIntegerToTimestamp( bd.longValue() );
}
|
@Test
public void testConvertBigNumberToTimestamp_Null() throws KettleValueException {
    // A null BigDecimal must convert to a null timestamp rather than throwing.
    ValueMetaTimestamp meta = new ValueMetaTimestamp();
    assertNull( meta.convertBigNumberToTimestamp( null ) );
}
|
@Override
public void releaseAllResources() throws IOException {
    // Idempotent: a second call is a no-op.
    if (isReleased) {
        return;
    }
    isReleased = true;
    // Release each queue together with its producer and connection id;
    // the three lists are index-aligned.
    final int queueCount = nettyPayloadManagers.size();
    for (int i = 0; i < queueCount; i++) {
        releaseQueue(nettyPayloadManagers.get(i), serviceProducers.get(i), nettyConnectionIds.get(i));
    }
}
|
@Test
void testReleaseAllResources() throws IOException {
    // Releasing the view must drain all queues, complete the broken-connection
    // futures, and flip the released flag.
    tieredStorageResultSubpartitionView.releaseAllResources();
    assertThat(nettyPayloadManagers.get(0).getBacklog()).isZero();
    assertThat(nettyPayloadManagers.get(1).getBacklog()).isZero();
    assertThat(connectionBrokenConsumers.get(0)).isDone();
    assertThat(connectionBrokenConsumers.get(1)).isDone();
    assertThat(tieredStorageResultSubpartitionView.isReleased()).isTrue();
}
|
@Override
public void writeDouble(final double v) throws IOException {
    // A double is written as the raw IEEE-754 bit pattern of a long.
    final long bits = Double.doubleToLongBits(v);
    writeLong(bits);
}
|
@Test
public void testWriteDoubleForVByteOrder() throws Exception {
    double value = 1.1d;
    out.writeDouble(value, LITTLE_ENDIAN);

    // The buffer must hold the IEEE-754 bits of the value in little-endian order.
    long expectedBits = Double.doubleToLongBits(value);
    long actualBits = Bits.readLongL(out.buffer, 0);
    assertEquals(expectedBits, actualBits);
}
|
/**
 * Executes the given statement after running it through the configured injectors.
 * <p>
 * Refuses to execute while the command runner reports a degraded state. Any side
 * effects performed by the injectors are reverted if execution fails.
 *
 * @throws KsqlServerException if the command runner reports a warning
 */
public StatementExecutorResponse execute(
    final ConfiguredStatement<? extends Statement> statement,
    final KsqlExecutionContext executionContext,
    final KsqlSecurityContext securityContext
) {
    final String commandRunnerWarningString = commandRunnerWarning.get();
    // isEmpty() instead of equals("") - same semantics, clearer intent.
    if (!commandRunnerWarningString.isEmpty()) {
        throw new KsqlServerException("Failed to handle Ksql Statement."
            + System.lineSeparator()
            + commandRunnerWarningString);
    }
    final InjectorWithSideEffects injector = InjectorWithSideEffects.wrap(
        injectorFactory.apply(executionContext, securityContext.getServiceContext()));
    final ConfiguredStatementWithSideEffects<?> injectedWithSideEffects =
        injector.injectWithSideEffects(statement);
    try {
        return executeInjected(
            injectedWithSideEffects.getStatement(),
            statement,
            executionContext,
            securityContext);
    } catch (Exception e) {
        // Undo whatever the injectors changed before propagating the failure.
        injector.revertSideEffects(injectedWithSideEffects);
        throw e;
    }
}
|
@Test
public void shouldThrowExceptionWhenInsertIntoProcessingLogTopic() {
    // Given: an INSERT INTO targeting a source backed by the processing log topic.
    final PreparedStatement<Statement> prepared =
        PreparedStatement.of("", new InsertInto(SourceName.of("s1"), mock(Query.class)));
    final ConfiguredStatement<Statement> configuredStatement =
        ConfiguredStatement.of(prepared, SessionConfig.of(KSQL_CONFIG, ImmutableMap.of())
    );
    final DataSource source = mock(DataSource.class);
    doReturn(source).when(metaStore).getSource(SourceName.of("s1"));
    when(source.getKafkaTopicName()).thenReturn("default_ksql_processing_log");

    // When: distributing the statement.
    final Exception thrown = assertThrows(
        KsqlException.class,
        () -> distributor.execute(configuredStatement, executionContext, mock(KsqlSecurityContext.class))
    );

    // Then: it is rejected as a read-only topic.
    assertThat(thrown.getMessage(), containsString(
        "Cannot insert into read-only topic: "
            + "default_ksql_processing_log"));
}
|
/**
 * Creates a client with the default configuration: a single server on the
 * local default Redis port.
 */
public static RedissonClient create() {
    Config defaultConfig = new Config();
    defaultConfig.useSingleServer().setAddress("redis://127.0.0.1:6379");
    return create(defaultConfig);
}
|
@Test
public void testConfigValidation() {
    // A connection pool smaller than the allowed minimum must be rejected at creation time.
    Assertions.assertThrows(IllegalArgumentException.class, () -> {
        Config invalidConfig = createConfig();
        invalidConfig.useSingleServer().setConnectionPoolSize(2);
        Redisson.create(invalidConfig);
    });
}
|
@Override
public final void isEqualTo(@Nullable Object other) {
    // Pure delegation; presumably overridden only to make the inherited
    // behavior final for this subject type - verify against the base class.
    super.isEqualTo(other);
}
|
@Test
@GwtIncompatible("Math.nextAfter")
public void testDoubleConstants_matchNextAfter() {
    // Each hand-written neighbor constant must equal the value Math.nextAfter
    // produces when stepping from the corresponding boundary toward the given direction.
    assertThat(Math.nextAfter(Double.MIN_VALUE, 1.0)).isEqualTo(OVER_MIN);
    assertThat(Math.nextAfter(1.23, Double.POSITIVE_INFINITY)).isEqualTo(OVER_GOLDEN);
    assertThat(Math.nextAfter(Double.MAX_VALUE, 0.0)).isEqualTo(NEARLY_MAX);
    assertThat(Math.nextAfter(-1.0 * Double.MAX_VALUE, 0.0)).isEqualTo(NEGATIVE_NEARLY_MAX);
    assertThat(Math.nextAfter(-1.0 * Double.MIN_VALUE, -1.0)).isEqualTo(UNDER_NEGATIVE_MIN);
}
|
/**
 * Constructs a random projection over the given columns.
 *
 * @param projection the projection matrix
 * @param columns    the input columns to project
 */
public RandomProjection(Matrix projection, String... columns) {
    // Delegates to the base transform with the fixed name prefix "RP".
    super(projection, "RP", columns);
}
|
@Test
public void testRandomProjection() {
    System.out.println("regular random projection");
    RandomProjection rp = RandomProjection.of(128, 40);
    Matrix projection = rp.projection;
    // Rows of the projection matrix must be orthonormal: P * P^T == I.
    Matrix gram = projection.aat();
    System.out.println(projection.toString(true));
    for (int row = 0; row < 40; row++) {
        assertEquals(1.0, gram.get(row, row), 1E-10);
        for (int col = 0; col < 40; col++) {
            if (row != col) {
                assertEquals(0.0, gram.get(row, col), 1E-10);
            }
        }
    }
}
|
/**
 * Creates, persists and returns a new access token for the given authentication.
 * <p>
 * Steps: verify the PKCE code challenge when present, strip reserved system
 * scopes, apply the client's token lifetime, persist the authentication holder,
 * optionally attach a refresh token and an approved-site reference, then run
 * the token through the configured enhancer and save it.
 *
 * @param authentication the OAuth2 authentication; must carry an OAuth2Request
 * @return the saved, enhanced access token entity
 * @throws InvalidClientException if the request's client id is unknown
 * @throws AuthenticationCredentialsNotFoundException if no authentication/request is present
 */
@Override
@Transactional(value="defaultTransactionManager")
public OAuth2AccessTokenEntity createAccessToken(OAuth2Authentication authentication) throws AuthenticationException, InvalidClientException {
    if (authentication != null && authentication.getOAuth2Request() != null) {
        // look up our client
        OAuth2Request request = authentication.getOAuth2Request();
        ClientDetailsEntity client = clientDetailsService.loadClientByClientId(request.getClientId());
        if (client == null) {
            throw new InvalidClientException("Client not found: " + request.getClientId());
        }
        // handle the PKCE code challenge if present
        if (request.getExtensions().containsKey(CODE_CHALLENGE)) {
            String challenge = (String) request.getExtensions().get(CODE_CHALLENGE);
            // NOTE(review): parse() may yield null/throw if CODE_CHALLENGE_METHOD is absent - confirm upstream validation.
            PKCEAlgorithm alg = PKCEAlgorithm.parse((String) request.getExtensions().get(CODE_CHALLENGE_METHOD));
            String verifier = request.getRequestParameters().get(CODE_VERIFIER);
            if (alg.equals(PKCEAlgorithm.plain)) {
                // do a direct string comparison
                if (!challenge.equals(verifier)) {
                    throw new InvalidRequestException("Code challenge and verifier do not match");
                }
            } else if (alg.equals(PKCEAlgorithm.S256)) {
                // hash the verifier
                try {
                    MessageDigest digest = MessageDigest.getInstance("SHA-256");
                    String hash = Base64URL.encode(digest.digest(verifier.getBytes(StandardCharsets.US_ASCII))).toString();
                    if (!challenge.equals(hash)) {
                        throw new InvalidRequestException("Code challenge and verifier do not match");
                    }
                } catch (NoSuchAlgorithmException e) {
                    // NOTE(review): if SHA-256 is unavailable the PKCE check is silently
                    // skipped after logging - consider rejecting the request instead.
                    logger.error("Unknown algorithm for PKCE digest", e);
                }
            }
        }
        OAuth2AccessTokenEntity token = new OAuth2AccessTokenEntity();//accessTokenFactory.createNewAccessToken();
        // attach the client
        token.setClient(client);
        // inherit the scope from the auth, but make a new set so it is
        //not unmodifiable. Unmodifiables don't play nicely with Eclipselink, which
        //wants to use the clone operation.
        Set<SystemScope> scopes = scopeService.fromStrings(request.getScope());
        // remove any of the special system scopes
        scopes = scopeService.removeReservedScopes(scopes);
        token.setScope(scopeService.toStrings(scopes));
        // make it expire if necessary
        if (client.getAccessTokenValiditySeconds() != null && client.getAccessTokenValiditySeconds() > 0) {
            Date expiration = new Date(System.currentTimeMillis() + (client.getAccessTokenValiditySeconds() * 1000L));
            token.setExpiration(expiration);
        }
        // attach the authorization so that we can look it up later
        AuthenticationHolderEntity authHolder = new AuthenticationHolderEntity();
        authHolder.setAuthentication(authentication);
        authHolder = authenticationHolderRepository.save(authHolder);
        token.setAuthenticationHolder(authHolder);
        // attach a refresh token, if this client is allowed to request them and the user gets the offline scope
        if (client.isAllowRefresh() && token.getScope().contains(SystemScopeService.OFFLINE_ACCESS)) {
            OAuth2RefreshTokenEntity savedRefreshToken = createRefreshToken(client, authHolder);
            token.setRefreshToken(savedRefreshToken);
        }
        //Add approved site reference, if any
        OAuth2Request originalAuthRequest = authHolder.getAuthentication().getOAuth2Request();
        if (originalAuthRequest.getExtensions() != null && originalAuthRequest.getExtensions().containsKey("approved_site")) {
            Long apId = Long.parseLong((String) originalAuthRequest.getExtensions().get("approved_site"));
            ApprovedSite ap = approvedSiteService.getById(apId);
            token.setApprovedSite(ap);
        }
        OAuth2AccessTokenEntity enhancedToken = (OAuth2AccessTokenEntity) tokenEnhancer.enhance(token, authentication);
        OAuth2AccessTokenEntity savedToken = saveAccessToken(enhancedToken);
        if (savedToken.getRefreshToken() != null) {
            tokenRepository.saveRefreshToken(savedToken.getRefreshToken()); // make sure we save any changes that might have been enhanced
        }
        return savedToken;
    }
    throw new AuthenticationCredentialsNotFoundException("No authentication credentials found");
}
|
@Test
public void createAccessToken_checkScopes() {
    // Reserved system scopes must be stripped while the remaining scopes survive.
    OAuth2AccessTokenEntity accessToken = service.createAccessToken(authentication);
    verify(scopeService, atLeastOnce()).removeReservedScopes(anySet());
    assertThat(accessToken.getScope(), equalTo(scope));
}
|
/**
 * Builds a {@link Locale} from a string such as "en", "en_GB" or "en_GB_scouse".
 * <p>
 * Robust against a trailing separator ("en_" no longer throws
 * ArrayIndexOutOfBoundsException) and preserves a variant part instead of
 * silently dropping it.
 *
 * @param s locale string; language, country and variant separated by LOBAR
 * @return the corresponding locale
 */
public static Locale localeFromString(String s) {
    if (!s.contains(LOBAR)) {
        return new Locale(s);
    }
    String[] items = s.split(LOBAR);
    if (items.length == 1) {
        // e.g. "en_" - a trailing separator yields a single element.
        return new Locale(items[0]);
    }
    if (items.length > 2) {
        // Keep the variant (third component) instead of dropping it.
        return new Locale(items[0], items[1], items[2]);
    }
    return new Locale(items[0], items[1]);
}
|
@Test
public void localeFromStringEnGB() {
    title("localeFromStringEnGB");
    // "en_GB" must parse into language "en" and country "GB".
    locale = LionUtils.localeFromString("en_GB");
    checkLanguageCountry(locale, "en", "GB");
}
|
/**
 * Resolves a UDAF function call against the given schema into a concrete
 * aggregate function instance.
 * <p>
 * The argument SQL types are derived from the call's expressions, the matching
 * factory function is looked up, and the column indices of the non-constant
 * (non-init) arguments are computed and passed into the function's init args.
 *
 * @param functionRegistry registry used to look up the aggregate factory
 * @param functionCall     the parsed aggregate call
 * @param schema           the logical schema the call's columns are resolved against
 * @param config           the KSQL config forwarded into the init args
 * @return the instantiated aggregate function
 * @throws KsqlException if type resolution, column lookup, or instantiation fails
 */
public static KsqlAggregateFunction<?, ?, ?> resolveAggregateFunction(
    final FunctionRegistry functionRegistry,
    final FunctionCall functionCall,
    final LogicalSchema schema,
    final KsqlConfig config
) {
    try {
        final ExpressionTypeManager expressionTypeManager =
            new ExpressionTypeManager(schema, functionRegistry);
        final List<SqlType> args = functionCall.getArguments().stream()
            .map(expressionTypeManager::getExpressionSqlType)
            .collect(Collectors.toList());
        final AggregateFunctionFactory.FunctionSource func = functionRegistry
            .getAggregateFactory(functionCall.getName())
            .getFunction(args);
        final int totalArgs = functionCall.getArguments().size();
        // All non-constant UDAF arguments must be column references
        // (the trailing func.initArgs arguments are constants and are skipped).
        final List<Integer> argIndices = functionCall.getArguments().stream()
            .limit(totalArgs - func.initArgs)
            .map((arg) -> {
                final Optional<Column> column;
                if (arg instanceof UnqualifiedColumnReferenceExp) {
                    final UnqualifiedColumnReferenceExp colRef =
                        (UnqualifiedColumnReferenceExp) arg;
                    column = schema.findValueColumn(colRef.getColumnName());
                } else {
                    // assume that it is a column reference with no alias
                    column = schema.findValueColumn(ColumnName.of(arg.toString()));
                }
                return column.orElseThrow(
                    () -> new KsqlException("Could not find column for expression: " + arg)
                );
            }).map(Column::index).collect(Collectors.toList());
        return func.source.apply(createAggregateFunctionInitArgs(
            func.initArgs,
            argIndices,
            functionCall,
            config
        ));
    } catch (final Exception e) {
        // Wrap everything so callers get the failing call in the message.
        throw new KsqlException("Failed to create aggregate function: " + functionCall, e);
    }
}
|
@Test
public void shouldGetAggregateWithCorrectType() {
    // When: resolving the aggregate call against the schema.
    UdafUtil.resolveAggregateFunction(functionRegistry, FUNCTION_CALL, SCHEMA, KsqlConfig.empty());
    // Then: the factory is asked for a function matching the column's SQL type.
    verify(functionFactory).getFunction(
        eq(Collections.singletonList(SqlTypes.BIGINT))
    );
}
|
/**
 * Returns the SHA-256 hex digest of the given string, memoized so repeated
 * inputs skip re-hashing.
 *
 * @param string the input to hash
 * @return the lowercase hex SHA-256 digest
 */
@SneakyThrows // compute() doesn't throw checked exceptions
public static String sha256Hex(String string) {
    return sha256DigestCache.get(string, () -> compute(string, DigestObjectPools.SHA_256));
}
|
@Test
public void shouldComputeForAnEmptyStringUsingSHA_256() {
    // The empty string must hash to the same SHA-256 hex digest commons-codec produces.
    String input = "";
    String actual = sha256Hex(input);
    assertEquals(DigestUtils.sha256Hex(input), actual);
}
|
/**
 * Creates a MapElements transform from the given function; input and output
 * type descriptors are inferred from the function itself.
 */
public static <InputT, OutputT> MapElements<InputT, OutputT> via(
    final InferableFunction<InputT, OutputT> fn) {
    return new MapElements<>(fn, fn.getInputTypeDescriptor(), fn.getOutputTypeDescriptor());
}
|
@Test
public void testInferableFunctionDisplayData() {
    // A function that contributes its own display item.
    InferableFunction<Integer, ?> fn =
        new InferableFunction<Integer, Integer>() {
            @Override
            public Integer apply(Integer input) {
                return input;
            }

            @Override
            public void populateDisplayData(DisplayData.Builder builder) {
                builder.add(DisplayData.item("foo", "baz"));
            }
        };

    MapElements<?, ?> transform = MapElements.via(fn);

    // The transform must expose both the function class and the custom item.
    assertThat(DisplayData.from(transform), hasDisplayItem("class", fn.getClass()));
    assertThat(DisplayData.from(transform), hasDisplayItem("foo", "baz"));
}
|
/**
 * Folds each bucket space of the incoming stats into this node's stats,
 * scaled by the given factor (1 adds, -1 subtracts).
 */
private void merge(ContentNodeStats stats, int factor) {
    for (Map.Entry<String, BucketSpaceStats> entry : stats.bucketSpaces.entrySet()) {
        String spaceName = entry.getKey();
        BucketSpaceStats target = bucketSpaces.get(spaceName);
        if (target == null && factor == 1) {
            // Only additive merges may introduce a bucket space we have not seen yet.
            target = BucketSpaceStats.empty();
            bucketSpaces.put(spaceName, target);
        }
        if (target != null) {
            target.merge(entry.getValue(), factor);
        }
    }
}
|
@Test
void bucket_space_stats_tracks_multiple_layers_of_invalid() {
    // Each merge of an invalid stats object adds one "invalid layer"; merging
    // with factor -1 removes one. Only after all layers are removed does the
    // aggregate become valid again and expose the merged counts.
    BucketSpaceStats stats = BucketSpaceStats.invalid();
    stats.merge(BucketSpaceStats.invalid(), 1);
    assertFalse(stats.valid());
    stats.merge(BucketSpaceStats.invalid(), 1);
    assertFalse(stats.valid());
    stats.merge(BucketSpaceStats.of(5, 1), 1);
    assertFalse(stats.valid());
    stats.merge(BucketSpaceStats.invalid(), -1);
    assertFalse(stats.valid());
    stats.merge(BucketSpaceStats.invalid(), -1);
    assertFalse(stats.valid());
    stats.merge(BucketSpaceStats.invalid(), -1);
    // All three invalid layers removed: the valid merge from above remains.
    assertTrue(stats.valid());
    assertEquals(BucketSpaceStats.of(5, 1), stats);
}
|
@Override
public int hashCode()
{
    // Hash is derived solely from the status field - presumably matching the
    // fields compared in equals(); verify against that implementation.
    return Objects.hash(_status);
}
|
@Test(dataProvider = "testHashCodeDataProvider")
public void testHashCode
(
    boolean hasSameHashCode,
    @Nonnull UpdateResponse updateResponse1,
    @Nonnull UpdateResponse updateResponse2
)
{
    // Data-driven check: pairs flagged equal must collide, others must differ.
    if (hasSameHashCode)
    {
        assertEquals(updateResponse1.hashCode(), updateResponse2.hashCode());
    }
    else
    {
        assertNotEquals(updateResponse1.hashCode(), updateResponse2.hashCode());
    }
}
|
/**
 * Joins the values (arguments 2..n) with the separator given as the first
 * argument, skipping null values. A null separator yields a null result.
 */
@Udf
public String concatWS(
    @UdfParameter(description = "Separator string and values to join") final String... inputs) {
    // At least a separator and one value are required.
    if (inputs == null || inputs.length < 2) {
        throw new KsqlFunctionException("Function Concat_WS expects at least two input arguments.");
    }

    final String separator = inputs[0];
    if (separator == null) {
        return null;
    }

    return Arrays.stream(inputs)
        .skip(1)
        .filter(Objects::nonNull)
        .collect(Collectors.joining(separator));
}
|
@Test
public void shouldConcatStrings() {
    // Four words joined with a single-space separator.
    assertThat(udf.concatWS(" ", "The", "Quick", "Brown", "Fox"), is("The Quick Brown Fox"));
}
|
/**
 * Loads and parses the app.xml descriptor of the named application.
 *
 * @param appName the application name
 * @return the parsed application description
 * @throws ApplicationException if the descriptor cannot be read or parsed
 */
public ApplicationDescription getApplicationDescription(String appName) {
    try {
        // Disable splitting so attribute values containing commas survive intact.
        XMLConfiguration xmlConfig = new XMLConfiguration();
        xmlConfig.setAttributeSplittingDisabled(true);
        xmlConfig.setDelimiterParsingDisabled(true);
        xmlConfig.load(appFile(appName, APP_XML));
        return loadAppDescription(xmlConfig);
    } catch (Exception e) {
        throw new ApplicationException("Unable to get app description", e);
    }
}
|
@Test(expected = ApplicationException.class)
public void getBadAppDesc() throws IOException {
    // A malformed or missing descriptor must surface as ApplicationException.
    aar.getApplicationDescription("org.foo.BAD");
}
|
@Override
List<DiscoveryNode> resolveNodes() {
    // Resolution strategies in priority order: service name, service label,
    // pod label, and finally all endpoints in the namespace.
    final boolean hasServiceName = serviceName != null && !serviceName.isEmpty();
    if (hasServiceName) {
        logger.fine("Using service name to discover nodes.");
        return getSimpleDiscoveryNodes(client.endpointsByName(serviceName));
    }
    final boolean hasServiceLabel = serviceLabel != null && !serviceLabel.isEmpty();
    if (hasServiceLabel) {
        logger.fine("Using service label to discover nodes.");
        return getSimpleDiscoveryNodes(client.endpointsByServiceLabel(serviceLabel, serviceLabelValue));
    }
    final boolean hasPodLabel = podLabel != null && !podLabel.isEmpty();
    if (hasPodLabel) {
        logger.fine("Using pod label to discover nodes.");
        return getSimpleDiscoveryNodes(client.endpointsByPodLabel(podLabel, podLabelValue));
    }
    return getSimpleDiscoveryNodes(client.endpoints());
}
|
@Test
public void resolveWhenNodeInFound() {
    // given: the API returns no endpoints at all
    given(client.endpoints()).willReturn(Collections.emptyList());
    KubernetesApiEndpointResolver resolver = new KubernetesApiEndpointResolver(LOGGER, null, 0, null, null, null, null, null, client);

    // when
    List<DiscoveryNode> resolved = resolver.resolveNodes();

    // then
    assertEquals(0, resolved.size());
}
|
@Override
public EurekaHttpClient newClient(EurekaEndpoint endpoint) {
    // Clone so per-endpoint customization never leaks back into the shared builder.
    WebClient.Builder clientBuilder = this.builderSupplier.get().clone();
    setUrl(clientBuilder, endpoint.getServiceUrl());
    setCodecs(clientBuilder);
    clientBuilder.filter(http4XxErrorExchangeFilterFunction());
    return new WebClientEurekaHttpClient(clientBuilder.build());
}
|
@Test
void testInvalidUserInfo() {
    // NOTE(review): this only verifies that client creation does not throw for a
    // URL containing user-info; nothing about the resulting client is asserted.
    // Consider adding an explicit assertion (or expected exception) here.
    transportClientFatory.newClient(new DefaultEndpoint("http://test@localhost:8761"));
}
|
/**
 * Static convenience entry point: routes the SHOW statement through the
 * global state manager's show-executor visitor.
 */
public static ShowResultSet execute(ShowStmt statement, ConnectContext context) {
    return GlobalStateMgr.getCurrentState().getShowExecutor().showExecutorVisitor.visit(statement, context);
}
|
@Test
public void testShowRoutineLoadNonExisted() throws AnalysisException, DdlException {
    // Showing a routine load job that does not exist must fail semantic analysis.
    ShowRoutineLoadStmt stmt = new ShowRoutineLoadStmt(new LabelName("testDb", "non-existed-job-name"), false);
    // AnalysisException("There is no job named...") is expected.
    Assert.assertThrows(SemanticException.class, () -> ShowExecutor.execute(stmt, ctx));
}
|
@Override
public boolean registerAllRequestsProcessedListener(NotificationListener listener)
    throws IOException {
    // Pure delegation to the superclass - presumably overridden only to make the
    // method visible/observable for tests; verify against the base class.
    return super.registerAllRequestsProcessedListener(listener);
}
|
@Test
void testSubscribe() throws Exception {
    // Subscription is only accepted while requests are outstanding, and the
    // listener is notified once the last outstanding request is processed.
    final TestNotificationListener listener = new TestNotificationListener();
    // Unsuccessful subscription, because no outstanding requests
    assertThat(writer.registerAllRequestsProcessedListener(listener))
        .withFailMessage("Allowed to subscribe w/o any outstanding requests.")
        .isFalse();
    // Successful subscription
    addRequest();
    assertThat(writer.registerAllRequestsProcessedListener(listener))
        .withFailMessage("Didn't allow to subscribe.")
        .isTrue();
    // Test notification
    handleRequest();
    assertThat(listener.getNumberOfNotifications())
        .withFailMessage("Listener was not notified.")
        .isOne();
}
|
/**
 * Returns whether the cron expression fires exactly at the given date
 * (millisecond precision is truncated to whole seconds before the check).
 */
public boolean isSatisfiedBy(Date date) {
    // Truncate to whole seconds, then check that the next fire time
    // computed from (t - 1s) lands exactly on t.
    Calendar calendar = Calendar.getInstance(getTimeZone());
    calendar.setTime(date);
    calendar.set(Calendar.MILLISECOND, 0);
    Date truncated = calendar.getTime();

    calendar.add(Calendar.SECOND, -1);
    Date nextFireTime = getTimeAfter(calendar.getTime());

    return nextFireTime != null && nextFireTime.equals(truncated);
}
|
@Test
public void testIsSatisfiedBy() throws Exception {
    CronExpression cron = new CronExpression("0 15 10 * * ? 2005");

    // Exact match: 2005-06-01 10:15:00 fires.
    Calendar calendar = Calendar.getInstance();
    calendar.set(2005, Calendar.JUNE, 1, 10, 15, 0);
    assertThat(cron.isSatisfiedBy(calendar.getTime())).isTrue();

    // Wrong year does not fire.
    calendar.set(Calendar.YEAR, 2006);
    assertThat(cron.isSatisfiedBy(calendar.getTime())).isFalse();

    // One minute late does not fire.
    calendar = Calendar.getInstance();
    calendar.set(2005, Calendar.JUNE, 1, 10, 16, 0);
    assertThat(cron.isSatisfiedBy(calendar.getTime())).isFalse();

    // One minute early does not fire.
    calendar = Calendar.getInstance();
    calendar.set(2005, Calendar.JUNE, 1, 10, 14, 0);
    assertThat(cron.isSatisfiedBy(calendar.getTime())).isFalse();
}
|
/**
 * Handles a consumer group heartbeat: creates/updates the member, maintains the
 * group epoch and subscription metadata, recomputes the target assignment when
 * needed, and reconciles the member's assignment toward it.
 *
 * @param groupId              the group id
 * @param memberId             the member id; empty on first join (a new id is generated)
 * @param memberEpoch          the member epoch reported by the client; 0 means (re)join
 * @param instanceId           static-membership instance id, or null for dynamic members
 * @param rackId               the client rack id, or null
 * @param rebalanceTimeoutMs   the rebalance timeout, or -1 when not provided
 * @param clientId             the client id
 * @param clientHost           the client host
 * @param subscribedTopicNames the subscribed topics, or null when unchanged
 * @param assignorName         the requested server-side assignor, or null
 * @param ownedTopicPartitions the partitions the client currently owns, or null
 * @return the records to persist plus the heartbeat response
 * @throws ApiException on validation failures (e.g. full group, unknown member)
 */
private CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> consumerGroupHeartbeat(
    String groupId,
    String memberId,
    int memberEpoch,
    String instanceId,
    String rackId,
    int rebalanceTimeoutMs,
    String clientId,
    String clientHost,
    List<String> subscribedTopicNames,
    String assignorName,
    List<ConsumerGroupHeartbeatRequestData.TopicPartitions> ownedTopicPartitions
) throws ApiException {
    final long currentTimeMs = time.milliseconds();
    final List<CoordinatorRecord> records = new ArrayList<>();

    // Get or create the consumer group.
    boolean createIfNotExists = memberEpoch == 0;
    final ConsumerGroup group = getOrMaybeCreateConsumerGroup(groupId, createIfNotExists, records);
    throwIfConsumerGroupIsFull(group, memberId);

    // Get or create the member.
    if (memberId.isEmpty()) memberId = Uuid.randomUuid().toString();
    final ConsumerGroupMember member;
    if (instanceId == null) {
        member = getOrMaybeSubscribeDynamicConsumerGroupMember(
            group,
            memberId,
            memberEpoch,
            ownedTopicPartitions,
            createIfNotExists,
            false
        );
    } else {
        member = getOrMaybeSubscribeStaticConsumerGroupMember(
            group,
            memberId,
            memberEpoch,
            instanceId,
            ownedTopicPartitions,
            createIfNotExists,
            false,
            records
        );
    }

    // 1. Create or update the member. If the member is new or has changed, a ConsumerGroupMemberMetadataValue
    // record is written to the __consumer_offsets partition to persist the change. If the subscriptions have
    // changed, the subscription metadata is updated and persisted by writing a ConsumerGroupPartitionMetadataValue
    // record to the __consumer_offsets partition. Finally, the group epoch is bumped if the subscriptions have
    // changed, and persisted by writing a ConsumerGroupMetadataValue record to the partition.
    ConsumerGroupMember updatedMember = new ConsumerGroupMember.Builder(member)
        .maybeUpdateInstanceId(Optional.ofNullable(instanceId))
        .maybeUpdateRackId(Optional.ofNullable(rackId))
        .maybeUpdateRebalanceTimeoutMs(ofSentinel(rebalanceTimeoutMs))
        .maybeUpdateServerAssignorName(Optional.ofNullable(assignorName))
        .maybeUpdateSubscribedTopicNames(Optional.ofNullable(subscribedTopicNames))
        .setClientId(clientId)
        .setClientHost(clientHost)
        .setClassicMemberMetadata(null)
        .build();
    boolean bumpGroupEpoch = hasMemberSubscriptionChanged(
        groupId,
        member,
        updatedMember,
        records
    );
    int groupEpoch = group.groupEpoch();
    Map<String, TopicMetadata> subscriptionMetadata = group.subscriptionMetadata();
    Map<String, Integer> subscribedTopicNamesMap = group.subscribedTopicNames();
    SubscriptionType subscriptionType = group.subscriptionType();
    if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) {
        // The subscription metadata is updated in two cases:
        // 1) The member has updated its subscriptions;
        // 2) The refresh deadline has been reached.
        subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember);
        subscriptionMetadata = group.computeSubscriptionMetadata(
            subscribedTopicNamesMap,
            metadataImage.topics(),
            metadataImage.cluster()
        );
        int numMembers = group.numMembers();
        if (!group.hasMember(updatedMember.memberId()) && !group.hasStaticMember(updatedMember.instanceId())) {
            numMembers++;
        }
        subscriptionType = ModernGroup.subscriptionType(
            subscribedTopicNamesMap,
            numMembers
        );
        if (!subscriptionMetadata.equals(group.subscriptionMetadata())) {
            log.info("[GroupId {}] Computed new subscription metadata: {}.",
                groupId, subscriptionMetadata);
            bumpGroupEpoch = true;
            records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata));
        }
        if (bumpGroupEpoch) {
            groupEpoch += 1;
            records.add(newConsumerGroupEpochRecord(groupId, groupEpoch));
            log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch);
            metrics.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME);
        }
        group.setMetadataRefreshDeadline(currentTimeMs + consumerGroupMetadataRefreshIntervalMs, groupEpoch);
    }

    // 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between
    // the existing and the new target assignment is persisted to the partition.
    final int targetAssignmentEpoch;
    final Assignment targetAssignment;
    if (groupEpoch > group.assignmentEpoch()) {
        targetAssignment = updateTargetAssignment(
            group,
            groupEpoch,
            member,
            updatedMember,
            subscriptionMetadata,
            subscriptionType,
            records
        );
        targetAssignmentEpoch = groupEpoch;
    } else {
        targetAssignmentEpoch = group.assignmentEpoch();
        targetAssignment = group.targetAssignment(updatedMember.memberId(), updatedMember.instanceId());
    }

    // 3. Reconcile the member's assignment with the target assignment if the member is not
    // fully reconciled yet.
    updatedMember = maybeReconcile(
        groupId,
        updatedMember,
        group::currentPartitionEpoch,
        targetAssignmentEpoch,
        targetAssignment,
        ownedTopicPartitions,
        records
    );
    scheduleConsumerGroupSessionTimeout(groupId, memberId);

    // Prepare the response.
    ConsumerGroupHeartbeatResponseData response = new ConsumerGroupHeartbeatResponseData()
        .setMemberId(updatedMember.memberId())
        .setMemberEpoch(updatedMember.memberEpoch())
        .setHeartbeatIntervalMs(consumerGroupHeartbeatIntervalMs(groupId));

    // The assignment is only provided in the following cases:
    // 1. The member sent a full request. It does so when joining or rejoining the group with zero
    //    as the member epoch; or on any errors (e.g. timeout). We use all the non-optional fields
    //    (rebalanceTimeoutMs, subscribedTopicNames and ownedTopicPartitions) to detect a full request
    //    as those must be set in a full request.
    // 2. The member's assignment has been updated.
    boolean isFullRequest = memberEpoch == 0 || (rebalanceTimeoutMs != -1 && subscribedTopicNames != null && ownedTopicPartitions != null);
    if (isFullRequest || hasAssignedPartitionsChanged(member, updatedMember)) {
        response.setAssignment(createConsumerGroupResponseAssignment(updatedMember));
    }
    return new CoordinatorResult<>(records, response);
}
|
// Verifies that when a static member leaves the group "temporarily" (heartbeat
// with member epoch LEAVE_GROUP_STATIC_MEMBER_EPOCH, i.e. -2), the group epoch
// is NOT bumped and no rebalance is triggered: the only record written is a
// current-assignment record carrying the member's -2 epoch, so the member can
// rejoin later and reclaim its assignment.
@Test
public void testNoGroupEpochBumpWhenStaticMemberTemporarilyLeaves() {
    String groupId = "fooup";
    // Use a static member id as it makes the test easier.
    String memberId1 = Uuid.randomUuid().toString();
    String memberId2 = Uuid.randomUuid().toString();
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";
    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    ConsumerGroupMember member1 = new ConsumerGroupMember.Builder(memberId1)
        .setState(MemberState.STABLE)
        .setInstanceId(memberId1)
        .setMemberEpoch(10)
        .setPreviousMemberEpoch(9)
        .setClientId(DEFAULT_CLIENT_ID)
        .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
        .setSubscribedTopicNames(Arrays.asList("foo", "bar"))
        .setServerAssignorName("range")
        .setAssignedPartitions(mkAssignment(
            mkTopicAssignment(fooTopicId, 0, 1, 2),
            mkTopicAssignment(barTopicId, 0, 1)))
        .build();
    ConsumerGroupMember member2 = new ConsumerGroupMember.Builder(memberId2)
        .setState(MemberState.STABLE)
        .setInstanceId(memberId2)
        .setMemberEpoch(10)
        .setPreviousMemberEpoch(9)
        .setClientId(DEFAULT_CLIENT_ID)
        .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
        // Same subscription as member 1, so the temporary departure must not
        // require any subscription-metadata recomputation or epoch bump.
        .setSubscribedTopicNames(Arrays.asList("foo", "bar"))
        .setServerAssignorName("range")
        .setAssignedPartitions(mkAssignment(
            mkTopicAssignment(fooTopicId, 3, 4, 5),
            mkTopicAssignment(barTopicId, 2)))
        .build();
    // Consumer group with two static members.
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
        .withConsumerGroupAssignors(Collections.singletonList(assignor))
        .withMetadataImage(new MetadataImageBuilder()
            .addTopic(fooTopicId, fooTopicName, 6)
            .addTopic(barTopicId, barTopicName, 3)
            .addRacks()
            .build())
        .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10)
            .withMember(member1)
            .withMember(member2)
            .withAssignment(memberId1, mkAssignment(
                mkTopicAssignment(fooTopicId, 0, 1, 2),
                mkTopicAssignment(barTopicId, 0, 1)))
            .withAssignment(memberId2, mkAssignment(
                mkTopicAssignment(fooTopicId, 3, 4, 5),
                mkTopicAssignment(barTopicId, 2)))
            .withAssignmentEpoch(10))
        .build();
    // Member 2 leaves the consumer group temporarily (static-member leave).
    CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result = context.consumerGroupHeartbeat(
        new ConsumerGroupHeartbeatRequestData()
            .setGroupId(groupId)
            .setMemberId(memberId2)
            .setInstanceId(memberId2)
            .setMemberEpoch(LEAVE_GROUP_STATIC_MEMBER_EPOCH)
            .setRebalanceTimeoutMs(5000)
            .setSubscribedTopicNames(Arrays.asList("foo", "bar"))
            .setTopicPartitions(Collections.emptyList()));
    // The response echoes the leave: member epoch is set to -2.
    assertResponseEquals(
        new ConsumerGroupHeartbeatResponseData()
            .setMemberId(memberId2)
            .setMemberEpoch(LEAVE_GROUP_STATIC_MEMBER_EPOCH),
        result.response()
    );
    ConsumerGroupMember member2UpdatedEpoch = new ConsumerGroupMember
        .Builder(member2)
        .setMemberEpoch(LEAVE_GROUP_STATIC_MEMBER_EPOCH)
        .build();
    // Exactly one record is written: the member's current assignment with the
    // -2 epoch. Crucially, there is no group epoch record — the group epoch
    // stays at 10, proving the temporary leave did not trigger a rebalance.
    assertEquals(1, result.records().size());
    assertRecordEquals(result.records().get(0), GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, member2UpdatedEpoch));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.