focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
/**
 * Returns the shared singleton {@link LogCollector} for {@code ShenyuRequestLog}.
 *
 * @return the collector held in the static {@code INSTANCE} field
 */
public static LogCollector<ShenyuRequestLog> getInstance() {
    return INSTANCE;
}
|
/**
 * Verifies the collector lifecycle: after {@code start()} the private
 * {@code started} flag of {@link AbstractLogCollector} is {@code true},
 * and after {@code close()} it is {@code false}. The flag is read via
 * reflection because it has no public accessor.
 */
@Test
public void testAbstractLogCollector() throws Exception {
    ElasticSearchLogCollector.getInstance().start();
    Field field1 = AbstractLogCollector.class.getDeclaredField("started");
    field1.setAccessible(true);
    Assertions.assertEquals(field1.get(ElasticSearchLogCollector.getInstance()).toString(), "true");
    // Collect one log entry while started, then shut down.
    ElasticSearchLogCollector.getInstance().collect(shenyuRequestLog);
    ElasticSearchLogCollector.getInstance().close();
    Field field2 = AbstractLogCollector.class.getDeclaredField("started");
    field2.setAccessible(true);
    Assertions.assertEquals(field2.get(ElasticSearchLogCollector.getInstance()).toString(), "false");
}
|
/**
 * Component activation hook: registers the listener registry as a sink for
 * {@code ApplicationEvent}s and wires the store delegate, then logs startup.
 */
@Activate
public void activate() {
    eventDispatcher.addSink(ApplicationEvent.class, listenerRegistry);
    store.setDelegate(delegate);
    log.info("Started");
}
|
/**
 * Activating an installed application moves it to ACTIVE state without
 * triggering the pre-deactivate hook.
 */
@Test
public void activate() {
    install();
    mgr.activate(APP_ID);
    assertEquals("incorrect app state", ACTIVE, mgr.getState(APP_ID));
    assertFalse("preDeactivate hook wrongly called", deactivated);
}
|
/**
 * Attempts to acquire a permit from the rate limiter; on rejection records
 * the reject metric and throws.
 *
 * @throws KsqlRateLimitException if the host is currently at its pull-query rate limit
 */
public void checkLimit() {
    if (!rateLimiter.tryAcquire()) {
        // Count the rejection before surfacing the error to the caller.
        rejectSensor.record();
        throw new KsqlRateLimitException("Host is at rate limit for pull queries. Currently set to "
            + rateLimiter.getRate() + " qps.");
    }
}
|
/**
 * A single checkLimit() call against a 1-qps limiter should pass and leave
 * the reject metric at zero.
 */
@Test
public void shouldSucceedUnderLimit() {
    final Metrics metrics = new Metrics();
    final Map<String, String> tags = Collections.emptyMap();
    // It doesn't look like the underlying guava rate limiter has a way to control time, so we're
    // just going to have to hope that these tests reliably run in under a second.
    final RateLimiter limiter = new RateLimiter(1, METRIC_NAMESPACE, metrics, tags);
    assertThat(getReject(metrics, tags), is(0.0));
    limiter.checkLimit();
    assertThat(getReject(metrics, tags), is(0.0));
}
|
/**
 * Renders an AST back into SQL text.
 *
 * @param root the AST node to format
 * @return the formatted SQL with any trailing newlines stripped
 */
public static String formatSql(final AstNode root) {
    final StringBuilder builder = new StringBuilder();
    new Formatter(builder).process(root, 0);
    return StringUtils.stripEnd(builder.toString(), "\n");
}
|
/** TERMINATE of a single query id formats as {@code TERMINATE <id>}. */
@Test
public void shouldFormatTerminateQuery() {
    // Given:
    final TerminateQuery terminateQuery = TerminateQuery.query(Optional.empty(), new QueryId("FOO"));
    // When:
    final String formatted = SqlFormatter.formatSql(terminateQuery);
    // Then:
    assertThat(formatted, is("TERMINATE FOO"));
}
|
/**
 * Delegates to the underlying channel for the remote peer address.
 *
 * @return the remote address, or null if the channel has none
 */
@Override
public InetSocketAddress getRemoteAddress() {
    return channel.getRemoteAddress();
}
|
/** With no connected channel the header reports a null remote address. */
@Test
void getRemoteAddressTest() {
    Assertions.assertNull(header.getRemoteAddress());
}
|
/**
 * Extracts transitive metadata from request headers: any header whose name
 * starts with the SCT or Polaris transitive prefix is stripped of the prefix
 * and copied (first value only) into the result map.
 *
 * @param exchange the current server exchange carrying the request headers
 * @return map of stripped header name to first header value; empty if none match
 */
public static Map<String, String> resolve(ServerWebExchange exchange) {
    Map<String, String> result = new HashMap<>();
    HttpHeaders headers = exchange.getRequest().getHeaders();
    for (Map.Entry<String, List<String>> entry : headers.entrySet()) {
        String key = entry.getKey();
        if (StringUtils.isBlank(key)) {
            continue;
        }
        // resolve sct transitive header
        if (StringUtils.startsWithIgnoreCase(key, MetadataConstant.SCT_TRANSITIVE_HEADER_PREFIX)
            && !CollectionUtils.isEmpty(entry.getValue())) {
            String sourceKey = StringUtils.substring(key, MetadataConstant.SCT_TRANSITIVE_HEADER_PREFIX_LENGTH);
            result.put(sourceKey, entry.getValue().get(0));
        }
        // resolve polaris transitive header
        if (StringUtils.startsWithIgnoreCase(key, MetadataConstant.POLARIS_TRANSITIVE_HEADER_PREFIX)
            && !CollectionUtils.isEmpty(entry.getValue())) {
            String sourceKey = StringUtils.substring(key, MetadataConstant.POLARIS_TRANSITIVE_HEADER_PREFIX_LENGTH);
            result.put(sourceKey, entry.getValue().get(0));
        }
    }
    return result;
}
|
/**
 * A Polaris transitive header has its prefix stripped: header
 * {@code X-Polaris-Metadata-Transitive-a} resolves to key {@code a}.
 */
@Test
public void testPolarisServletTransitiveMetadata() {
    MockHttpServletRequest request = new MockHttpServletRequest();
    request.addHeader("X-Polaris-Metadata-Transitive-a", "test");
    Map<String, String> resolve = CustomTransitiveMetadataResolver.resolve(request);
    assertThat(resolve.size()).isEqualTo(1);
    assertThat(resolve.get("a")).isEqualTo("test");
}
|
/**
 * Computes the combat level from the given skill levels, truncating the
 * precise (fractional) combat level to an int.
 *
 * @return the whole-number combat level
 */
public static int getCombatLevel(int attackLevel, int strengthLevel,
    int defenceLevel, int hitpointsLevel, int magicLevel,
    int rangeLevel, int prayerLevel)
{
    return (int) getCombatLevelPrecise(attackLevel, strengthLevel, defenceLevel, hitpointsLevel, magicLevel, rangeLevel, prayerLevel);
}
|
/** Spot-checks combat level for a near-maxed account and a mid-level account. */
@Test
public void testGetCombatLevel()
{
    assertEquals(126, Experience.getCombatLevel(99, 99, 99, 99, 70, 42, 98));
    assertEquals(40, Experience.getCombatLevel(27, 22, 1, 36, 64, 45, 1));
}
|
/**
 * Validates an ALTER readwrite-splitting rule statement against the current
 * rule configuration before the update is applied.
 */
@Override
public void checkBeforeUpdate(final AlterReadwriteSplittingRuleStatement sqlStatement) {
    ReadwriteSplittingRuleStatementChecker.checkAlteration(database, sqlStatement.getRules(), rule.getConfiguration());
}
|
/**
 * Altering a rule so that a write data source is duplicated across rules
 * must be rejected with DuplicateReadwriteSplittingActualDataSourceException.
 */
@Test
void assertCheckSQLStatementWithDuplicateWriteResourceNames() {
    ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
    when(database.getResourceMetaData()).thenReturn(resourceMetaData);
    ReadwriteSplittingRule rule = mock(ReadwriteSplittingRule.class);
    when(rule.getConfiguration()).thenReturn(createCurrentRuleConfigurationWithMultipleRules());
    executor.setRule(rule);
    assertThrows(DuplicateReadwriteSplittingActualDataSourceException.class,
        () -> executor.checkBeforeUpdate(
            createSQLStatement("readwrite_ds_0", "ds_write_1", Arrays.asList("read_ds_0", "read_ds_1"), "TEST")));
}
|
/**
 * Asserts that the subject string has exactly the given length.
 *
 * @param expectedLength the required length; must be non-negative
 */
public void hasLength(int expectedLength) {
    checkArgument(expectedLength >= 0, "expectedLength(%s) must be >= 0", expectedLength);
    check("length()").that(checkNotNull(actual).length()).isEqualTo(expectedLength);
}
|
/** "kurt" has length 4 — the happy path for hasLength(). */
@Test
public void hasLength() {
    assertThat("kurt").hasLength(4);
}
|
/**
 * Releases all conductor resources on shutdown.
 * <p>
 * Order matters here: the async task executor is stopped first (with a
 * bounded wait, reporting failure to the error handler rather than throwing),
 * then the name resolver is closed, all publications/images are freed, the
 * driver heartbeat is invalidated, and finally the CnC buffer is forced to
 * disk and the context closed.
 */
public void onClose()
{
    if (asyncTaskExecutor instanceof ExecutorService)
    {
        try
        {
            final ExecutorService executor = (ExecutorService)asyncTaskExecutor;
            executor.shutdownNow();
            // Bounded wait; a timeout is reported as an event, not an exception.
            if (!executor.awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT_SECONDS, TimeUnit.SECONDS))
            {
                ctx.errorHandler().onError(new AeronEvent("failed to shutdown async task executor"));
            }
        }
        catch (final Exception e)
        {
            ctx.errorHandler().onError(e);
        }
    }
    CloseHelper.close(ctx.errorHandler(), nameResolver);
    publicationImages.forEach(PublicationImage::free);
    networkPublications.forEach(NetworkPublication::free);
    ipcPublications.forEach(IpcPublication::free);
    freeEndOfLifeResources(Integer.MAX_VALUE);
    // Invalidate the consumer heartbeat so clients observe driver shutdown.
    toDriverCommands.consumerHeartbeatTime(Aeron.NULL_VALUE);
    ctx.cncByteBuffer().force();
    ctx.close();
}
|
/**
 * If the async executor fails to terminate within the timeout, onClose()
 * must report an AeronEvent through the error handler, in order:
 * shutdownNow -> awaitTermination -> onError.
 */
@Test
void onCloseShouldNotifyIfExecutorDoesNotCloseOnTime(@TempDir final Path dir) throws InterruptedException
{
    // Mocked executor's awaitTermination returns false by default => timeout path.
    final ExecutorService asyncTaskExecutor = mock(ExecutorService.class);
    final DriverConductor conductor = new DriverConductor(ctx.clone()
        .cncByteBuffer(IoUtil.mapNewFile(dir.resolve("some.txt").toFile(), 64))
        .asyncTaskExecutor(asyncTaskExecutor));
    conductor.onClose();
    final InOrder inOrder = inOrder(asyncTaskExecutor, mockErrorHandler);
    inOrder.verify(asyncTaskExecutor).shutdownNow();
    inOrder.verify(asyncTaskExecutor).awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT_SECONDS, SECONDS);
    inOrder.verify(mockErrorHandler).onError(argThat(
        arg -> arg instanceof AeronEvent &&
        arg.getMessage().equals("WARN - failed to shutdown async task executor")));
    inOrder.verifyNoMoreInteractions();
}
|
/**
 * Default string form; delegates to {@code toString(true)}.
 * NOTE(review): the boolean's meaning is defined by the overload — presumably
 * whether quota columns are included; confirm against toString(boolean).
 */
@Override
public String toString() {
    return toString(true);
}
|
/**
 * A ContentSummary built without quotas renders "none"/"inf" in the quota
 * columns and the raw counts in fixed-width columns.
 */
@Test
public void testToStringNoQuota() {
    long length = 11111;
    long fileCount = 22222;
    long directoryCount = 33333;
    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
        fileCount(fileCount).directoryCount(directoryCount).build();
    String expected = "       none             inf            none             inf"
        + "           33333           22222              11111 ";
    assertEquals(expected, contentSummary.toString(true));
}
|
/**
 * Runs the category-specific validator for each column against the control
 * and test checksums and returns only the results that did not match.
 *
 * @return list of mismatched column results; empty when everything matched
 */
public List<ColumnMatchResult<?>> getMismatchedColumns(List<Column> columns, ChecksumResult controlChecksum, ChecksumResult testChecksum)
{
    return columns.stream()
        .flatMap(column -> columnValidators.get(column.getCategory()).get().validate(column, controlChecksum, testChecksum).stream())
        .filter(columnMatchResult -> !columnMatchResult.isMatched())
        .collect(toImmutableList());
}
|
/**
 * Floating-point sums very close to zero (|x| <= ~5e-12) are treated as
 * matching even when signs differ; slightly larger deviations are mismatches.
 */
@Test
public void testFloatingPointCloseToZero()
{
    List<Column> columns = ImmutableList.of(DOUBLE_COLUMN, REAL_COLUMN);
    // Matched: tiny sums around zero compare equal despite sign/zero differences.
    ChecksumResult controlChecksum = new ChecksumResult(
        5,
        ImmutableMap.<String, Object>builder()
            .putAll(FLOATING_POINT_COUNTS)
            .put("double$sum", -4.9e-12)
            .put("real$sum", 4.9e-12)
            .build());
    ChecksumResult testChecksum = new ChecksumResult(
        5,
        ImmutableMap.<String, Object>builder()
            .putAll(FLOATING_POINT_COUNTS)
            .put("double$sum", 4.9e-12)
            .put("real$sum", 0.0)
            .build());
    assertTrue(checksumValidator.getMismatchedColumns(columns, controlChecksum, testChecksum).isEmpty());
    // Mismatched: 5.1e-12 deviation exceeds the near-zero tolerance.
    controlChecksum = new ChecksumResult(
        5,
        ImmutableMap.<String, Object>builder()
            .putAll(FLOATING_POINT_COUNTS)
            .put("double$sum", 0.0)
            .put("real$sum", 5.1e-12)
            .build());
    testChecksum = new ChecksumResult(
        5,
        ImmutableMap.<String, Object>builder()
            .putAll(FLOATING_POINT_COUNTS)
            .put("double$sum", 5.1e-12)
            .put("real$sum", 0.0)
            .build());
    assertMismatchedColumns(columns, controlChecksum, testChecksum, DOUBLE_COLUMN, REAL_COLUMN);
}
|
/**
 * Returns a new vector equal to this one merged with {@code other};
 * neither operand is mutated.
 *
 * @param other the offset vector to merge in
 * @return a fresh merged copy
 */
public PushOffsetVector mergeCopy(final OffsetVector other) {
    final PushOffsetVector copy = copy();
    copy.merge(other);
    return copy;
}
|
/**
 * Merging with an empty vector is an identity operation in both directions.
 */
@Test
public void shouldMerge_empty() {
    // Given:
    PushOffsetVector pushOffsetVector1 = new PushOffsetVector(ImmutableList.of(1L, 2L, 3L));
    PushOffsetVector pushOffsetVector2 = new PushOffsetVector();
    // Then: empty merged either way yields the non-empty operand's offsets.
    assertThat(pushOffsetVector1.mergeCopy(pushOffsetVector2),
        is(new PushOffsetVector(ImmutableList.of(1L, 2L, 3L))));
    assertThat(pushOffsetVector2.mergeCopy(pushOffsetVector1),
        is(new PushOffsetVector(ImmutableList.of(1L, 2L, 3L))));
}
|
/**
 * Builder setter for the {@code ondisconnect} callback name.
 *
 * @param ondisconnect the callback value to store
 * @return this builder, for chaining
 */
public B ondisconnect(String ondisconnect) {
    this.ondisconnect = ondisconnect;
    return getThis();
}
|
/** The builder's ondisconnect value round-trips into the built interface. */
@Test
void ondisconnect() {
    InterfaceBuilder builder = new InterfaceBuilder();
    builder.ondisconnect("ondisconnect");
    Assertions.assertEquals("ondisconnect", builder.build().getOndisconnect());
}
|
/**
 * Creates one SQL gateway endpoint per identifier configured under
 * {@code SQL_GATEWAY_ENDPOINT_TYPE}, discovering each factory from the
 * context classloader.
 *
 * @param service the gateway service handed to each endpoint
 * @param configuration the configuration holding the endpoint type list
 * @return endpoints in the order their identifiers were configured
 * @throws ValidationException if no endpoint type is configured
 */
public static List<SqlGatewayEndpoint> createSqlGatewayEndpoint(
    SqlGatewayService service, Configuration configuration) {
    List<String> identifiers = configuration.get(SQL_GATEWAY_ENDPOINT_TYPE);
    if (identifiers == null || identifiers.isEmpty()) {
        throw new ValidationException(
            String.format(
                "Endpoint options do not contain an option key '%s' for discovering an endpoint.",
                SQL_GATEWAY_ENDPOINT_TYPE.key()));
    }
    // Duplicate identifiers are rejected before any factory is discovered.
    validateSpecifiedEndpointsAreUnique(identifiers);
    List<SqlGatewayEndpoint> endpoints = new ArrayList<>();
    for (String identifier : identifiers) {
        final SqlGatewayEndpointFactory factory =
            FactoryUtil.discoverFactory(
                Thread.currentThread().getContextClassLoader(),
                SqlGatewayEndpointFactory.class,
                identifier);
        endpoints.add(
            factory.createSqlGatewayEndpoint(
                new DefaultEndpointFactoryContext(
                    service,
                    configuration,
                    getEndpointConfig(configuration, identifier))));
    }
    return endpoints;
}
|
/**
 * Two endpoint types separated by ';' produce two endpoints, preserving the
 * configured order (mocked first, fake second).
 */
@Test
public void testCreateEndpoints() {
    String id = UUID.randomUUID().toString();
    Map<String, String> config = getDefaultConfig(id);
    config.put("sql-gateway.endpoint.type", "mocked;fake");
    List<SqlGatewayEndpoint> actual =
        createSqlGatewayEndpoint(
            new MockedSqlGatewayService(), Configuration.fromMap(config));
    MockedSqlGatewayEndpoint expectedMocked =
        new MockedSqlGatewayEndpoint(id, "localhost", 9999, "Hello World.");
    assertThat(actual)
        .isEqualTo(Arrays.asList(expectedMocked, FakeSqlGatewayEndpoint.INSTANCE));
}
|
/**
 * Builds a ClickHouse write transform for the given JDBC URL and table,
 * using default retry/backoff/block-size settings and enabling insert
 * deduplication and synchronous distributed inserts.
 *
 * @param jdbcUrl the ClickHouse JDBC connection URL
 * @param table the destination table name
 * @return a configured {@code Write} transform
 */
public static <T> Write<T> write(String jdbcUrl, String table) {
    return new AutoValue_ClickHouseIO_Write.Builder<T>()
        .jdbcUrl(jdbcUrl)
        .table(table)
        .properties(new Properties())
        .maxInsertBlockSize(DEFAULT_MAX_INSERT_BLOCK_SIZE)
        .initialBackoff(DEFAULT_INITIAL_BACKOFF)
        .maxRetries(DEFAULT_MAX_RETRIES)
        .maxCumulativeBackoff(DEFAULT_MAX_CUMULATIVE_BACKOFF)
        .build()
        .withInsertDeduplicate(true)
        .withInsertDistributedSync(true);
}
|
/**
 * A Beam Row field mapped to a ClickHouse named Tuple writes correctly and
 * is readable both as the whole tuple and as individual tuple members.
 */
@Test
public void testTupleType() throws Exception {
    Schema tupleSchema =
        Schema.of(
            Schema.Field.of("f0", FieldType.STRING), Schema.Field.of("f1", FieldType.BOOLEAN));
    Schema schema = Schema.of(Schema.Field.of("t0", FieldType.row(tupleSchema)));
    Row row1Tuple = Row.withSchema(tupleSchema).addValue("tuple").addValue(true).build();
    Row row1 = Row.withSchema(schema).addValue(row1Tuple).build();
    executeSql(
        "CREATE TABLE test_named_tuples (" + "t0 Tuple(`f0` String, `f1` Bool)" + ") ENGINE=Log");
    pipeline.apply(Create.of(row1).withRowSchema(schema)).apply(write("test_named_tuples"));
    pipeline.run().waitUntilFinish();
    // Whole-tuple read renders as a bracketed list.
    try (ResultSet rs = executeQuery("SELECT * FROM test_named_tuples")) {
        rs.next();
        assertEquals("[tuple, true]", rs.getString("t0"));
    }
    // Member access via t0.f0 / t0.f1 yields the individual values.
    try (ResultSet rs = executeQuery("SELECT t0.f0 as f0, t0.f1 as f1 FROM test_named_tuples")) {
        rs.next();
        assertEquals("tuple", rs.getString("f0"));
        assertEquals("true", rs.getString("f1"));
    }
}
|
/**
 * Resolves the output schema of an execution step by dispatching to the
 * handler registered for the step's concrete class.
 *
 * @throws IllegalStateException if no handler is registered for the step class
 */
public LogicalSchema resolve(final ExecutionStep<?> step, final LogicalSchema schema) {
    return Optional.ofNullable(HANDLERS.get(step.getClass()))
        .map(h -> h.handle(this, schema, step))
        .orElseThrow(() -> new IllegalStateException("Unhandled step class: " + step.getClass()));
}
|
/**
 * A WindowedTableSource step resolves to the source schema with pseudo and
 * key columns copied into the value.
 */
@Test
public void shouldResolveSchemaForWindowedTableSource() {
    // Given:
    final WindowedTableSource step = new WindowedTableSource(
        PROPERTIES,
        "foo",
        formats,
        mock(WindowInfo.class),
        Optional.empty(),
        SCHEMA,
        OptionalInt.of(SystemColumns.CURRENT_PSEUDOCOLUMN_VERSION_NUMBER)
    );
    // When:
    final LogicalSchema result = resolver.resolve(step, SCHEMA);
    // Then:
    assertThat(result, is(SCHEMA.withPseudoAndKeyColsInValue(true)));
}
|
/**
 * Handles a set of corrupted tasks: cleans and revives corrupted standbys
 * first, commits all still-healthy running tasks (so any ongoing transaction
 * is resolved before corrupted actives are torn down), then cleans and
 * revives the corrupted active tasks.
 *
 * @param corruptedTasks ids of tasks reported corrupted
 * @return true if any active task was corrupted (i.e. processing was affected)
 */
boolean handleCorruption(final Set<TaskId> corruptedTasks) {
    final Set<TaskId> activeTasks = new HashSet<>(tasks.activeTaskIds());
    // We need to stop all processing, since we need to commit non-corrupted tasks as well.
    maybeLockTasks(activeTasks);
    // Split the corrupted ids into active vs standby tasks.
    final Set<Task> corruptedActiveTasks = new HashSet<>();
    final Set<Task> corruptedStandbyTasks = new HashSet<>();
    for (final TaskId taskId : corruptedTasks) {
        final Task task = tasks.task(taskId);
        if (task.isActive()) {
            corruptedActiveTasks.add(task);
        } else {
            corruptedStandbyTasks.add(task);
        }
    }
    // Make sure to clean up any corrupted standby tasks in their entirety before committing
    // since TaskMigrated can be thrown and the resulting handleLostAll will only clean up active tasks
    closeDirtyAndRevive(corruptedStandbyTasks, true);
    // We need to commit before closing the corrupted active tasks since this will force the ongoing txn to abort
    try {
        final Collection<Task> tasksToCommit = tasks.allTasksPerId()
            .values()
            .stream()
            .filter(t -> t.state() == Task.State.RUNNING)
            .filter(t -> !corruptedTasks.contains(t.id()))
            .collect(Collectors.toSet());
        commitTasksAndMaybeUpdateCommittableOffsets(tasksToCommit, new HashMap<>());
    } catch (final TaskCorruptedException e) {
        // Committing can itself surface more corruption; fold those tasks into the revive set.
        log.info("Some additional tasks were found corrupted while trying to commit, these will be added to the " +
            "tasks to clean and revive: {}", e.corruptedTasks());
        corruptedActiveTasks.addAll(tasks.tasks(e.corruptedTasks()));
    } catch (final TimeoutException e) {
        log.info("Hit TimeoutException when committing all non-corrupted tasks, these will be closed and revived");
        final Collection<Task> uncorruptedTasks = new HashSet<>(tasks.activeTasks());
        uncorruptedTasks.removeAll(corruptedActiveTasks);
        // Those tasks which just timed out can just be closed dirty without marking changelogs as corrupted
        closeDirtyAndRevive(uncorruptedTasks, false);
    }
    closeDirtyAndRevive(corruptedActiveTasks, true);
    maybeUnlockTasks(activeTasks);
    return !corruptedActiveTasks.isEmpty();
}
|
/**
 * Corrupted active and standby tasks must each be closed dirty, revived,
 * removed from the registry, and re-queued for initialization.
 */
@Test
public void shouldReAddRevivedTasksToStateUpdater() {
    final StreamTask corruptedActiveTask = statefulTask(taskId03, taskId03ChangelogPartitions)
        .inState(State.RESTORING)
        .withInputPartitions(taskId03Partitions).build();
    final StandbyTask corruptedStandbyTask = standbyTask(taskId02, taskId02ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId02Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManager(ProcessingMode.AT_LEAST_ONCE, tasks, true);
    when(tasks.task(taskId03)).thenReturn(corruptedActiveTask);
    when(tasks.task(taskId02)).thenReturn(corruptedStandbyTask);
    taskManager.handleCorruption(mkSet(corruptedActiveTask.id(), corruptedStandbyTask.id()));
    // Each task must observe closeDirty strictly before revive.
    final InOrder activeTaskOrder = inOrder(corruptedActiveTask);
    activeTaskOrder.verify(corruptedActiveTask).closeDirty();
    activeTaskOrder.verify(corruptedActiveTask).revive();
    final InOrder standbyTaskOrder = inOrder(corruptedStandbyTask);
    standbyTaskOrder.verify(corruptedStandbyTask).closeDirty();
    standbyTaskOrder.verify(corruptedStandbyTask).revive();
    verify(tasks).removeTask(corruptedActiveTask);
    verify(tasks).removeTask(corruptedStandbyTask);
    verify(tasks).addPendingTasksToInit(mkSet(corruptedActiveTask));
    verify(tasks).addPendingTasksToInit(mkSet(corruptedStandbyTask));
    verify(consumer).assignment();
}
|
/**
 * Combines selectors with OR semantics: the result selects a member if any
 * of the given selectors selects it.
 *
 * @param selectors the selectors to combine
 * @return a selector that ORs the given selectors
 */
public static MemberSelector or(MemberSelector... selectors) {
    return new OrMemberSelector(selectors);
}
|
/**
 * OR short-circuits: when the first selector (local-member) matches, the
 * second (lite-member) is never consulted.
 */
@Test
public void testOrMemberSelector() {
    when(member.localMember()).thenReturn(true);
    MemberSelector selector = MemberSelectors.or(LOCAL_MEMBER_SELECTOR, LITE_MEMBER_SELECTOR);
    assertTrue(selector.select(member));
    verify(member).localMember();
    verify(member, never()).isLiteMember();
}
|
/**
 * Derives a Beam schema for a POJO class via Avro reflection, using the
 * class's own classloader for reflection data.
 *
 * @param clazz the class to reflect over
 * @return the equivalent Beam schema
 */
public static Schema toBeamSchema(Class<?> clazz) {
    ReflectData data = new ReflectData(clazz.getClassLoader());
    return toBeamSchema(data.getSchema(clazz));
}
|
/** The fixture Avro schema converts to the expected Beam schema. */
@Test
public void testFromAvroSchema() {
    assertEquals(getBeamSchema(), AvroUtils.toBeamSchema(getAvroSchema()));
}
|
/**
 * Returns the stored absolute path string.
 */
@Override
public String getAbsolute() {
    return path;
}
|
/**
 * A path nested under a volume container resolves to the concatenated
 * absolute form "/container/child".
 */
@Test
public void testPathContainer() {
    final Path path = new Path(new Path("test.cyberduck.ch",
        EnumSet.of(Path.Type.volume, Path.Type.directory)), "/test", EnumSet.of(Path.Type.directory));
    assertEquals("/test.cyberduck.ch/test", path.getAbsolute());
}
|
/**
 * Verifies that the current principal owns the given transaction.
 * Must be invoked on the channel's event loop; the continuation is also
 * scheduled back onto it. If the principal is not the owner and both
 * authentication and authorization are enabled, falls back to a super-user
 * check; otherwise ownership is denied.
 *
 * @param txnID the transaction to check
 * @return future completing with true if the caller may operate on the txn
 */
private CompletableFuture<Boolean> verifyTxnOwnership(TxnID txnID) {
    assert ctx.executor().inEventLoop();
    return service.pulsar().getTransactionMetadataStoreService()
        .verifyTxnOwnership(txnID, getPrincipal())
        .thenComposeAsync(isOwner -> {
            if (isOwner) {
                return CompletableFuture.completedFuture(true);
            }
            if (service.isAuthenticationEnabled() && service.isAuthorizationEnabled()) {
                return isSuperUser();
            } else {
                return CompletableFuture.completedFuture(false);
            }
        }, ctx.executor());
}
|
/**
 * When the metadata store fails to add a produced partition, the broker
 * must answer CommandAddPartitionToTxn with the original request/txn ids
 * and the server error message.
 */
@Test(timeOut = 30000)
public void sendAddPartitionToTxnResponseFailed() throws Exception {
    final TransactionMetadataStoreService txnStore = mock(TransactionMetadataStoreService.class);
    when(txnStore.getTxnMeta(any())).thenReturn(CompletableFuture.completedFuture(mock(TxnMeta.class)));
    when(txnStore.verifyTxnOwnership(any(), any())).thenReturn(CompletableFuture.completedFuture(true));
    // Force the failure path: adding the partition rejects with a server error.
    when(txnStore.addProducedPartitionToTxn(any(TxnID.class), any()))
        .thenReturn(CompletableFuture.failedFuture(new RuntimeException("server error")));
    when(pulsar.getTransactionMetadataStoreService()).thenReturn(txnStore);
    svcConfig.setTransactionCoordinatorEnabled(true);
    resetChannel();
    setChannelConnected();
    ByteBuf clientCommand = Commands.newAddPartitionToTxn(89L, 1L, 12L,
        List.of("tenant/ns/topic1"));
    channel.writeInbound(clientCommand);
    CommandAddPartitionToTxnResponse response = (CommandAddPartitionToTxnResponse) getResponse();
    assertEquals(response.getRequestId(), 89L);
    assertEquals(response.getTxnidLeastBits(), 1L);
    assertEquals(response.getTxnidMostBits(), 12L);
    assertEquals(response.getError().getValue(), 0);
    assertEquals(response.getMessage(), "server error");
    channel.finish();
}
|
/**
 * Validates an index name against DB identifier rules and the max index
 * name size, returning it unchanged when valid.
 *
 * @param indexName the name to validate; may be null (checked by the helper)
 * @return the same {@code indexName} when validation passes
 */
public static String validateIndexName(@Nullable String indexName) {
    checkDbIdentifier(indexName, "Index name", INDEX_NAME_MAX_SIZE);
    return indexName;
}
|
/** A valid index name passes through unchanged. */
@Test
public void validateIndexName_returns_valid_name() {
    assertThat(validateIndexName("foo")).isEqualTo("foo");
}
|
/**
 * Lifecycle teardown: shuts down the SQS client if one exists.
 * Shutdown failures are logged, not rethrown, so destroy() never fails.
 */
@Override
public void destroy() {
    if (this.sqsClient != null) {
        try {
            this.sqsClient.shutdown();
        } catch (Exception e) {
            log.error("Failed to shutdown SQS client during destroy()", e);
        }
    }
}
|
/** destroy() delegates to the SQS client's shutdown when a client is present. */
@Test
void givenSqsClientIsNotNull_whenDestroy_thenShutdown() {
    node.destroy();
    then(sqsClientMock).should().shutdown();
}
|
/**
 * Factory for a do-while loop template node.
 * NOTE(review): the cast requires {@code body} to actually be a
 * {@code USimpleStatement}; other {@code UStatement} implementations would
 * throw ClassCastException here.
 *
 * @param body the loop body
 * @param condition the loop condition expression
 */
public static UDoWhileLoop create(UStatement body, UExpression condition) {
    return new AutoValue_UDoWhileLoop((USimpleStatement) body, condition);
}
|
/**
 * Two do-while templates that share the same body but differ only in their
 * loop condition (!= -1 vs >= 0) must fall into distinct equality groups.
 */
@Test
public void equality() {
    new EqualsTester()
        // Group 1: do { old = str.indexOf(' ', old + 1); } while (old != -1);
        .addEqualityGroup(
            UDoWhileLoop.create(
                UBlock.create(
                    UExpressionStatement.create(
                        UAssign.create(
                            ULocalVarIdent.create("old"),
                            UMethodInvocation.create(
                                UMemberSelect.create(
                                    UFreeIdent.create("str"),
                                    "indexOf",
                                    UMethodType.create(
                                        UPrimitiveType.INT,
                                        UPrimitiveType.INT,
                                        UPrimitiveType.INT)),
                                ULiteral.charLit(' '),
                                UBinary.create(
                                    Kind.PLUS,
                                    ULocalVarIdent.create("old"),
                                    ULiteral.intLit(1)))))),
                UParens.create(
                    UBinary.create(
                        Kind.NOT_EQUAL_TO, ULocalVarIdent.create("old"), ULiteral.intLit(-1)))))
        // Group 2: same body, condition (old >= 0) — must not equal group 1.
        .addEqualityGroup(
            UDoWhileLoop.create(
                UBlock.create(
                    UExpressionStatement.create(
                        UAssign.create(
                            ULocalVarIdent.create("old"),
                            UMethodInvocation.create(
                                UMemberSelect.create(
                                    UFreeIdent.create("str"),
                                    "indexOf",
                                    UMethodType.create(
                                        UPrimitiveType.INT,
                                        UPrimitiveType.INT,
                                        UPrimitiveType.INT)),
                                ULiteral.charLit(' '),
                                UBinary.create(
                                    Kind.PLUS,
                                    ULocalVarIdent.create("old"),
                                    ULiteral.intLit(1)))))),
                UParens.create(
                    UBinary.create(
                        Kind.GREATER_THAN_EQUAL,
                        ULocalVarIdent.create("old"),
                        ULiteral.intLit(0)))))
        .testEquals();
}
|
/**
 * Converts a Flink table expression into an Iceberg filter expression.
 * Only {@link CallExpression}s whose function is present in the FILTERS
 * map are supported; anything else yields {@code Optional.empty()} so the
 * caller can fall back to un-pushed filtering.
 *
 * @param flinkExpression the Flink expression to convert
 * @return the equivalent Iceberg expression, or empty if not convertible
 */
public static Optional<Expression> convert(
    org.apache.flink.table.expressions.Expression flinkExpression) {
    if (!(flinkExpression instanceof CallExpression)) {
        return Optional.empty();
    }
    CallExpression call = (CallExpression) flinkExpression;
    Operation op = FILTERS.get(call.getFunctionDefinition());
    if (op != null) {
        switch (op) {
            case IS_NULL:
                return onlyChildAs(call, FieldReferenceExpression.class)
                    .map(FieldReferenceExpression::getName)
                    .map(Expressions::isNull);
            case NOT_NULL:
                return onlyChildAs(call, FieldReferenceExpression.class)
                    .map(FieldReferenceExpression::getName)
                    .map(Expressions::notNull);
            // For comparisons, the second function argument is the flipped
            // operator used when the literal appears on the left-hand side.
            case LT:
                return convertFieldAndLiteral(Expressions::lessThan, Expressions::greaterThan, call);
            case LT_EQ:
                return convertFieldAndLiteral(
                    Expressions::lessThanOrEqual, Expressions::greaterThanOrEqual, call);
            case GT:
                return convertFieldAndLiteral(Expressions::greaterThan, Expressions::lessThan, call);
            case GT_EQ:
                return convertFieldAndLiteral(
                    Expressions::greaterThanOrEqual, Expressions::lessThanOrEqual, call);
            case EQ:
                // NaN never compares equal, so equality against NaN becomes isNaN.
                return convertFieldAndLiteral(
                    (ref, lit) -> {
                        if (NaNUtil.isNaN(lit)) {
                            return Expressions.isNaN(ref);
                        } else {
                            return Expressions.equal(ref, lit);
                        }
                    },
                    call);
            case NOT_EQ:
                return convertFieldAndLiteral(
                    (ref, lit) -> {
                        if (NaNUtil.isNaN(lit)) {
                            return Expressions.notNaN(ref);
                        } else {
                            return Expressions.notEqual(ref, lit);
                        }
                    },
                    call);
            case NOT:
                // Recursively convert the negated child expression.
                return onlyChildAs(call, CallExpression.class)
                    .flatMap(FlinkFilters::convert)
                    .map(Expressions::not);
            case AND:
                return convertLogicExpression(Expressions::and, call);
            case OR:
                return convertLogicExpression(Expressions::or, call);
            case STARTS_WITH:
                return convertLike(call);
        }
    }
    return Optional.empty();
}
|
/**
 * field1 = 1 OR field2 = 2 converts to an Iceberg Or whose left and right
 * predicates match the corresponding equality expressions.
 */
@Test
public void testOr() {
    Expression expr =
        resolve(
            Expressions.$("field1")
                .isEqual(Expressions.lit(1))
                .or(Expressions.$("field2").isEqual(Expressions.lit(2L))));
    Optional<org.apache.iceberg.expressions.Expression> actual = FlinkFilters.convert(expr);
    assertThat(actual).isPresent();
    Or or = (Or) actual.get();
    Or expected =
        (Or)
            org.apache.iceberg.expressions.Expressions.or(
                org.apache.iceberg.expressions.Expressions.equal("field1", 1),
                org.apache.iceberg.expressions.Expressions.equal("field2", 2L));
    assertPredicatesMatch(expected.left(), or.left());
    assertPredicatesMatch(expected.right(), or.right());
}
|
/**
 * Determines whether a transaction may be selected for spending.
 * A transaction qualifies when it is already included in the best chain, or
 * when it is our own pending transaction that has been seen propagating
 * (regtest networks skip the propagation requirement since there is
 * typically only one peer).
 *
 * @param tx the candidate transaction
 * @param network the network the wallet operates on
 * @return true if the transaction is safe to select
 */
public static boolean isSelectable(Transaction tx, Network network) {
    final TransactionConfidence confidence = tx.getConfidence();
    final TransactionConfidence.ConfidenceType confidenceType = confidence.getConfidenceType();

    // Chain-included transactions are always selectable.
    if (confidenceType.equals(TransactionConfidence.ConfidenceType.BUILDING)) {
        return true;
    }

    // Pending transactions qualify only if they originate from us...
    final boolean pendingAndOurs =
        confidenceType.equals(TransactionConfidence.ConfidenceType.PENDING)
            && confidence.getSource().equals(TransactionConfidence.Source.SELF);

    // ...and have been observed propagating. In regtest mode we expect to have
    // only one peer, so we won't see transactions propagate.
    final boolean seenPropagating =
        confidence.numBroadcastPeers() > 0 || network == BitcoinNetwork.REGTEST;

    return pendingAndOurs && seenPropagating;
}
|
/**
 * Exercises all selection branches: pending-not-ours is rejected, pending
 * self-sourced needs at least one broadcast peer, chain-included is always
 * accepted, and regtest waives the broadcast requirement.
 */
@Test
public void selectable() throws Exception {
    Transaction t;
    // Pending but not sourced by us: not selectable.
    t = new Transaction();
    t.getConfidence().setConfidenceType(TransactionConfidence.ConfidenceType.PENDING);
    assertFalse(DefaultCoinSelector.isSelectable(t, TESTNET));
    // Ours but not yet seen by any peer: still not selectable.
    t.getConfidence().setSource(TransactionConfidence.Source.SELF);
    assertFalse(DefaultCoinSelector.isSelectable(t, TESTNET));
    // One broadcast peer is enough.
    t.getConfidence().markBroadcastBy(PeerAddress.simple(InetAddress.getByName("1.2.3.4"), TESTNET_PARAMS.getPort()));
    assertTrue(DefaultCoinSelector.isSelectable(t, TESTNET));
    t.getConfidence().markBroadcastBy(PeerAddress.simple(InetAddress.getByName("5.6.7.8"), TESTNET_PARAMS.getPort()));
    assertTrue(DefaultCoinSelector.isSelectable(t, TESTNET));
    // Chain-included transactions are always selectable.
    t = new Transaction();
    t.getConfidence().setConfidenceType(TransactionConfidence.ConfidenceType.BUILDING);
    assertTrue(DefaultCoinSelector.isSelectable(t, TESTNET));
    // Regtest: our pending tx is selectable without any broadcast peers.
    t = new Transaction();
    t.getConfidence().setConfidenceType(TransactionConfidence.ConfidenceType.PENDING);
    t.getConfidence().setSource(TransactionConfidence.Source.SELF);
    assertTrue(DefaultCoinSelector.isSelectable(t, REGTEST));
}
|
/**
 * Parses port specifications into a set of {@code Port}s. Each entry is a
 * single port or an inclusive range, optionally suffixed with a protocol
 * (e.g. "80", "8000-8010", "53/udp").
 *
 * @param ports the raw specifications to parse
 * @return the expanded set of ports
 * @throws NumberFormatException if an entry is malformed, the range is
 *     reversed, or a port number falls outside 1-65535
 */
public static Set<Port> parse(List<String> ports) throws NumberFormatException {
    Set<Port> result = new HashSet<>();
    for (String port : ports) {
        Matcher matcher = portPattern.matcher(port);
        if (!matcher.matches()) {
            throw new NumberFormatException(
                "Invalid port configuration: '"
                    + port
                    + "'. Make sure the port is a single number or a range of two numbers separated "
                    + "with a '-', with or without protocol specified (e.g. '<portNum>/tcp' or "
                    + "'<portNum>/udp').");
        }
        // Parse the port number (group 1) and the optional range end (group 2).
        int min = Integer.parseInt(matcher.group(1));
        int max = min;
        if (!Strings.isNullOrEmpty(matcher.group(2))) {
            max = Integer.parseInt(matcher.group(2));
        }
        // Optional protocol suffix (group 3); may be null.
        String protocol = matcher.group(3);
        // Error if configured as 'max-min' instead of 'min-max'
        if (min > max) {
            throw new NumberFormatException(
                "Invalid port range '" + port + "'; smaller number must come first.");
        }
        // Reject port numbers outside the valid range (message says "usual
        // range" but this throws rather than warning).
        if (min < 1 || max > 65535) {
            throw new NumberFormatException(
                "Port number '" + port + "' is out of usual range (1-65535).");
        }
        // Expand the inclusive range into individual ports.
        for (int portNumber = min; portNumber <= max; portNumber++) {
            result.add(Port.parseProtocol(portNumber, protocol));
        }
    }
    return result;
}
|
/**
 * Covers valid single ports, ranges and protocol suffixes; malformed
 * entries; reversed ranges; and out-of-range port numbers — asserting the
 * exact error message for each failure class.
 */
@Test
public void testParse() {
    // Valid inputs: singles, ranges, degenerate range, tcp/udp suffixes.
    List<String> goodInputs =
        Arrays.asList("1000", "2000-2003", "3000-3000", "4000/tcp", "5000/udp", "6000-6002/udp");
    ImmutableSet<Port> expected =
        new ImmutableSet.Builder<Port>()
            .add(
                Port.tcp(1000),
                Port.tcp(2000),
                Port.tcp(2001),
                Port.tcp(2002),
                Port.tcp(2003),
                Port.tcp(3000),
                Port.tcp(4000),
                Port.udp(5000),
                Port.udp(6000),
                Port.udp(6001),
                Port.udp(6002))
            .build();
    Set<Port> result = Ports.parse(goodInputs);
    Assert.assertEquals(expected, result);
    // Malformed inputs must fail with the full configuration-error message.
    List<String> badInputs = Arrays.asList("abc", "/udp", "1000/abc", "a100/tcp", "20/udpabc");
    for (String input : badInputs) {
        try {
            Ports.parse(Collections.singletonList(input));
            Assert.fail();
        } catch (NumberFormatException ex) {
            Assert.assertEquals(
                "Invalid port configuration: '"
                    + input
                    + "'. Make sure the port is a single number or a range of two numbers separated "
                    + "with a '-', with or without protocol specified (e.g. '<portNum>/tcp' or "
                    + "'<portNum>/udp').",
                ex.getMessage());
        }
    }
    // A reversed range is rejected.
    try {
        Ports.parse(Collections.singletonList("4002-4000"));
        Assert.fail();
    } catch (NumberFormatException ex) {
        Assert.assertEquals(
            "Invalid port range '4002-4000'; smaller number must come first.", ex.getMessage());
    }
    // Port numbers outside 1-65535 are rejected.
    badInputs = Arrays.asList("0", "70000", "0-400", "1-70000");
    for (String input : badInputs) {
        try {
            Ports.parse(Collections.singletonList(input));
            Assert.fail();
        } catch (NumberFormatException ex) {
            Assert.assertEquals(
                "Port number '" + input + "' is out of usual range (1-65535).", ex.getMessage());
        }
    }
}
|
/**
 * Accumulates bytes from {@code source} into {@code target} until
 * {@code targetAccumulationSize} bytes are available.
 * <p>
 * Fast path: when nothing has been accumulated yet and {@code source}
 * already holds enough readable bytes, {@code source} itself is returned
 * without copying. Otherwise up to the remaining needed bytes are copied
 * into {@code target}; {@code target} is returned once full, or null if
 * more data is still required.
 *
 * @param target the accumulation buffer
 * @param source the incoming buffer to drain from
 * @param targetAccumulationSize total bytes required
 * @param accumulatedSize bytes already present in {@code target}
 * @return the buffer holding the complete accumulation, or null if incomplete
 */
@Nullable
public static ByteBuf accumulate(
    ByteBuf target, ByteBuf source, int targetAccumulationSize, int accumulatedSize) {
    if (accumulatedSize == 0 && source.readableBytes() >= targetAccumulationSize) {
        return source;
    }
    int copyLength = Math.min(source.readableBytes(), targetAccumulationSize - accumulatedSize);
    if (copyLength > 0) {
        target.writeBytes(source, copyLength);
    }
    if (accumulatedSize + copyLength == targetAccumulationSize) {
        return target;
    }
    return null;
}
|
/**
 * When the source already contains the full accumulation and nothing has
 * been copied yet, accumulate() returns the source unchanged (zero-copy):
 * same instance, reader index untouched, content intact.
 */
@Test
void testAccumulateWithoutCopy() {
    int sourceLength = 128;
    int sourceReaderIndex = 32;
    int expectedAccumulationSize = 16;
    ByteBuf src = createSourceBuffer(sourceLength, sourceReaderIndex, expectedAccumulationSize);
    ByteBuf target = Unpooled.buffer(expectedAccumulationSize);
    // If src has enough data and no data has been copied yet, src will be returned without
    // modification.
    ByteBuf accumulated =
        ByteBufUtils.accumulate(
            target, src, expectedAccumulationSize, target.readableBytes());
    assertThat(accumulated).isSameAs(src);
    assertThat(src.readerIndex()).isEqualTo(sourceReaderIndex);
    verifyBufferContent(src, sourceReaderIndex, expectedAccumulationSize);
}
|
/**
 * Sets the memory limit in bytes.
 *
 * @param memoryLimit the new limit; must be strictly positive
 * @throws IllegalArgumentException if {@code memoryLimit} is zero or negative
 */
public void setMemoryLimit(final long memoryLimit) {
    if (memoryLimit <= 0) {
        // Include the offending value so callers can diagnose the bad input.
        throw new IllegalArgumentException("memoryLimit must be positive, but was: " + memoryLimit);
    }
    this.memoryLimit = memoryLimit;
}
|
/** A non-positive memory limit is rejected with IllegalArgumentException. */
@Test
public void testSetMemoryLimiterWhenIllegal() {
    long lessThanZero = -1;
    MemoryLimiter memoryLimiter = new MemoryLimiter(instrumentation);
    assertThrows(IllegalArgumentException.class, () -> memoryLimiter.setMemoryLimit(lessThanZero));
}
|
/**
 * Corporate authenticators never represent individual users.
 *
 * @return always false
 */
@Override
public boolean isIndividual() {
    return false;
}
|
/** The corporate authenticator reports itself as non-individual. */
@Test
public void testIsIndividual() {
    CorporateEdsLoginAuthenticator authenticator = new CorporateEdsLoginAuthenticator();
    assertThat(authenticator.isIndividual()).isFalse();
}
|
/**
 * Checks whether {@code date} falls within [{@code beginDate},
 * {@code endDate}]; delegates to {@link TemporalAccessorUtil#isIn}.
 *
 * @return true if the date lies in the range
 */
public static boolean isIn(ChronoLocalDateTime<?> date, ChronoLocalDateTime<?> beginDate, ChronoLocalDateTime<?> endDate) {
    return TemporalAccessorUtil.isIn(date, beginDate, endDate);
}
|
/**
 * Exercises isIn(): inclusive default bounds, explicit boundary-inclusion
 * flags, swapped begin/end, ranges around the current time, and null
 * argument rejection.
 */
@SuppressWarnings("ConstantConditions")
@Test
public void isIn() {
    // Range under test: 08:00 - 09:00.
    final LocalDateTime begin = LocalDateTime.parse("2019-02-02T08:00:00");
    final LocalDateTime end = LocalDateTime.parse("2019-02-02T09:00:00");
    // Cases outside the range.
    assertFalse(LocalDateTimeUtil.isIn(LocalDateTime.parse("2019-02-02T06:00:00"), begin, end));
    assertFalse(LocalDateTimeUtil.isIn(LocalDateTime.parse("2019-02-02T13:00:00"), begin, end));
    assertFalse(LocalDateTimeUtil.isIn(LocalDateTime.parse("2019-02-01T08:00:00"), begin, end));
    assertFalse(LocalDateTimeUtil.isIn(LocalDateTime.parse("2019-02-03T09:00:00"), begin, end));
    // Cases inside the range (bounds inclusive by default).
    assertTrue(LocalDateTimeUtil.isIn(LocalDateTime.parse("2019-02-02T08:00:00"), begin, end));
    assertTrue(LocalDateTimeUtil.isIn(LocalDateTime.parse("2019-02-02T08:00:01"), begin, end));
    assertTrue(LocalDateTimeUtil.isIn(LocalDateTime.parse("2019-02-02T08:11:00"), begin, end));
    assertTrue(LocalDateTimeUtil.isIn(LocalDateTime.parse("2019-02-02T08:22:00"), begin, end));
    assertTrue(LocalDateTimeUtil.isIn(LocalDateTime.parse("2019-02-02T08:59:59"), begin, end));
    assertTrue(LocalDateTimeUtil.isIn(LocalDateTime.parse("2019-02-02T09:00:00"), begin, end));
    // Explicit boundary-inclusion flags.
    assertTrue(LocalDateTimeUtil.isIn(begin, begin, end, true, false));
    assertFalse(LocalDateTimeUtil.isIn(begin, begin, end, false, false));
    assertTrue(LocalDateTimeUtil.isIn(end, begin, end, false, true));
    assertFalse(LocalDateTimeUtil.isIn(end, begin, end, false, false));
    // Swapped begin and end still define the same range.
    assertTrue(LocalDateTimeUtil.isIn(begin, end, begin, true, true));
    // Ranges relative to the current time.
    final LocalDateTime now = LocalDateTime.now();
    assertTrue(LocalDateTimeUtil.isIn(now, now.minusHours(1L), now.plusHours(1L)));
    assertFalse(LocalDateTimeUtil.isIn(now, now.minusHours(1L), now.minusHours(2L)));
    assertFalse(LocalDateTimeUtil.isIn(now, now.plusHours(1L), now.plusHours(2L)));
    // Null arguments are rejected.
    assertThrows(IllegalArgumentException.class, () -> LocalDateTimeUtil.isIn(null, begin, end, false, false));
    assertThrows(IllegalArgumentException.class, () -> LocalDateTimeUtil.isIn(begin, null, end, false, false));
    assertThrows(IllegalArgumentException.class, () -> LocalDateTimeUtil.isIn(begin, begin, null, false, false));
}
|
/**
 * Determines the redirect URL for the AD authentication flow:
 * an invalid SAML session cancels the authentication, a missing IdP assertion
 * redirects the user to AD to authenticate, and otherwise a BVD session is
 * prepared and the user is redirected to BVD.
 *
 * @param httpRequest           incoming HTTP request, used to build the return URL
 * @param authenticationRequest current authentication request state
 * @return the URL the caller should redirect to
 * @throws SamlParseException if BVD session setup or RelayState encoding fails
 */
public String redirectWithCorrectAttributesForAd(HttpServletRequest httpRequest, AuthenticationRequest authenticationRequest) throws SamlParseException {
    try {
        String redirectUrl;
        SamlSession samlSession = authenticationRequest.getSamlSession();
        if (samlSession.getValidationStatus() != null && samlSession.getValidationStatus().equals(STATUS_INVALID.label)) {
            // Validation failed earlier: abort and report the cancellation to AD.
            return cancelAuthenticationToAd(authenticationRequest, samlSession.getArtifact());
        } else if (authenticationRequest.getIdpAssertion() == null) {
            // No assertion yet: send the user to AD with a return URL carrying the artifact.
            String returnUrl = generateReturnUrl(httpRequest, authenticationRequest.getSamlSession().getArtifact(), REDIRECT_WITH_ARTIFACT_URL);
            redirectUrl = prepareAuthenticationToAd(returnUrl, authenticationRequest);
            logger.info("Authentication sent to Ad: {}", redirectUrl);
        } else {
            // Assertion present: set up the BVD session and redirect there.
            redirectUrl = prepareBvdSession(authenticationRequest);
            logger.info("Redirected to BVD: {}", redirectUrl);
        }
        return redirectUrl;
    } catch (MetadataException | BvdException | DecryptionException | SamlSessionException e) {
        throw new SamlParseException("BVD exception starting session", e);
    } catch (UnsupportedEncodingException e) {
        throw new SamlParseException("Authentication cannot encode RelayState", e);
    }
}
|
// Drives the full redirect flow with a BVD AuthnRequest and mocked metadata/decryption
// services, and verifies that a transaction id is assigned to the SAML session.
@Test
public void redirectWithCorrectAttributesToAdForBvDTest() throws SamlParseException, SamlSessionException, DienstencatalogusException, SharedServiceClientException, UnsupportedEncodingException, ComponentInitializationException, SamlValidationException, MessageDecodingException, MetadataException, DecryptionException {
    // Build an encoded SAMLRequest parameter from the BVD AuthnRequest fixture.
    String samlRequest = readXMLFile(authnRequestIdpBvdFile);
    String decodeSAMLRequest = encodeAuthnRequest(samlRequest);
    httpServletRequestMock.setParameter("SAMLRequest", decodeSAMLRequest);
    AuthenticationRequest authenticationRequest = authenticationIdpService.startAuthenticationProcess(httpServletRequestMock);
    SamlSession samlSession = new SamlSession(1L);
    authenticationRequest.setSamlSession(samlSession);
    // Stub the BVD metadata and decryption dependencies used during session setup.
    when(bvdMetadataServiceMock.getEntityID()).thenReturn("entityId");
    when(bvdMetadataServiceMock.getCredential()).thenReturn(credential);
    NameID nameID = OpenSAMLUtils.buildSAMLObject(NameID.class);
    nameID.setValue("bsn");
    when(encryptionServiceMock.decryptValue(any(EncryptedID.class), any(Credential.class), anyString())).thenReturn(nameID);
    authenticationIdpService.redirectWithCorrectAttributesForAd(httpServletRequestMock, authenticationRequest);
    // The redirect flow must have assigned a transaction id to the session.
    assertNotNull(samlSession.getTransactionId());
}
|
/**
 * Returns a snapshot of the current mount table.
 * A defensive copy is taken under the read lock so callers can iterate it
 * without holding the lock or observing concurrent modifications.
 *
 * @return a copy of the mapping from mount point path to {@link MountInfo}
 */
public Map<String, MountInfo> getMountTable() {
    try (LockResource readLockResource = new LockResource(mReadLock)) {
        final Map<String, MountInfo> snapshot = new HashMap<>(mState.getMountTable());
        return snapshot;
    }
}
|
// Adds two nested mount points plus the implicit root mount and verifies
// getMountTable returns exactly that mapping.
@Test
public void getMountTable() throws Exception {
    Map<String, MountInfo> mountTable = new HashMap<>(2);
    mountTable.put("/mnt/foo",
        new MountInfo(new AlluxioURI("/mnt/foo"), new AlluxioURI("hdfs://localhost:5678/foo"), 2L,
            MountContext.defaults().getOptions().build()));
    mountTable.put("/mnt/bar",
        new MountInfo(new AlluxioURI("/mnt/bar"), new AlluxioURI("hdfs://localhost:5678/bar"), 3L,
            MountContext.defaults().getOptions().build()));
    AlluxioURI masterAddr = new AlluxioURI("alluxio://localhost:1234");
    // Register each expected mount point with the mount table under test.
    for (Map.Entry<String, MountInfo> mountPoint : mountTable.entrySet()) {
        MountInfo mountInfo = mountPoint.getValue();
        mMountTable.add(NoopJournalContext.INSTANCE, masterAddr.join(mountPoint.getKey()),
            mountInfo.getUfsUri(), mountInfo.getMountId(), mountInfo.getOptions());
    }
    // Add root mountpoint, which the mount table contains implicitly.
    mountTable.put("/", new MountInfo(new AlluxioURI("/"), new AlluxioURI("s3a://bucket/"),
        IdUtils.ROOT_MOUNT_ID, MountContext.defaults().getOptions().build()));
    Assert.assertEquals(mountTable, mMountTable.getMountTable());
}
|
/**
 * Returns up to {@code count} keys stored in the given cluster hash slot by
 * issuing CLUSTER GETKEYSINSLOT and waiting synchronously for the reply.
 *
 * @param slot  the cluster hash slot to inspect
 * @param count maximum number of keys to return
 * @return the raw key bytes found in the slot (possibly empty)
 */
@Override
public List<byte[]> clusterGetKeysInSlot(int slot, Integer count) {
    RFuture<List<byte[]>> f = executorService.readAsync((String)null, ByteArrayCodec.INSTANCE, CLUSTER_GETKEYSINSLOT, slot, count);
    return syncFuture(f);
}
|
// Queries an unused slot and expects no keys back.
@Test
public void testClusterGetKeysInSlot() {
    List<byte[]> keys = connection.clusterGetKeysInSlot(12, 10);
    assertThat(keys).isEmpty();
}
|
/**
 * Decodes a raw syslog message into a {@link Message}.
 * The payload is interpreted with the configured charset, the remote address
 * is resolved when available, and the decode time is recorded via the timer.
 *
 * @param rawMessage the raw message to decode; never {@code null}
 * @return the parsed message, or {@code null} if parsing yields none
 */
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
    final String payloadText = new String(rawMessage.getPayload(), charset);
    try (Timer.Context ignored = this.decodeTime.time()) {
        // The remote address may be absent; fall back to null at each step.
        final ResolvableInetSocketAddress resolvable = rawMessage.getRemoteAddress();
        final InetSocketAddress socketAddress =
            resolvable == null ? null : resolvable.getInetSocketAddress();
        return parse(payloadText,
            socketAddress == null ? null : socketAddress.getAddress(),
            rawMessage.getTimestamp());
    }
}
|
// With CK_STORE_FULL_MESSAGE enabled, decoding an unstructured syslog line must
// populate the standard fields and keep the original line in "full_message".
@Test
public void testDecodeUnstructuredWithFullMessage() throws Exception {
    when(configuration.getBoolean(SyslogCodec.CK_STORE_FULL_MESSAGE)).thenReturn(true);
    final Message message = codec.decode(buildRawMessage(UNSTRUCTURED));
    assertNotNull(message);
    assertEquals("c4dc57ba1ebb syslog-ng[7208]: syslog-ng starting up; version='3.5.3'", message.getMessage());
    assertEquals(new DateTime(YEAR + "-10-21T12:09:37"), message.getField("timestamp"));
    assertEquals("c4dc57ba1ebb", message.getField("source"));
    assertEquals(5, message.getField("level"));
    assertEquals("syslogd", message.getField("facility"));
    // The verbatim input line must be preserved when full-message storage is on.
    assertEquals(UNSTRUCTURED, message.getField("full_message"));
    assertEquals(5, message.getField("facility_num"));
}
|
/**
 * Static factory creating a {@link TimestampExtractTransform} without a name
 * (the first constructor argument is {@code null}).
 *
 * @param transform the transform to apply to the input {@code PCollection}
 * @param <InputT>  input element type
 * @param <OutputT> output element type
 * @return a new unnamed {@code TimestampExtractTransform}
 */
public static <InputT, OutputT> TimestampExtractTransform<InputT, OutputT> of(
    PCollectionTransform<InputT, OutputT> transform) {
  return new TimestampExtractTransform<>(null, transform);
}
|
// Runs a small pipeline through TimestampExtractTransform.of and checks each
// distinct input element is counted exactly once.
@SuppressWarnings("unchecked")
@Test(timeout = 10000)
public void testTransform() {
  Pipeline p = Pipeline.create();
  PCollection<Integer> input = p.apply(Create.of(1, 2, 3));
  PCollection<KV<Integer, Long>> result =
      input.apply(
          TimestampExtractTransform.of(
              in -> CountByKey.of(in).keyBy(KV::getValue, TypeDescriptors.integers()).output()));
  PAssert.that(result).containsInAnyOrder(KV.of(1, 1L), KV.of(2, 1L), KV.of(3, 1L));
  p.run().waitUntilFinish();
}
|
/**
 * Builds the list of credential retrievers for the target (to) image from the
 * CLI options. The username/password and credential-helper options (both the
 * generic and the {@code --to-} variants) are applied to the given
 * {@link DefaultCredentialRetrievers} before its list is materialized.
 *
 * @param commonCliOptions            parsed CLI options
 * @param defaultCredentialRetrievers the retriever factory to configure
 * @return the configured credential retrievers
 * @throws FileNotFoundException if a configured credential helper is missing
 */
public static List<CredentialRetriever> getToCredentialRetrievers(
    CommonCliOptions commonCliOptions, DefaultCredentialRetrievers defaultCredentialRetrievers)
    throws FileNotFoundException {
  // these are all mutually exclusive as enforced by the CLI
  commonCliOptions
      .getUsernamePassword()
      .ifPresent(
          credential ->
              defaultCredentialRetrievers.setKnownCredential(
                  credential, "--username/--password"));
  commonCliOptions
      .getToUsernamePassword()
      .ifPresent(
          credential ->
              defaultCredentialRetrievers.setKnownCredential(
                  credential, "--to-username/--to-password"));
  commonCliOptions
      .getCredentialHelper()
      .ifPresent(defaultCredentialRetrievers::setCredentialHelper);
  commonCliOptions
      .getToCredentialHelper()
      .ifPresent(defaultCredentialRetrievers::setCredentialHelper);
  return defaultCredentialRetrievers.asList();
}
|
// Parameterized over username/password CLI variants: verifies the credential is
// registered with the expected source label and no other interactions happen.
@Test
@Parameters(method = "paramsToUsernamePassword")
public void testGetToUsernamePassword(String expectedSource, String[] args)
    throws FileNotFoundException {
  CommonCliOptions commonCliOptions =
      CommandLine.populateCommand(new CommonCliOptions(), ArrayUtils.addAll(DEFAULT_ARGS, args));
  Credentials.getToCredentialRetrievers(commonCliOptions, defaultCredentialRetrievers);
  // Capture the credential passed to the retrievers and check both value and source.
  ArgumentCaptor<Credential> captor = ArgumentCaptor.forClass(Credential.class);
  verify(defaultCredentialRetrievers)
      .setKnownCredential(captor.capture(), ArgumentMatchers.eq(expectedSource));
  assertThat(captor.getValue()).isEqualTo(Credential.from("abc", "xyz"));
  verify(defaultCredentialRetrievers).asList();
  verifyNoMoreInteractions(defaultCredentialRetrievers);
}
|
/**
 * Shadow implementation of HTTP execution for tests.
 * When the fake HTTP layer intercepts requests, the request is emulated and a
 * canned response returned; otherwise the request is recorded, executed for
 * real via the redirector, optionally has its response content intercepted,
 * and the response is recorded before being returned.
 */
@Implementation
protected HttpResponse execute(
    HttpHost httpHost, HttpRequest httpRequest, HttpContext httpContext)
    throws HttpException, IOException {
  if (FakeHttp.getFakeHttpLayer().isInterceptingHttpRequests()) {
    // Intercepting mode: serve the response from the fake layer, no real I/O.
    return FakeHttp.getFakeHttpLayer()
        .emulateRequest(httpHost, httpRequest, httpContext, realObject);
  } else {
    // Pass-through mode: record the request, then delegate to the real director.
    FakeHttp.getFakeHttpLayer()
        .addRequestInfo(new HttpRequestInfo(httpRequest, httpHost, httpContext, redirector));
    HttpResponse response = redirector.execute(httpHost, httpRequest, httpContext);
    if (FakeHttp.getFakeHttpLayer().isInterceptingResponseContent()) {
      interceptResponseContent(response);
    }
    FakeHttp.getFakeHttpLayer().addHttpResponse(response);
    return response;
  }
}
|
// Clearing pending responses must discard earlier ones: only the response
// added after the clear is served.
@Test
public void clearPendingHttpResponses() throws Exception {
  FakeHttp.addPendingHttpResponse(200, "earlier");
  FakeHttp.clearPendingHttpResponses();
  FakeHttp.addPendingHttpResponse(500, "later");
  HttpResponse response = requestDirector.execute(null, new HttpGet("http://some.uri"), null);
  assertNotNull(response);
  assertThat(response.getStatusLine().getStatusCode()).isEqualTo(500);
  assertThat(getStringContent(response)).isEqualTo("later");
}
|
/**
 * Resolves SAML metadata for the requested connection/service, with results
 * cached under the request's cacheable key.
 * Validation cascades: the connection must exist and be active, its
 * organization and organization role must be active, and the service must
 * exist and be active. The first failed check yields an error response;
 * otherwise reduced metadata is generated and a success response returned.
 *
 * @param samlMetadataRequest the metadata lookup request
 * @return a success response with metadata, or an error response describing
 *         the first failed validation
 */
@Cacheable(value = "metadata-response", key = "#samlMetadataRequest.cacheableKey()")
public SamlMetadataResponse resolveSamlMetadata(SamlMetadataRequest samlMetadataRequest) {
    LOGGER.info("Cache not found for saml-metadata {}", samlMetadataRequest.hashCode());
    Connection connection = connectionService.getConnectionByEntityId(samlMetadataRequest.getConnectionEntityId());
    MetadataResponseStatus metadataResponseStatus = null;
    nl.logius.digid.dc.domain.service.Service service = null;
    if (connection == null) {
        metadataResponseStatus = CONNECTION_NOT_FOUND;
    } else if (!connection.getStatus().isAllowed()) {
        metadataResponseStatus = CONNECTION_INACTIVE;
    } else if (!connection.getOrganization().getStatus().isAllowed()) {
        metadataResponseStatus = ORGANIZATION_INACTIVE;
    } else if (Boolean.FALSE.equals(connection.getOrganizationRole().getStatus().isAllowed())) {
        metadataResponseStatus = ORGANIZATION_ROLE_INACTIVE;
    } else {
        // Resolve the service UUID from the request, or derive it from the connection.
        String serviceUUID = samlMetadataRequest.getServiceUuid() == null ? getServiceUUID(connection, samlMetadataRequest.getServiceEntityId(), samlMetadataRequest.getServiceIdx()) : samlMetadataRequest.getServiceUuid();
        samlMetadataRequest.setServiceUuid(serviceUUID);
        service = serviceService.serviceExists(connection, samlMetadataRequest.getServiceEntityId(), serviceUUID);
        if (service == null) {
            metadataResponseStatus = SERVICE_NOT_FOUND;
        } else if (!service.getStatus().isAllowed()) {
            metadataResponseStatus = SERVICE_INACTIVE;
        }
    }
    if (metadataResponseStatus != null) {
        return metadataResponseMapper.mapErrorResponse(metadataResponseStatus.name(), metadataResponseStatus.label);
    } else {
        // All checks passed: build the reduced metadata document for this service.
        String samlMetadata = generateReducedMetadataString(connection, service.getEntityId());
        return metadataResponseMapper.mapSuccessResponse(samlMetadata, connection, service, STATUS_OK.name());
    }
}
|
// A connection whose organization role is inactive must yield an
// ORGANIZATION_ROLE_INACTIVE error response.
@Test
void organizationRoleInactiveTest() {
    Connection connection = newConnection(SAML_COMBICONNECT, true, true, false);
    when(connectionServiceMock.getConnectionByEntityId(anyString())).thenReturn(connection);
    SamlMetadataResponse response = metadataRetrieverServiceMock.resolveSamlMetadata(newMetadataRequest());
    assertEquals(ORGANIZATION_ROLE_INACTIVE.name(), response.getRequestStatus());
    assertEquals(ORGANIZATION_ROLE_INACTIVE.label, response.getErrorDescription());
}
|
/**
 * Maps each numeric predictor's field name to the {@link Expression} generated
 * for that predictor.
 *
 * @param numericPredictors the predictors to convert; field names are expected
 *                          to be unique (duplicate keys would fail the collect)
 * @return a map keyed by predictor field name
 */
static Map<String, Expression> getNumericPredictorsExpressions(final List<NumericPredictor> numericPredictors) {
    return numericPredictors.stream()
            .collect(Collectors.toMap(NumericPredictor::getField,
                                      KiePMMLRegressionTableFactory::getNumericPredictorExpression));
}
|
// Builds three distinct numeric predictors and checks the resulting map has one
// entry per predictor.
@Test
void getNumericPredictorsExpressions() {
    final List<NumericPredictor> numericPredictors = IntStream.range(0, 3).mapToObj(index -> {
        String predictorName = "predictorName-" + index;
        double coefficient = 1.23 * index;
        return PMMLModelTestUtils.getNumericPredictor(predictorName, index, coefficient);
    }).collect(Collectors.toList());
    Map<String, Expression> retrieved =
            KiePMMLRegressionTableFactory.getNumericPredictorsExpressions(numericPredictors);
    assertThat(retrieved).hasSameSizeAs(numericPredictors);
}
|
/**
 * Submits the runnable to the delegate executor, marking the submission meter
 * and wrapping the task so its execution is instrumented as well.
 *
 * @param runnable the task to run
 * @return the future returned by the delegate executor
 */
@Override
public Future<?> submit(Runnable runnable) {
    submitted.mark();
    return delegate.submit(new InstrumentedRunnable(runnable));
}
|
// Verifies the executor's metrics before, during, and after running a task:
// submitted/running/completed counters, duration and idle timers.
@Test
public void reportsTasksInformationForRunnable() throws Exception {
    // All metrics start at zero.
    assertThat(submitted.getCount()).isEqualTo(0);
    assertThat(running.getCount()).isEqualTo(0);
    assertThat(completed.getCount()).isEqualTo(0);
    assertThat(duration.getCount()).isEqualTo(0);
    assertThat(idle.getCount()).isEqualTo(0);
    Runnable runnable = () -> {
        // While the task runs it is counted as submitted+running but not completed.
        assertThat(submitted.getCount()).isEqualTo(1);
        assertThat(running.getCount()).isEqualTo(1);
        assertThat(completed.getCount()).isEqualTo(0);
        assertThat(duration.getCount()).isEqualTo(0);
        assertThat(idle.getCount()).isEqualTo(1);
    };
    Future<?> theFuture = instrumentedExecutorService.submit(runnable);
    theFuture.get();
    // After completion the running gauge drops and completion/duration are recorded.
    assertThat(submitted.getCount()).isEqualTo(1);
    assertThat(running.getCount()).isEqualTo(0);
    assertThat(completed.getCount()).isEqualTo(1);
    assertThat(duration.getCount()).isEqualTo(1);
    assertThat(duration.getSnapshot().size()).isEqualTo(1);
    assertThat(idle.getCount()).isEqualTo(1);
    assertThat(idle.getSnapshot().size()).isEqualTo(1);
}
|
/**
 * Creates an iterator over the rows of an Arrow record batch, converting each
 * Arrow row into a Beam {@code Row} of the given schema.
 *
 * @param schema           the Beam schema describing each row
 * @param vectorSchemaRoot the Arrow record batch to iterate
 * @return a row iterator over the batch
 */
public static RecordBatchRowIterator rowsFromRecordBatch(
    Schema schema, VectorSchemaRoot vectorSchemaRoot) {
  return new RecordBatchRowIterator(schema, vectorSchemaRoot);
}
|
// Populates an Arrow VectorSchemaRoot covering ints, doubles, strings, both
// timestamp granularities, lists, booleans and fixed-size binary, and checks
// the row iterator yields the equivalent Beam rows in order.
@Test
public void rowIterator() {
  org.apache.arrow.vector.types.pojo.Schema schema =
      new org.apache.arrow.vector.types.pojo.Schema(
          asList(
              field("int32", new ArrowType.Int(32, true)),
              field("float64", new ArrowType.FloatingPoint(FloatingPointPrecision.DOUBLE)),
              field("string", new ArrowType.Utf8()),
              field("timestampMicroUTC", new ArrowType.Timestamp(TimeUnit.MICROSECOND, "UTC")),
              field("timestampMilliUTC", new ArrowType.Timestamp(TimeUnit.MILLISECOND, "UTC")),
              field(
                  "int32_list",
                  new ArrowType.List(),
                  field("int32s", new ArrowType.Int(32, true))),
              field("boolean", new ArrowType.Bool()),
              field("fixed_size_binary", new ArrowType.FixedSizeBinary(3))));
  Schema beamSchema = ArrowConversion.ArrowSchemaTranslator.toBeamSchema(schema);
  VectorSchemaRoot expectedSchemaRoot = VectorSchemaRoot.create(schema, allocator);
  expectedSchemaRoot.allocateNew();
  expectedSchemaRoot.setRowCount(16);
  // Grab a typed handle on each field vector for direct population below.
  IntVector intVector = (IntVector) expectedSchemaRoot.getFieldVectors().get(0);
  Float8Vector floatVector = (Float8Vector) expectedSchemaRoot.getFieldVectors().get(1);
  VarCharVector strVector = (VarCharVector) expectedSchemaRoot.getFieldVectors().get(2);
  TimeStampMicroTZVector timestampMicroUtcVector =
      (TimeStampMicroTZVector) expectedSchemaRoot.getFieldVectors().get(3);
  TimeStampMilliTZVector timeStampMilliTZVector =
      (TimeStampMilliTZVector) expectedSchemaRoot.getFieldVectors().get(4);
  ListVector int32ListVector = (ListVector) expectedSchemaRoot.getFieldVectors().get(5);
  IntVector int32ListElementVector =
      int32ListVector
          .<IntVector>addOrGetVector(
              new org.apache.arrow.vector.types.pojo.FieldType(
                  false, new ArrowType.Int(32, true), null))
          .getVector();
  BitVector boolVector = (BitVector) expectedSchemaRoot.getFieldVectors().get(6);
  FixedSizeBinaryVector fixedSizeBinaryVector =
      (FixedSizeBinaryVector) expectedSchemaRoot.getFieldVectors().get(7);
  ArrayList<Row> expectedRows = new ArrayList<>();
  // Write 16 rows: build the expected Beam Row and the matching Arrow values.
  for (int i = 0; i < 16; i++) {
    DateTime dt = new DateTime(2019, 1, i + 1, i, i, i, DateTimeZone.UTC);
    expectedRows.add(
        Row.withSchema(beamSchema)
            .addValues(
                i,
                i + .1 * i,
                "" + i,
                dt,
                dt,
                ImmutableList.of(i),
                (i % 2) != 0,
                new byte[] {(byte) i, (byte) (i + 1), (byte) (i + 2)})
            .build());
    intVector.set(i, i);
    floatVector.set(i, i + .1 * i);
    strVector.set(i, new Text("" + i));
    timestampMicroUtcVector.set(i, dt.getMillis() * 1000);
    timeStampMilliTZVector.set(i, dt.getMillis());
    int32ListVector.startNewValue(i);
    int32ListElementVector.set(i, i);
    int32ListVector.endValue(i, 1);
    boolVector.set(i, i % 2);
    fixedSizeBinaryVector.set(i, new byte[] {(byte) i, (byte) (i + 1), (byte) (i + 2)});
  }
  // The iterator must reproduce the expected rows in the original order.
  assertThat(
      ImmutableList.copyOf(ArrowConversion.rowsFromRecordBatch(beamSchema, expectedSchemaRoot)),
      IsIterableContainingInOrder.contains(
          expectedRows.stream()
              .map((row) -> equalTo(row))
              .collect(ImmutableList.toImmutableList())));
  expectedSchemaRoot.close();
}
|
/**
 * Returns the {@link DoFnSignature} for the given {@link DoFn} class, parsing
 * it on first access and serving subsequent calls from the signature cache.
 *
 * @param fn  the {@code DoFn} subclass to analyze
 * @param <FnT> the concrete {@code DoFn} type
 * @return the cached or newly parsed signature
 */
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
  return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
}
|
// A @TimerId field that is not final must be rejected with a descriptive
// IllegalArgumentException naming the offending field.
@Test
public void testTimerIdNonFinal() throws Exception {
  thrown.expect(IllegalArgumentException.class);
  thrown.expectMessage("Timer declarations must be final");
  thrown.expectMessage("Non-final field");
  thrown.expectMessage("myfield");
  thrown.expectMessage(not(containsString("State"))); // lowercase "state" is in the package name
  thrown.expectMessage(mentionsTimers());
  DoFnSignatures.getSignature(
      new DoFn<KV<String, Integer>, Long>() {
        @TimerId("my-timer-id")
        private TimerSpec myfield = TimerSpecs.timer(TimeDomain.PROCESSING_TIME);
        @ProcessElement
        public void foo(ProcessContext context) {}
      }.getClass());
}
|
/**
 * Returns whether the user (directly or via one of the given roles) is an
 * admin of the template and may therefore edit it.
 *
 * @param template the template whose authorization is checked
 * @param username the user to check
 * @param roles    roles the user belongs to
 * @return {@code true} if the user is a template admin
 */
public boolean canUserEditTemplate(PipelineTemplateConfig template, CaseInsensitiveString username, List<Role> roles) {
    return template.getAuthorization().isUserAnAdmin(username, roles);
}
|
// A user whose role is not in the template's admin config must not be allowed
// to edit the template.
@Test
public void shouldReturnFalseIfUserWithinARoleCannotEditTemplate() {
    CaseInsensitiveString templateAdmin = new CaseInsensitiveString("template-admin");
    Role securityConfigRole = getSecurityConfigRole(templateAdmin);
    List<Role> roles = setupRoles(securityConfigRole);
    String templateName = "template1";
    // The template authorizes a different role than the one the user holds.
    PipelineTemplateConfig template = PipelineTemplateConfigMother.createTemplate(templateName, new Authorization(new AdminsConfig(new AdminRole(new CaseInsensitiveString("another-role")))),
            StageConfigMother.manualStage("random-stage"));
    TemplatesConfig templates = new TemplatesConfig(template);
    assertThat(templates.canUserEditTemplate(template, templateAdmin, roles), is(false));
}
|
/**
 * Installs a permanent table-miss entry on the given table of the device:
 * a match-all selector whose treatment drops the packet, at DROP_PRIORITY.
 *
 * @param deviceId the device to program
 * @param table    the flow table to install the miss entry into
 */
@Override
public void setUpTableMissEntry(DeviceId deviceId, int table) {
    // Empty selector matches everything; drop treatment discards unmatched traffic.
    TrafficSelector matchAll = DefaultTrafficSelector.builder().build();
    TrafficTreatment dropAll = DefaultTrafficTreatment.builder().drop().build();
    FlowRule tableMissRule = DefaultFlowRule.builder()
            .forDevice(deviceId)
            .withSelector(matchAll)
            .withTreatment(dropAll)
            .withPriority(DROP_PRIORITY)
            .fromApp(appId)
            .makePermanent()
            .forTable(table)
            .build();
    applyRule(tableMissRule, true);
}
|
// Installs a table-miss entry and validates the produced flow rule matches the
// expected drop rule for the given table.
@Test
public void testSetUpTableMissEntry() {
    int testTable = 10;
    fros = Sets.newConcurrentHashSet();
    TrafficSelector.Builder selectorBuilder = DefaultTrafficSelector.builder();
    TrafficTreatment.Builder treatmentBuilder = DefaultTrafficTreatment.builder();
    target.setUpTableMissEntry(DEVICE_ID, testTable);
    // Build the rule we expect setUpTableMissEntry to have applied.
    FlowRule.Builder flowRuleBuilder = DefaultFlowRule.builder()
            .forDevice(DEVICE_ID)
            .withSelector(selectorBuilder.build())
            .withTreatment(treatmentBuilder.drop().build())
            .withPriority(DROP_PRIORITY)
            .fromApp(TEST_APP_ID)
            .forTable(testTable)
            .makePermanent();
    validateFlowRule(flowRuleBuilder.build());
}
|
/**
 * Classifies this plan's persistent query type.
 * Plans without a query part are not persistent queries (empty). Plans with a
 * query but no DDL command are INSERT INTO queries; plans with a DDL command
 * are CREATE_SOURCE when the command is a source CREATE TABLE, otherwise
 * CREATE_AS.
 *
 * @return the persistent query type, or empty if the plan has no query
 */
@Override
public Optional<KsqlConstants.PersistentQueryType> getPersistentQueryType() {
    // No query plan means a pure DDL statement: not a persistent query.
    if (!queryPlan.isPresent()) {
        return Optional.empty();
    }
    // INSERT INTO is the only persistent query without an attached DDL command.
    if (!ddlCommand.isPresent()) {
        return Optional.of(KsqlConstants.PersistentQueryType.INSERT);
    }
    // CREATE_AS and CREATE_SOURCE commands contain a DDL command and a Query plan.
    final boolean isSourceTable = ddlCommand.get() instanceof CreateTableCommand
        && ((CreateTableCommand) ddlCommand.get()).getIsSource();
    return Optional.of(isSourceTable
        ? KsqlConstants.PersistentQueryType.CREATE_SOURCE
        : KsqlConstants.PersistentQueryType.CREATE_AS);
}
|
// A plan with a DDL command but no query plan is not a persistent query.
@Test
public void shouldReturnNoPersistentQueryTypeOnPlansWithoutQueryPlans() {
    // Given:
    final KsqlPlanV1 plan = new KsqlPlanV1(
        "stmt",
        Optional.of(ddlCommand1),
        Optional.empty());
    // When/Then:
    assertThat(plan.getPersistentQueryType(), is(Optional.empty()));
}
|
/**
 * Validates a manually specified partition assignment.
 * Rejects empty replica lists, unregistered brokers, duplicate brokers, and
 * (when a replication factor is already established) replica counts that do
 * not match it.
 *
 * @param assignment        the replica assignment to validate
 * @param replicationFactor the expected replication factor, if any
 * @throws InvalidReplicaAssignmentException if the assignment is invalid;
 *         message text is relied upon by callers/tests
 */
void validateManualPartitionAssignment(
    PartitionAssignment assignment,
    OptionalInt replicationFactor
) {
    if (assignment.replicas().isEmpty()) {
        throw new InvalidReplicaAssignmentException("The manual partition " +
            "assignment includes an empty replica list.");
    }
    // Sort a copy so duplicate broker ids become adjacent and easy to detect.
    List<Integer> sortedBrokerIds = new ArrayList<>(assignment.replicas());
    sortedBrokerIds.sort(Integer::compare);
    Integer prevBrokerId = null;
    for (Integer brokerId : sortedBrokerIds) {
        if (!clusterControl.brokerRegistrations().containsKey(brokerId)) {
            throw new InvalidReplicaAssignmentException("The manual partition " +
                "assignment includes broker " + brokerId + ", but no such broker is " +
                "registered.");
        }
        if (brokerId.equals(prevBrokerId)) {
            throw new InvalidReplicaAssignmentException("The manual partition " +
                "assignment includes the broker " + prevBrokerId + " more than " +
                "once.");
        }
        prevBrokerId = brokerId;
    }
    // All partitions of a topic must share one replication factor.
    if (replicationFactor.isPresent() &&
            sortedBrokerIds.size() != replicationFactor.getAsInt()) {
        throw new InvalidReplicaAssignmentException("The manual partition " +
            "assignment includes a partition with " + sortedBrokerIds.size() +
            " replica(s), but this is not consistent with previous " +
            "partitions, which have " + replicationFactor.getAsInt() + " replica(s).");
    }
}
|
// Exercises every rejection path of validateManualPartitionAssignment and pins
// the exact exception messages: empty list, unregistered broker, duplicate
// broker, and replication-factor mismatch.
@Test
public void testValidateBadManualPartitionAssignments() {
    ReplicationControlTestContext ctx = new ReplicationControlTestContext.Builder().build();
    ctx.registerBrokers(1, 2);
    assertEquals("The manual partition assignment includes an empty replica list.",
        assertThrows(InvalidReplicaAssignmentException.class, () ->
            ctx.replicationControl.validateManualPartitionAssignment(partitionAssignment(Collections.emptyList()),
                OptionalInt.empty())).getMessage());
    assertEquals("The manual partition assignment includes broker 3, but no such " +
        "broker is registered.", assertThrows(InvalidReplicaAssignmentException.class, () ->
            ctx.replicationControl.validateManualPartitionAssignment(partitionAssignment(asList(1, 2, 3)),
                OptionalInt.empty())).getMessage());
    assertEquals("The manual partition assignment includes the broker 2 more than " +
        "once.", assertThrows(InvalidReplicaAssignmentException.class, () ->
            ctx.replicationControl.validateManualPartitionAssignment(partitionAssignment(asList(1, 2, 2)),
                OptionalInt.empty())).getMessage());
    assertEquals("The manual partition assignment includes a partition with 2 " +
        "replica(s), but this is not consistent with previous partitions, which have " +
        "3 replica(s).", assertThrows(InvalidReplicaAssignmentException.class, () ->
            ctx.replicationControl.validateManualPartitionAssignment(partitionAssignment(asList(1, 2)),
                OptionalInt.of(3))).getMessage());
}
|
/**
 * Closes this object exactly once (idempotent via the {@code open} flag),
 * runs the {@code onClose} hook, then closes every registered resource.
 * Exceptions from individual resources are collected and re-thrown after all
 * resources have been attempted, with later ones added as suppressed.
 * The outer loop guards against resources registered concurrently with close.
 *
 * @throws IOException if any resource threw an {@code IOException}
 */
@Override
public void close() throws IOException {
  if (open.compareAndSet(true, false)) {
    onClose.run();
    Throwable thrown = null;
    do {
      for (Closeable resource : resources) {
        try {
          resource.close();
        } catch (Throwable e) {
          // Keep the first failure as primary; suppress the rest.
          if (thrown == null) {
            thrown = e;
          } else {
            thrown.addSuppressed(e);
          }
        } finally {
          // ensure the resource is removed even if it doesn't remove itself when closed
          resources.remove(resource);
        }
      }
      // It's possible for a thread registering a resource to register that resource after open
      // has been set to false and even after we've looped through and closed all the resources.
      // Since registering must be incremented *before* checking the state of open, however,
      // when we reach this point in that situation either the register call is still in progress
      // (registering > 0) or the new resource has been successfully added (resources not empty).
      // In either case, we just need to repeat the loop until there are no more register calls
      // in progress (no new calls can start and no resources left to close.
    } while (registering.get() > 0 || !resources.isEmpty());
    if (thrown != null) {
      throwIfInstanceOf(thrown, IOException.class);
      throwIfUnchecked(thrown);
    }
  }
}
|
// close() must be idempotent: the onClose hook runs only on the first call.
@Test
public void testClose_multipleTimesDoNothing() throws IOException {
  state.close();
  assertEquals(1, onClose.runCount);
  state.close();
  state.close();
  assertEquals(1, onClose.runCount);
}
|
/**
 * Incremental SPDY frame decoder state machine.
 * Consumes as many complete frames (or frame fragments) as the buffer holds,
 * dispatching callbacks on {@code delegate} for each decoded frame. Returns
 * when more bytes are needed; decoder state is kept in fields so decoding
 * resumes where it left off on the next call.
 *
 * @param buffer the accumulated input bytes to decode from
 */
public void decode(ByteBuf buffer) {
    boolean last;
    int statusCode;
    while (true) {
        switch(state) {
            case READ_COMMON_HEADER:
                // Need the full 8-byte common header before anything can be decoded.
                if (buffer.readableBytes() < SPDY_HEADER_SIZE) {
                    return;
                }
                int frameOffset  = buffer.readerIndex();
                int flagsOffset  = frameOffset + SPDY_HEADER_FLAGS_OFFSET;
                int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET;
                buffer.skipBytes(SPDY_HEADER_SIZE);
                // The high bit of the first byte distinguishes control from data frames.
                boolean control = (buffer.getByte(frameOffset) & 0x80) != 0;
                int version;
                int type;
                if (control) {
                    // Decode control frame common header
                    version = getUnsignedShort(buffer, frameOffset) & 0x7FFF;
                    type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET);
                    streamId = 0; // Default to session Stream-ID
                } else {
                    // Decode data frame common header
                    version = spdyVersion; // Default to expected version
                    type = SPDY_DATA_FRAME;
                    streamId = getUnsignedInt(buffer, frameOffset);
                }
                flags  = buffer.getByte(flagsOffset);
                length = getUnsignedMedium(buffer, lengthOffset);
                // Check version first then validity
                if (version != spdyVersion) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SPDY Version");
                } else if (!isValidFrameHeader(streamId, type, flags, length)) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid Frame Error");
                } else {
                    state = getNextState(type, length);
                }
                break;
            case READ_DATA_FRAME:
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                    delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0));
                    break;
                }
                // Generate data frames that do not exceed maxChunkSize
                int dataLength = Math.min(maxChunkSize, length);
                // Wait until entire frame is readable
                if (buffer.readableBytes() < dataLength) {
                    return;
                }
                ByteBuf data = buffer.alloc().buffer(dataLength);
                data.writeBytes(buffer, dataLength);
                length -= dataLength;
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                }
                // FIN is only reported on the final chunk of the frame.
                last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN);
                delegate.readDataFrame(streamId, last, data);
                break;
            case READ_SYN_STREAM_FRAME:
                // SYN_STREAM carries stream id, associated id, and priority (10 bytes).
                if (buffer.readableBytes() < 10) {
                    return;
                }
                int offset = buffer.readerIndex();
                streamId = getUnsignedInt(buffer, offset);
                int associatedToStreamId = getUnsignedInt(buffer, offset + 4);
                byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07);
                last = hasFlag(flags, SPDY_FLAG_FIN);
                boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL);
                buffer.skipBytes(10);
                length -= 10;
                if (streamId == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SYN_STREAM Frame");
                } else {
                    state = State.READ_HEADER_BLOCK;
                    delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional);
                }
                break;
            case READ_SYN_REPLY_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                last = hasFlag(flags, SPDY_FLAG_FIN);
                buffer.skipBytes(4);
                length -= 4;
                if (streamId == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SYN_REPLY Frame");
                } else {
                    state = State.READ_HEADER_BLOCK;
                    delegate.readSynReplyFrame(streamId, last);
                }
                break;
            case READ_RST_STREAM_FRAME:
                if (buffer.readableBytes() < 8) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
                buffer.skipBytes(8);
                if (streamId == 0 || statusCode == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid RST_STREAM Frame");
                } else {
                    state = State.READ_COMMON_HEADER;
                    delegate.readRstStreamFrame(streamId, statusCode);
                }
                break;
            case READ_SETTINGS_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR);
                numSettings = getUnsignedInt(buffer, buffer.readerIndex());
                buffer.skipBytes(4);
                length -= 4;
                // Validate frame length against number of entries. Each ID/Value entry is 8 bytes.
                if ((length & 0x07) != 0 || length >> 3 != numSettings) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SETTINGS Frame");
                } else {
                    state = State.READ_SETTING;
                    delegate.readSettingsFrame(clear);
                }
                break;
            case READ_SETTING:
                if (numSettings == 0) {
                    state = State.READ_COMMON_HEADER;
                    delegate.readSettingsEnd();
                    break;
                }
                if (buffer.readableBytes() < 8) {
                    return;
                }
                byte settingsFlags = buffer.getByte(buffer.readerIndex());
                int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1);
                int value = getSignedInt(buffer, buffer.readerIndex() + 4);
                boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE);
                boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED);
                buffer.skipBytes(8);
                --numSettings;
                delegate.readSetting(id, value, persistValue, persisted);
                break;
            case READ_PING_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                int pingId = getSignedInt(buffer, buffer.readerIndex());
                buffer.skipBytes(4);
                state = State.READ_COMMON_HEADER;
                delegate.readPingFrame(pingId);
                break;
            case READ_GOAWAY_FRAME:
                if (buffer.readableBytes() < 8) {
                    return;
                }
                int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex());
                statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
                buffer.skipBytes(8);
                state = State.READ_COMMON_HEADER;
                delegate.readGoAwayFrame(lastGoodStreamId, statusCode);
                break;
            case READ_HEADERS_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                last = hasFlag(flags, SPDY_FLAG_FIN);
                buffer.skipBytes(4);
                length -= 4;
                if (streamId == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid HEADERS Frame");
                } else {
                    state = State.READ_HEADER_BLOCK;
                    delegate.readHeadersFrame(streamId, last);
                }
                break;
            case READ_WINDOW_UPDATE_FRAME:
                if (buffer.readableBytes() < 8) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4);
                buffer.skipBytes(8);
                if (deltaWindowSize == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid WINDOW_UPDATE Frame");
                } else {
                    state = State.READ_COMMON_HEADER;
                    delegate.readWindowUpdateFrame(streamId, deltaWindowSize);
                }
                break;
            case READ_HEADER_BLOCK:
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                    delegate.readHeaderBlockEnd();
                    break;
                }
                if (!buffer.isReadable()) {
                    return;
                }
                // Deliver whatever compressed header bytes are currently available.
                int compressedBytes = Math.min(buffer.readableBytes(), length);
                ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes);
                headerBlock.writeBytes(buffer, compressedBytes);
                length -= compressedBytes;
                delegate.readHeaderBlock(headerBlock);
                break;
            case DISCARD_FRAME:
                // Skip the remainder of a frame whose payload is being ignored.
                int numBytes = Math.min(buffer.readableBytes(), length);
                buffer.skipBytes(numBytes);
                length -= numBytes;
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                    break;
                }
                return;
            case FRAME_ERROR:
                // After a fatal frame error, drain and drop all remaining input.
                buffer.skipBytes(buffer.readableBytes());
                return;
            default:
                throw new Error("Shouldn't reach here.");
        }
    }
}
|
// The reserved high bit of the stream id in a RST_STREAM frame must be ignored
// by the decoder; the frame is delivered with the unmasked stream id.
@Test
public void testReservedSpdyRstStreamFrameBits() throws Exception {
    short type = 3;
    byte flags = 0;
    int length = 8;
    // Random non-zero stream id and status code (both must be non-zero to be valid).
    int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01;
    int statusCode = RANDOM.nextInt() | 0x01;
    ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length);
    encodeControlFrameHeader(buf, type, flags, length);
    buf.writeInt(streamId | 0x80000000); // should ignore reserved bit
    buf.writeInt(statusCode);
    decoder.decode(buf);
    verify(delegate).readRstStreamFrame(streamId, statusCode);
    assertFalse(buf.isReadable());
    buf.release();
}
|
/**
 * Copies every (address, callId) pair tracked by this registry into the given
 * {@link LiveOperations} accumulator.
 *
 * @param liveOperations the accumulator to populate
 */
public void populate(LiveOperations liveOperations) {
    this.liveOperations.forEach((address, callIds) ->
            callIds.keySet().forEach(callId -> liveOperations.add(address, callId)));
}
|
// Registers operations for two member addresses and verifies populate reports
// the right addresses and call ids (order of call ids is unspecified).
@Test
public void testPopulate() throws UnknownHostException {
    r.register(createOperation("1.2.3.4", 1234, 2223L));
    r.register(createOperation("1.2.3.4", 1234, 2222L));
    r.register(createOperation("1.2.3.3", 1234, 2222L));
    CallsPerMember liveOperations = new CallsPerMember(new Address("1.2.3.3", 1234));
    r.populate(liveOperations);
    Set<Address> addresses = liveOperations.addresses();
    assertEquals(2, addresses.size());
    assertTrue(addresses.contains(new Address("1.2.3.4", 1234)));
    assertTrue(addresses.contains(new Address("1.2.3.3", 1234)));
    // Call ids for 1.2.3.4 may appear in either order.
    long[] runningOperations = liveOperations.toOpControl(new Address("1.2.3.4", 1234)).runningOperations();
    assertTrue(Arrays.equals(new long[] { 2222, 2223}, runningOperations)
            || Arrays.equals(new long[] { 2223, 2222 }, runningOperations));
    runningOperations = liveOperations.toOpControl(new Address("1.2.3.3", 1234)).runningOperations();
    assertArrayEquals(new long[] { 2222 }, runningOperations);
}
|
/**
 * Decides whether a request should be logged, applying per-path and per-response-code
 * rate limits.
 *
 * @param logger       logger used to report (itself rate limited) "limit hit" notices
 * @param path         request path to check against the path limiters
 * @param responseCode response code to check against the response-code limiters
 * @return {@code true} if the caller should log the request, {@code false} if it was
 *         rate limited by either limiter
 */
public boolean shouldLog(final Logger logger, final String path, final int responseCode) {
    // Single map lookup instead of containsKey + get: same semantics (a null value
    // means "no limiter configured"), one fewer hash lookup.
    final RateLimiter pathLimiter = rateLimitersByPath.get(path);
    if (pathLimiter != null && !pathLimiter.tryAcquire()) {
        // The notice itself is rate limited so we don't flood the log when saturated.
        if (pathLimitHit.tryAcquire()) {
            logger.info("Hit rate limit for path " + path + " with limit " + pathLimiter.getRate());
        }
        return false;
    }
    final RateLimiter responseCodeLimiter = rateLimitersByResponseCode.get(responseCode);
    if (responseCodeLimiter != null && !responseCodeLimiter.tryAcquire()) {
        if (responseCodeLimitHit.tryAcquire()) {
            logger.info("Hit rate limit for response code " + responseCode + " with limit "
                + responseCodeLimiter.getRate());
        }
        return false;
    }
    return true;
}
|
// The limiter grants the first two permits and rejects the next two; the third and
// fourth calls must be skipped and the "limit hit" notice logged (also rate limited).
@Test
public void shouldSkipRateLimited_path() {
    // Given:
    when(rateLimiter.tryAcquire()).thenReturn(true, true, false, false);
    when(rateLimiter.getRate()).thenReturn(2d);
    // When:
    assertThat(loggingRateLimiter.shouldLog(logger, PATH, 200), is(true));
    assertThat(loggingRateLimiter.shouldLog(logger, PATH, 200), is(true));
    assertThat(loggingRateLimiter.shouldLog(logger, PATH, 200), is(false));
    assertThat(loggingRateLimiter.shouldLog(logger, PATH, 200), is(false));
    // Then:
    verify(rateLimiter, times(4)).tryAcquire();
    verify(logger, times(2)).info("Hit rate limit for path /query with limit 2.0");
}
|
/**
 * Issues the {@code run} RPC with the given per-call deadline applied.
 * {@code withDeadline} derives a new stub, so the shared stub is left untouched.
 *
 * @param request  the plugin run request to send
 * @param deadline absolute deadline applied to this single call
 * @return a future completing with the service's {@link RunResponse}
 */
public ListenableFuture<RunResponse> runWithDeadline(RunRequest request, Deadline deadline) {
    return pluginService.withDeadline(deadline).run(request);
}
|
// Builds a RunRequest matching five plugins (one endpoint each), backs the service
// with a fake that emits one DetectionReport per matched plugin, and asserts the
// response carries exactly one report per plugin.
@Test
public void run_multiplePluginValidRequest_returnMultipleDetectionReports() throws Exception {
    int numPluginsToTest = 5;
    List<NetworkEndpoint> endpoints = new ArrayList<>(numPluginsToTest);
    endpoints.add(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 80));
    endpoints.add(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 443));
    endpoints.add(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 123));
    endpoints.add(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 456));
    endpoints.add(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 789));
    PluginInfo.Builder pluginInfoBuilder =
        PluginInfo.newBuilder()
            .setType(PluginInfo.PluginType.VULN_DETECTION)
            .setVersion(PLUGIN_VERSION)
            .setDescription(PLUGIN_DESCRIPTION)
            .setAuthor(PLUGIN_AUTHOR);
    TargetInfo target = TargetInfo.newBuilder().addAllNetworkEndpoints(endpoints).build();
    RunRequest.Builder runRequestBuilder = RunRequest.newBuilder().setTarget(target);
    // One matched plugin per endpoint, each with a unique name.
    for (int i = 0; i < numPluginsToTest; i++) {
        PluginInfo pluginInfo =
            pluginInfoBuilder.setName(String.format(PLUGIN_NAME + " %d", i)).build();
        NetworkService httpService =
            NetworkService.newBuilder()
                .setNetworkEndpoint(endpoints.get(i))
                .setTransportProtocol(TransportProtocol.TCP)
                .setServiceName("http")
                .build();
        runRequestBuilder.addPlugins(
            MatchedPlugin.newBuilder()
                .addServices(httpService)
                .setPlugin(PluginDefinition.newBuilder().setInfo(pluginInfo).build()));
    }
    RunRequest runRequest = runRequestBuilder.build();
    // Fake service: echoes one detection report per matched plugin in the request.
    PluginServiceImplBase runImpl =
        new PluginServiceImplBase() {
            @Override
            public void run(RunRequest request, StreamObserver<RunResponse> responseObserver) {
                DetectionReportList.Builder reportListBuilder = DetectionReportList.newBuilder();
                for (MatchedPlugin plugin : request.getPluginsList()) {
                    reportListBuilder.addDetectionReports(
                        DetectionReport.newBuilder()
                            .setTargetInfo(request.getTarget())
                            .setNetworkService(plugin.getServices(0)));
                }
                responseObserver.onNext(RunResponse.newBuilder().setReports(reportListBuilder).build());
                responseObserver.onCompleted();
            }
        };
    serviceRegistry.addService(runImpl);
    ListenableFuture<RunResponse> run = pluginService.runWithDeadline(runRequest, DEADLINE_DEFAULT);
    RunResponse runResponse = run.get();
    assertThat(run.isDone()).isTrue();
    assertThat(runResponse.getReports().getDetectionReportsCount()).isEqualTo(numPluginsToTest);
    assertRunResponseContainsAllRunRequestParameters(runResponse, runRequest);
}
|
/**
 * Splits the configured inline expression and evaluates each fragment, flattening
 * the results into a single list.
 *
 * @return evaluated values, or an empty list when no expression is configured
 */
@Override
public List<String> splitAndEvaluate() {
    // Fast path: with no expression there is nothing to evaluate, so avoid creating
    // (and immediately closing) a ReflectContext at all.
    if (Strings.isNullOrEmpty(inlineExpression)) {
        return Collections.emptyList();
    }
    try (ReflectContext context = new ReflectContext(JAVA_CLASSPATH)) {
        return flatten(evaluate(context, GroovyUtils.split(handlePlaceHolder(inlineExpression))));
    }
}
|
// A Groovy expression that evaluates to a literal should yield exactly that one name.
@Test
void assertEvaluateForLiteral() {
    List<String> expected = createInlineExpressionParser("t_order_${'xx'}").splitAndEvaluate();
    assertThat(expected.size(), is(1));
    assertThat(expected, hasItems("t_order_xx"));
}
|
/**
 * Loads and parses the {@code app.xml} descriptor of the named application.
 *
 * @param appName name of the application whose descriptor should be read
 * @return the parsed application description
 * @throws ApplicationException if the descriptor cannot be read or parsed
 */
public ApplicationDescription getApplicationDescription(String appName) {
    try {
        XMLConfiguration config = new XMLConfiguration();
        // Disable commons-configuration list/attribute splitting so values with
        // commas and multi-valued attributes are read verbatim.
        config.setDelimiterParsingDisabled(true);
        config.setAttributeSplittingDisabled(true);
        config.load(appFile(appName, APP_XML));
        return loadAppDescription(config);
    } catch (Exception e) {
        throw new ApplicationException("Unable to get app description", e);
    }
}
|
// Saving a zipped app and then loading its description should round-trip the metadata.
@Test
public void loadApp() throws IOException {
    saveZippedApp();
    ApplicationDescription app = aar.getApplicationDescription(APP_NAME);
    validate(app);
}
|
/**
 * Renders the schema as SQL text, appending the type postfix, and optionally
 * stripping the top-level STRUCT wrapper when formatting as a column list.
 */
@Override
public String format(final Schema schema) {
    final String formatted = SchemaWalker.visit(schema, new Converter()) + typePostFix(schema);
    if (options.contains(Option.AS_COLUMN_LIST)) {
        return stripTopLevelStruct(formatted);
    }
    return formatted;
}
|
// Optional (nullable) BOOLEAN renders as plain "BOOLEAN" in both formatting modes.
@Test
public void shouldFormatOptionalBoolean() {
    assertThat(DEFAULT.format(Schema.OPTIONAL_BOOLEAN_SCHEMA), is("BOOLEAN"));
    assertThat(STRICT.format(Schema.OPTIONAL_BOOLEAN_SCHEMA), is("BOOLEAN"));
}
|
/**
 * Lists extensions of this handler's scheme type, honoring the request's sort and
 * paging query parameters, and renders the result as a JSON list.
 */
@Override
@NonNull
public Mono<ServerResponse> handle(@NonNull ServerRequest request) {
    var sortableRequest = new SortableRequest(request.exchange());
    var listOptions = sortableRequest.toListOptions();
    var pageRequest = sortableRequest.toPageRequest();
    return client.listBy(scheme.type(), listOptions, pageRequest)
        .flatMap(listResult -> ServerResponse.ok()
            .contentType(MediaType.APPLICATION_JSON)
            .bodyValue(listResult));
}
|
// The handler should query the client with the scheme's type and render the list
// result as a JSON 200 response.
@Test
void shouldHandleCorrectly() {
    var scheme = Scheme.buildFromType(FakeExtension.class);
    var listHandler = new ExtensionListHandler(scheme, client);
    var exchange = MockServerWebExchange.from(MockServerHttpRequest.get("/fake")
        .queryParam("sort", "metadata.name,desc"));
    var serverRequest = MockServerRequest.builder().exchange(exchange).build();
    final var fake01 = FakeExtension.createFake("fake01");
    final var fake02 = FakeExtension.createFake("fake02");
    var fakeListResult = new ListResult<>(0, 0, 2, List.of(fake01, fake02));
    when(client.listBy(same(FakeExtension.class), any(ListOptions.class), any()))
        .thenReturn(Mono.just(fakeListResult));
    var responseMono = listHandler.handle(serverRequest);
    StepVerifier.create(responseMono)
        .consumeNextWith(response -> {
            assertEquals(HttpStatus.OK, response.statusCode());
            assertEquals(MediaType.APPLICATION_JSON, response.headers().getContentType());
            assertTrue(response instanceof EntityResponse<?>);
            assertEquals(fakeListResult, ((EntityResponse<?>) response).entity());
        })
        .verifyComplete();
    verify(client).listBy(same(FakeExtension.class), any(ListOptions.class), any());
}
|
/**
 * Matches {@code path} against the Ant-style {@code pattern}, delegating to the
 * three-argument overload with case-sensitive matching enabled.
 */
public boolean match(String pattern, String path) {
    final boolean caseSensitive = true;
    return match(pattern, path, caseSensitive);
}
|
// Two-argument match() defaults to case-sensitive; the three-argument overload
// controls sensitivity explicitly.
@Test
public void testCaseSensitive() {
    AntPathMatcher matcher = new AntPathMatcher();
    assertTrue(matcher.match("foo/**/*.txt", "foo/blah.txt", true));
    assertTrue(matcher.match("foo/**/*.txt", "foo/blah.txt", false));
    assertTrue(matcher.match("foo/**/*.txt", "foo/BLAH.txt"));
    assertFalse(matcher.match("FOO/**/*.txt", "foo/blah.txt"));
    assertFalse(matcher.match("foo/**/*.TXT", "foo/blah.txt"));
    assertTrue(matcher.match("foo/**/*.TXT", "foo/blah.txt", false));
    assertTrue(matcher.match("FOO/**/*.txt", "foo/blah.txt", false));
    // Fix: this assertion previously appeared twice verbatim; the duplicate is removed.
    assertFalse(matcher.match("FOO/**/*.txt", "foo/blah.txt", true));
}
|
/**
 * Number of bytes needed to hold all set bits, rounded up to a whole byte.
 * Mirrors the sizing rule of {@code BitSet#toByteArray()}:
 * https://docs.oracle.com/javase/8/docs/api/java/util/BitSet.html#toByteArray--
 */
public int byteLength()
{
    final int bitCount = bitSet.length();
    return (bitCount + Byte.SIZE - 1) / Byte.SIZE;
}
|
// After every bit set, byteLength() must agree with the actual serialized size.
@Test
public static void testByteLength()
{
    for (int length : new int[] {8, 800}) {
        Bitmap bitmap = new Bitmap(length);
        for (int i = 0; i < length; i++) {
            bitmap.setBit(i, true);
            assertEquals(bitmap.byteLength(), bitmap.toBytes().length);
        }
    }
}
|
/**
 * Recursively converts a YAML node tree to its JSON counterpart:
 * mapping -> JSONObject, sequence -> JSONArray, scalar -> its value,
 * {@code null} -> {@code JSONObject.NULL}.
 *
 * @throws IllegalArgumentException for any other node implementation
 */
public static Object convert(YamlNode yamlNode) {
    if (yamlNode == null) {
        return JSONObject.NULL;
    }
    if (yamlNode instanceof YamlMapping mapping) {
        JSONObject jsonObject = new JSONObject();
        mapping.childrenPairs()
                .forEach(pair -> jsonObject.put(pair.nodeName(), convert(pair.childNode())));
        return jsonObject;
    }
    if (yamlNode instanceof YamlSequence sequence) {
        JSONArray jsonArray = new JSONArray();
        sequence.children().forEach(child -> jsonArray.put(convert(child)));
        return jsonArray;
    }
    if (yamlNode instanceof YamlScalar scalar) {
        return scalar.nodeValue();
    }
    throw new IllegalArgumentException("Unknown type " + yamlNode.getClass().getName());
}
|
// A YamlNode implementation that is neither mapping, sequence, nor scalar must be
// rejected with IllegalArgumentException.
@Test
public void convertUnknownNode() {
    YamlMappingImpl parentNode = createYamlMapping();
    YamlMappingImpl hazelcast = (YamlMappingImpl) parentNode.childAsMapping("hazelcast");
    // Anonymous node of an unrecognized concrete type.
    YamlNode unknownNode = new YamlNode() {
        @Override
        public YamlNode parent() {
            return hazelcast;
        }
        @Override
        public String nodeName() {
            return "unknown-node";
        }
        @Override
        public String path() {
            return "null";
        }
    };
    hazelcast.addChild("unknown-node", unknownNode);
    Exception exception = assertThrows(IllegalArgumentException.class,
        () -> YamlToJsonConverter.convert(parentNode));
    assertTrue(exception.getMessage().contains("Unknown type "));
}
|
/**
 * Initializes the servlet. When no registry was injected via the constructor,
 * looks one up from the servlet-context attribute {@code METRICS_REGISTRY};
 * failing that, initialization aborts.
 *
 * @throws ServletException if no MetricRegistry can be located
 */
@Override
public void init(ServletConfig config) throws ServletException {
    super.init(config);
    final ServletContext context = config.getServletContext();
    if (null == registry) {
        final Object attribute = context.getAttribute(METRICS_REGISTRY);
        if (!(attribute instanceof MetricRegistry)) {
            throw new ServletException("Couldn't find a MetricRegistry instance.");
        }
        this.registry = (MetricRegistry) attribute;
    }
    this.allowedOrigin = context.getInitParameter(ALLOWED_ORIGIN);
    this.jsonpParamName = context.getInitParameter(CALLBACK_PARAM);
    setupMetricsModule(context);
}
|
// Passing null to the constructor should make init() fall back to the registry
// stored as a servlet-context attribute.
@Test
public void constructorWithRegistryAsArgumentUsesServletConfigWhenNull() throws Exception {
    final MetricRegistry metricRegistry = mock(MetricRegistry.class);
    final ServletContext servletContext = mock(ServletContext.class);
    final ServletConfig servletConfig = mock(ServletConfig.class);
    when(servletConfig.getServletContext()).thenReturn(servletContext);
    when(servletContext.getAttribute(eq(MetricsServlet.METRICS_REGISTRY)))
        .thenReturn(metricRegistry);
    final MetricsServlet metricsServlet = new MetricsServlet(null);
    metricsServlet.init(servletConfig);
    verify(servletConfig, times(1)).getServletContext();
    verify(servletContext, times(1)).getAttribute(eq(MetricsServlet.METRICS_REGISTRY));
}
|
/**
 * Deletes the given connection via the repository and returns the passed-in entity
 * so callers can inspect what was removed.
 */
public Connection deleteConnectionById(Connection connection) {
    connectionRepository.delete(connection);
    return connection;
}
|
// The service should delegate deletion to the repository exactly once and echo the
// entity back to the caller.
@Test
void deleteConnectionById() {
    doNothing().when(connectionRepositoryMock).delete(any(Connection.class));
    Connection result = connectionServiceMock.deleteConnectionById(new Connection());
    verify(connectionRepositoryMock, times(1)).delete(any(Connection.class));
    assertNotNull(result);
}
|
/**
 * Demo entry point: builds a car from nested property maps (two parts plus the car
 * itself) and logs the resolved model, price, and part details.
 */
public static void main(String[] args) {
    LOGGER.info("Constructing parts and car");
    var wheelProps = Map.of(
        Property.TYPE.toString(), "wheel",
        Property.MODEL.toString(), "15C",
        Property.PRICE.toString(), 100L);
    var doorProps = Map.of(
        Property.TYPE.toString(), "door",
        Property.MODEL.toString(), "Lambo",
        Property.PRICE.toString(), 300L);
    var carProps = Map.of(
        Property.MODEL.toString(), "300SL",
        Property.PRICE.toString(), 10000L,
        Property.PARTS.toString(), List.of(wheelProps, doorProps));
    var car = new Car(carProps);
    LOGGER.info("Here is our car:");
    LOGGER.info("-> model: {}", car.getModel().orElseThrow());
    LOGGER.info("-> price: {}", car.getPrice().orElseThrow());
    LOGGER.info("-> parts: ");
    for (var part : car.getParts()) {
        LOGGER.info("\t{}/{}/{}",
            part.getType().orElse(null),
            part.getModel().orElse(null),
            part.getPrice().orElse(null));
    }
}
|
// Smoke test: the demo main must run end-to-end without throwing.
@Test
void shouldExecuteAppWithoutException() {
    assertDoesNotThrow(() -> App.main(null));
}
|
// Constant-folds LARGEINT * LARGEINT by multiplying the operands' large-int values.
@ConstantFunction(name = "multiply", argTypes = {LARGEINT, LARGEINT}, returnType = LARGEINT)
public static ConstantOperator multiplyLargeInt(ConstantOperator first, ConstantOperator second) {
    return ConstantOperator.createLargeInt(first.getLargeInt().multiply(second.getLargeInt()));
}
|
// 100 * 100 folded at constant time must yield 10000.
@Test
public void multiplyLargeInt() {
    assertEquals("10000",
        ScalarOperatorFunctions.multiplyLargeInt(O_LI_100, O_LI_100).getLargeInt().toString());
}
|
// Shuts down the underlying config retriever, releasing its subscriptions.
public void shutdownConfigRetriever() {
    retriever.shutdown();
}
|
// A component declared with a config dependency should receive the value written
// to the directory config source.
@Test
void components_can_be_created_from_config() {
    writeBootstrapConfigs();
    dirConfigSource.writeConfig("test", "stringVal \"myString\"");
    Container container = newContainer(dirConfigSource);
    ComponentTakingConfig component = createComponentTakingConfig(getNewComponentGraph(container));
    assertEquals("myString", component.config.stringVal());
    container.shutdownConfigRetriever();
}
|
/**
 * One push cycle: push current metrics, then tear down once the pipeline (if any)
 * has reached a terminal state.
 */
private void run() {
    pushMetrics();
    if (pipelineResult == null) {
        return;
    }
    PipelineResult.State pipelineState = pipelineResult.getState();
    if (pipelineState.isTerminal()) {
        tearDown();
    }
}
|
// End-to-end: counters incremented in a DoFn must reach the configured metrics sink
// within one push period.
@Category({ValidatesRunner.class, UsesAttemptedMetrics.class, UsesCounterMetrics.class})
@Test
public void pushesUserMetrics() throws Exception {
    TestMetricsSink.clear();
    pipeline
        .apply(
            // Use maxReadTime to force unbounded mode.
            GenerateSequence.from(0).to(NUM_ELEMENTS).withMaxReadTime(Duration.standardDays(1)))
        .apply(ParDo.of(new CountingDoFn()));
    pipeline.run();
    // give metrics pusher time to push
    Thread.sleep(
        (pipeline.getOptions().as(MetricsOptions.class).getMetricsPushPeriod() + 1L) * 1000);
    assertThat(TestMetricsSink.getCounterValue(COUNTER_NAME), is(NUM_ELEMENTS));
}
|
// Creates a transform evaluator for the given applied transform; the input bundle is
// not consulted here. The raw cast is deliberate — createEvaluator rebinds the
// wildcard type parameters internally.
@SuppressWarnings({"unchecked", "rawtypes"})
@Override
public @Nullable <InputT> TransformEvaluator<InputT> forApplication(
    AppliedPTransform<?, ?, ?> application, CommittedBundle<?> inputBundle) {
    return createEvaluator((AppliedPTransform) application);
}
|
// With reader reuse disabled (reuse chance 0.0), each evaluator must close its
// reader, and a second evaluator built from the residual bundle must resume from
// the finalized checkpoint.
@Test
public void evaluatorClosesReaderAndResumesFromCheckpoint() throws Exception {
    ContiguousSet<Long> elems = ContiguousSet.create(Range.closed(0L, 20L), DiscreteDomain.longs());
    TestUnboundedSource<Long> source =
        new TestUnboundedSource<>(BigEndianLongCoder.of(), elems.toArray(new Long[0]));
    PCollection<Long> pcollection = p.apply(Read.from(source));
    AppliedPTransform<?, ?, ?> sourceTransform = DirectGraphs.getGraph(p).getProducer(pcollection);
    when(context.createRootBundle()).thenReturn(bundleFactory.createRootBundle());
    UncommittedBundle<Long> output = bundleFactory.createBundle(pcollection);
    when(context.createBundle(pcollection)).thenReturn(output);
    WindowedValue<UnboundedSourceShard<Long, TestCheckpointMark>> shard =
        WindowedValue.valueInGlobalWindow(
            UnboundedSourceShard.unstarted(source, NeverDeduplicator.create()));
    CommittedBundle<UnboundedSourceShard<Long, TestCheckpointMark>> inputBundle =
        bundleFactory
            .<UnboundedSourceShard<Long, TestCheckpointMark>>createRootBundle()
            .add(shard)
            .commit(Instant.now());
    UnboundedReadEvaluatorFactory factory =
        new UnboundedReadEvaluatorFactory(context, p.getOptions(), 0.0 /* never reuse */);
    TransformEvaluator<UnboundedSourceShard<Long, TestCheckpointMark>> evaluator =
        factory.forApplication(sourceTransform, inputBundle);
    evaluator.processElement(shard);
    TransformResult<UnboundedSourceShard<Long, TestCheckpointMark>> result =
        evaluator.finishBundle();
    // The unprocessed elements form the residual input for the next evaluator.
    CommittedBundle<UnboundedSourceShard<Long, TestCheckpointMark>> residual =
        inputBundle.withElements(
            (Iterable<WindowedValue<UnboundedSourceShard<Long, TestCheckpointMark>>>)
                result.getUnprocessedElements());
    TransformEvaluator<UnboundedSourceShard<Long, TestCheckpointMark>> secondEvaluator =
        factory.forApplication(sourceTransform, residual);
    secondEvaluator.processElement(Iterables.getOnlyElement(residual.getElements()));
    secondEvaluator.finishBundle();
    // Both evaluators ran with reuse disabled, so both readers must have been closed.
    assertThat(TestUnboundedSource.readerClosedCount, equalTo(2));
    assertThat(
        Iterables.getOnlyElement(residual.getElements()).getValue().getCheckpoint().isFinalized(),
        is(true));
}
|
/**
 * Computes the full list of cluster events implied by the difference between the
 * "before" and "after" states carried in {@code params}.
 */
public static List<Event> computeEventDiff(final Params params) {
    final List<Event> collected = new ArrayList<>();
    // Baseline params are derived fresh for each emitter; the two calls intentionally
    // do not share one instance.
    emitPerNodeDiffEvents(createBaselineParams(params), collected);
    emitWholeClusterDiffEvent(createBaselineParams(params), collected);
    emitDerivedBucketSpaceStatesDiffEvents(params, collected);
    return collected;
}
|
// Crossing the feed-block edge (null -> blocked) with a node-level exhaustion must
// produce exactly one per-node event and one cluster-level event.
@Test
void feed_block_engage_edge_with_node_exhaustion_info_emits_cluster_and_node_events() {
    final EventFixture fixture = EventFixture.createForNodes(3)
        .clusterStateBefore("distributor:3 storage:3")
        .feedBlockBefore(null)
        .clusterStateAfter("distributor:3 storage:3")
        .feedBlockAfter(ClusterStateBundle.FeedBlock.blockedWith(
            "we're closed", setOf(exhaustion(1, "oil"))));
    final List<Event> events = fixture.computeEventDiff();
    assertThat(events.size(), equalTo(2));
    assertThat(events, hasItem(allOf(
        eventForNode(storageNode(1)),
        nodeEventWithDescription("Added resource exhaustion: oil on node 1 [unknown hostname] is 80.0% full (the configured limit is 70.0%)"),
        nodeEventForBaseline())));
    assertThat(events, hasItem(
        clusterEventWithDescription("Cluster feed blocked due to resource exhaustion: we're closed")));
}
|
/**
 * Moves the stream position to {@code newPos}, updating both the local position and
 * the underlying channel.
 *
 * @throws IllegalStateException    if the stream has been closed
 * @throws IllegalArgumentException if {@code newPos} is negative
 * @throws UncheckedIOException     if the channel seek fails
 */
@Override
public void seek(long newPos) {
    Preconditions.checkState(!closed, "already closed");
    Preconditions.checkArgument(newPos >= 0, "position is negative: %s", newPos);
    // NOTE(review): pos is updated before channel.seek; if the seek throws, the
    // recorded position and the channel position diverge — confirm callers treat the
    // stream as unusable after an UncheckedIOException.
    pos = newPos;
    try {
        channel.seek(newPos);
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
|
// Seeking to the middle of a 1 MiB object must make subsequent reads return exactly
// the second half of the data.
@Test
public void testSeek() throws Exception {
    BlobId blobId = BlobId.fromGsUtilUri("gs://bucket/path/to/seek.dat");
    byte[] data = randomData(1024 * 1024);
    writeGCSData(blobId, data);
    try (SeekableInputStream in =
        new GCSInputStream(storage, blobId, null, gcpProperties, MetricsContext.nullMetrics())) {
        in.seek(data.length / 2);
        byte[] actual = new byte[data.length / 2];
        IOUtil.readFully(in, actual, 0, data.length / 2);
        byte[] expected = Arrays.copyOfRange(data, data.length / 2, data.length);
        assertThat(actual).isEqualTo(expected);
    }
}
|
/**
 * {@inheritDoc}
 *
 * <p>Delegates validity checking to the underlying database connection manager.</p>
 */
@Override
public boolean isValid(final int timeout) throws SQLException {
    return databaseConnectionManager.isValid(timeout);
}
|
// A connection with no physical connections cached yet should still report valid.
@Test
void assertIsValidWhenEmptyConnection() throws SQLException {
    try (ShardingSphereConnection connection = new ShardingSphereConnection(DefaultDatabase.LOGIC_NAME, mockContextManager())) {
        assertTrue(connection.isValid(0));
    }
}
|
/**
 * Extracts the numeric transaction id from an XID of the form "ip:port:txId".
 *
 * @param xid the XID string; may be {@code null}
 * @return the transaction id, or {@code -1} when {@code xid} is {@code null}.
 *         When no ':' is present, the whole string is parsed as the id.
 */
public static long getTransactionId(String xid) {
    if (xid == null) {
        return -1;
    }
    // lastIndexOf returns -1 when absent, so +1 yields index 0 and the full string.
    return Long.parseLong(xid.substring(xid.lastIndexOf(':') + 1));
}
|
// Null maps to -1; a full "ip:port:txId" XID yields the trailing numeric segment.
@Test
public void testGetTransactionId() {
    assertThat(XID.getTransactionId(null)).isEqualTo(-1);
    assertThat(XID.getTransactionId("127.0.0.1:8080:8577662204289747564")).isEqualTo(8577662204289747564L);
}
|
/**
 * Reports whether this adapter handles the given logger class: true only when the
 * expected logger type is known and {@code loggerClass} is that type or a subtype.
 */
@Override
public boolean isAdaptedLogger(Class<?> loggerClass) {
    final Class<?> expected = getExpectedLoggerClass();
    if (expected == null) {
        return false;
    }
    return expected.isAssignableFrom(loggerClass);
}
|
// The Log4j2 adapter must accept the log4j-over-slf4j logger class and reject others.
@Test
void testIsAdaptedLogger() {
    assertTrue(log4J2NacosLoggingAdapter.isAdaptedLogger(org.apache.logging.slf4j.Log4jLogger.class));
    assertFalse(log4J2NacosLoggingAdapter.isAdaptedLogger(Logger.class));
}
|
/**
 * Adds broadcast route units to the given route context: when none of the configured
 * table names belong to the broadcast rule, all broadcast tables are routed;
 * otherwise only the matched logical tables are.
 */
@Override
public RouteContext route(final RouteContext routeContext, final BroadcastRule broadcastRule) {
    Collection<String> logicTableNames = broadcastRule.getBroadcastRuleTableNames(broadcastRuleTableNames);
    RouteContext broadcastRouteContext = logicTableNames.isEmpty()
            ? getRouteContext(broadcastRule)
            : getRouteContext(broadcastRule, logicTableNames);
    routeContext.getRouteUnits().addAll(broadcastRouteContext.getRouteUnits());
    return routeContext;
}
|
// When the rule matches none of the requested tables, the engine should still emit
// one route unit per data source (with an empty table mapper).
@Test
void assertRouteWithoutBroadcastRuleTable() {
    Collection<String> broadcastRuleTableNames = Collections.singleton("t_address");
    BroadcastTableBroadcastRoutingEngine engine = new BroadcastTableBroadcastRoutingEngine(broadcastRuleTableNames);
    BroadcastRule broadcastRule = mock(BroadcastRule.class);
    when(broadcastRule.getDataSourceNames()).thenReturn(Arrays.asList("ds_0", "ds_1"));
    when(broadcastRule.getBroadcastRuleTableNames(any())).thenReturn(Collections.emptyList());
    RouteContext routeContext = engine.route(new RouteContext(), broadcastRule);
    assertThat(routeContext.getRouteUnits().size(), is(2));
    Iterator<RouteUnit> iterator = routeContext.getRouteUnits().iterator();
    assertRouteMapper(iterator.next(), "ds_0", "");
    assertRouteMapper(iterator.next(), "ds_1", "");
}
|
/**
 * Maps each element of {@code collection} through {@code function} and collects the
 * results into a {@link Set}.
 * NOTE(review): delegates with a third argument of {@code false} — presumably a
 * parallel-stream flag; confirm against the three-argument overload.
 */
public static <E, T> Set<T> toSet(Collection<E> collection, Function<E, T> function) {
    return toSet(collection, function, false);
}
|
// Null and empty inputs yield the empty set; duplicated names collapse to one entry.
@Test
public void testTranslate2Set() {
    Set<String> set = CollStreamUtil.toSet(null, Student::getName);
    assertEquals(set, Collections.EMPTY_SET);
    List<Student> students = new ArrayList<>();
    set = CollStreamUtil.toSet(students, Student::getName);
    assertEquals(set, Collections.EMPTY_SET);
    students.add(new Student(1, 1, 1, "张三"));
    students.add(new Student(1, 2, 2, "李四"));
    students.add(new Student(2, 1, 1, "李四"));
    students.add(new Student(2, 2, 2, "李四"));
    students.add(new Student(2, 3, 2, "霸天虎"));
    set = CollStreamUtil.toSet(students, Student::getName);
    Set<String> compare = new HashSet<>();
    compare.add("张三");
    compare.add("李四");
    compare.add("霸天虎");
    assertEquals(set, compare);
}
|
/**
 * Extracts the output type information of a {@link MapFunction} given its input type.
 * Delegates with a {@code null} function name and {@code false} for the final flag;
 * the exact semantics of those arguments are defined by the four-argument overload.
 */
@PublicEvolving
public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes(
    MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) {
    return getMapReturnTypes(mapInterface, inType, null, false);
}
|
// A declared Integer tuple field fed where String is required must be rejected.
@Test
void testFunctionDependingOnInputWithTupleInputWithTypeMismatch() {
    IdentityMapper2<Boolean> function = new IdentityMapper2<Boolean>();
    TypeInformation<Tuple2<Boolean, String>> inputType =
        new TupleTypeInfo<Tuple2<Boolean, String>>(
            BasicTypeInfo.BOOLEAN_TYPE_INFO, BasicTypeInfo.INT_TYPE_INFO);
    // input is: Tuple2<Boolean, Integer>
    // allowed: Tuple2<?, String>
    assertThatThrownBy(() -> TypeExtractor.getMapReturnTypes(function, inputType))
        .isInstanceOf(InvalidTypesException.class);
}
|
/**
 * Parses a Chinese numeral string (e.g. "两万二零一十") into its int value.
 *
 * @param chinese the Chinese numeral text
 * @return the parsed value
 * @throws IllegalArgumentException on two adjacent plain digits or an unknown character
 */
public static int chineseToNumber(String chinese) {
    final int length = chinese.length();
    int result = 0;
    // Running total of the current "section" — the span governed by a section unit
    // such as 万 (10^4) or 亿 (10^8).
    int section = 0;
    int number = 0;
    ChineseUnit unit = null;
    char c;
    for (int i = 0; i < length; i++) {
        c = chinese.charAt(i);
        final int num = chineseToNumber(c);
        if (num >= 0) {
            if (num == 0) {
                // A zero closes the current group and invalidates the pending unit,
                // e.g. 两万二零一十 (the 零 separates the hundreds from the tens).
                if (number > 0 && null != unit) {
                    section += number * (unit.value / 10);
                }
                unit = null;
            } else if (number > 0) {
                // Two plain digits in a row is malformed input.
                throw new IllegalArgumentException(StrUtil.format("Bad number '{}{}' at: {}", chinese.charAt(i - 1), c, i));
            }
            // Plain digit: remember it for the next unit character.
            number = num;
        } else {
            unit = chineseToUnit(c);
            if (null == unit) {
                // Character is neither a digit nor a recognized unit.
                throw new IllegalArgumentException(StrUtil.format("Unknown unit '{}' at: {}", c, i));
            }
            // Unit character.
            if (unit.secUnit) {
                // Section unit (万/亿): close the section and fold it into the result.
                section = (section + number) * unit.value;
                result += section;
                section = 0;
            } else {
                // Ordinary unit (十/百/千): combine with the single preceding digit.
                int unitNumber = number;
                if (0 == number && 0 == i) {
                    // issue#1726: a leading unit implies the digit 1, e.g.
                    // 十二 -> 一十二 (12), 百二 -> 一百二 (120).
                    unitNumber = 1;
                }
                section += (unitNumber * unit.value);
            }
            number = 0;
        }
    }
    // A trailing digit after a unit stands for one tenth of that unit's weight,
    // e.g. the 二 in 一百二 means 20.
    if (number > 0 && null != unit) {
        number = number * (unit.value / 10);
    }
    return result + section + number;
}
|
// An illegal (non-numeral, non-unit) character must be rejected.
@Test
public void badNumberTest2() {
    assertThrows(IllegalArgumentException.class, () -> {
        // "你" is neither a digit nor a unit character.
        NumberChineseFormatter.chineseToNumber("一百你三");
    });
}
|
// Component activation: register this provider, claim an application id, and start
// listening for XMPP device events.
@Activate
public void activate(ComponentContext context) {
    providerService = providerRegistry.register(this);
    appId = coreService.registerApplication(APP_NAME);
    controller.addXmppDeviceListener(deviceListener);
    logger.info("Started");
}
|
// Activation must register the provider, wire the services, record the app id, and
// attach the XMPP device listener.
@Test
public void activate() throws Exception {
    assertTrue("Provider should be registered", deviceRegistry.getProviders().contains(provider.id()));
    assertEquals("Incorrect device service", deviceService, provider.deviceService);
    assertEquals("Incorrect provider service", providerService, provider.providerService);
    // Fix: assertion message previously read "Incorrent".
    assertEquals("Incorrect application id", appId, provider.appId);
    assertNotNull("XMPP device listener should be added", xmppController.listener);
}
|
/**
 * Converts the given infix expression into a token list using a shunting-yard style
 * algorithm (operator stack plus output list).
 * NOTE(review): isOperand() appears to select operator/parenthesis tokens here (the
 * ")" case lives inside that branch) — the naming looks inverted; confirm.
 */
public List<String> toPrefix(String in) {
    List<String> tokens = buildTokens(alignINClause(in));
    List<String> output = new ArrayList<>();
    List<String> stack = new ArrayList<>();
    for (String token : tokens) {
        if (isOperand(token)) {
            if (token.equals(")")) {
                // Pop stacked tokens back to the matching "(".
                while (openParanthesesFound(stack)) {
                    output.add(stack.remove(stack.size() - 1));
                }
                if (!stack.isEmpty()) {
                    // temporarily fix for issue #189: guarded pop of the "(" itself so
                    // an unmatched ")" does not throw on an empty stack.
                    stack.remove(stack.size() - 1);
                }
            } else {
                // Pop tokens of higher or equal precedence before pushing this one.
                while (openParanthesesFound(stack) && !hasHigherPrecedence(token, stack.get(stack.size() - 1))) {
                    output.add(stack.remove(stack.size() - 1));
                }
                stack.add(token);
            }
        } else {
            // Plain token goes straight to the output.
            output.add(token);
        }
    }
    // Flush whatever remains on the stack.
    while (!stack.isEmpty()) {
        output.add(stack.remove(stack.size() - 1));
    }
    return output;
}
|
// NOTE(review): despite its name, this test carries no expected-exception assertion
// (plain @Test, no assertThrows/expected). It currently passes only because
// toPrefix tolerates an unmatched ")" — confirm the intended contract and tighten.
@Test
public void parserShouldThrowOnInvalidInput() {
    parser.toPrefix(")");
}
|
/**
 * Orders loggers by name with the root logger always first; equal names compare
 * as 0, everything else alphabetically.
 */
public int compare(Logger l1, Logger l2) {
    final String name1 = l1.getName();
    final String name2 = l2.getName();
    if (name1.equals(name2)) {
        return 0;
    }
    if (Logger.ROOT_LOGGER_NAME.equals(name1)) {
        return -1;
    }
    if (Logger.ROOT_LOGGER_NAME.equals(name2)) {
        return 1;
    }
    return name1.compareTo(name2);
}
|
// Root must sort before any named logger in BOTH comparison directions, and equal
// loggers (including root vs root) must compare as 0.
@Test
public void testSmoke() {
    assertEquals(0, comparator.compare(a, a));
    assertEquals(-1, comparator.compare(a, b));
    assertEquals(1, comparator.compare(b, a));
    assertEquals(-1, comparator.compare(root, a));
    // following two tests failed before bug #127 was fixed
    assertEquals(1, comparator.compare(a, root));
    assertEquals(0, comparator.compare(root, root));
}
|
// Exposes the session factory this object operates on (used by tests).
public SessionFactory getSessionFactory() {
    return sessionFactory;
}
|
// The health check must hold exactly the session factory it was constructed with.
@Test
void hasASessionFactory() throws Exception {
    assertThat(healthCheck().getSessionFactory())
        .isEqualTo(factory);
}
|
/**
 * Records one block's (storage types, policy) combination in the summary, resolving
 * the combination's actual storage policy the first time it is seen.
 */
void add(StorageType[] storageTypes, BlockStoragePolicy policy) {
    StorageTypeAllocation combo = new StorageTypeAllocation(storageTypes, policy);
    Long previousCount = storageComboCounts.get(combo);
    if (previousCount == null) {
        // First occurrence: start the count and resolve the actual policy once.
        storageComboCounts.put(combo, 1L);
        combo.setActualStoragePolicy(getStoragePolicy(combo.getStorageTypes()));
    } else {
        storageComboCounts.put(combo, previousCount + 1);
    }
    totalBlocks++;
}
|
// Storage-type arrays that differ only in ordering must be counted as the same
// combination, and combinations matching the WARM policy must be tagged "(WARM)".
@Test
public void testMultipleWarmsInDifferentOrder() {
    BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
    StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
    BlockStoragePolicy warm = bsps.getPolicy("WARM");
    //DISK:1,ARCHIVE:1
    sts.add(new StorageType[]{StorageType.DISK,StorageType.ARCHIVE},warm);
    sts.add(new StorageType[]{StorageType.ARCHIVE,StorageType.DISK},warm);
    //DISK:2,ARCHIVE:1
    sts.add(new StorageType[]{StorageType.ARCHIVE,
        StorageType.DISK,StorageType.DISK},warm);
    sts.add(new StorageType[]{StorageType.DISK,
        StorageType.ARCHIVE,StorageType.DISK},warm);
    sts.add(new StorageType[]{StorageType.DISK,
        StorageType.DISK,StorageType.ARCHIVE},warm);
    //DISK:1,ARCHIVE:2
    sts.add(new StorageType[]{StorageType.DISK,
        StorageType.ARCHIVE,StorageType.ARCHIVE},warm);
    sts.add(new StorageType[]{StorageType.ARCHIVE,
        StorageType.DISK,StorageType.ARCHIVE},warm);
    sts.add(new StorageType[]{StorageType.ARCHIVE,
        StorageType.ARCHIVE,StorageType.DISK},warm);
    //DISK:2,ARCHIVE:2
    sts.add(new StorageType[]{StorageType.ARCHIVE,
        StorageType.ARCHIVE,StorageType.DISK,StorageType.DISK},warm);
    Map<String, Long> actualOutput = convertToStringMap(sts);
    Assert.assertEquals(4,actualOutput.size());
    Map<String, Long> expectedOutput = new HashMap<>();
    expectedOutput.put("WARM|DISK:1,ARCHIVE:1(WARM)", 2l);
    expectedOutput.put("WARM|DISK:2,ARCHIVE:1", 3l);
    expectedOutput.put("WARM|DISK:1,ARCHIVE:2(WARM)", 3l);
    expectedOutput.put("WARM|DISK:2,ARCHIVE:2", 1l);
    Assert.assertEquals(expectedOutput,actualOutput);
}
|
/**
 * Computes the 64-bit MurmurHash of the given text, first encoding it to bytes with
 * the default charset.
 */
public static long hash64(CharSequence data) {
    return hash64(StrUtil.bytes(data, DEFAULT_CHARSET));
}
|
// Pins the 64-bit MurmurHash values of several UTF-8 strings so the implementation
// stays stable across versions (hashes are persisted by callers).
@Test
public void hash64Test() {
    long hv = MurmurHash.hash64(StrUtil.utf8Bytes("你"));
    assertEquals(-1349759534971957051L, hv);
    hv = MurmurHash.hash64(StrUtil.utf8Bytes("你好"));
    assertEquals(-7563732748897304996L, hv);
    hv = MurmurHash.hash64(StrUtil.utf8Bytes("见到你很高兴"));
    assertEquals(-766658210119995316L, hv);
    hv = MurmurHash.hash64(StrUtil.utf8Bytes("我们将通过生成一个大的文件的方式来检验各种方法的执行效率因为这种方式在结束的时候需要执行文件"));
    assertEquals(-7469283059271653317L, hv);
}
|
/**
 * Renders a byte array as a lowercase hex string, two characters per byte
 * (zero-padded), with no separators.
 */
public static String baToHexString(byte[] ba) {
    final StringBuilder hex = new StringBuilder(ba.length * 2);
    for (final byte value : ba) {
        final int unsigned = value & 0xff;
        if (unsigned < 0x10) {
            hex.append('0'); // $NON-NLS-1$ zero padding for single-digit values
        }
        hex.append(Integer.toHexString(unsigned));
    }
    return hex.toString();
}
|
// The separator overload must interleave the given character and still zero-pad and
// handle negative bytes as unsigned.
@Test
public void testBaToHexStringSeparator() {
    assertEquals("", JOrphanUtils.baToHexString(new byte[]{}, '-'));
    assertEquals("00", JOrphanUtils.baToHexString(new byte[]{0}, '-'));
    assertEquals("0f-10-7f-80-81-ff", JOrphanUtils.baToHexString(new byte[]{15, 16, 127, -128, -127, -1}, '-'));
}
|
/**
 * Parses a delimiter-separated body into an immutable key/value lookup map.
 * Lines starting with the ignore character are skipped, as are lines with too few
 * columns; keys may be lowercased and optional quote characters are stripped.
 * NOTE(review): ImmutableMap.Builder#build throws on duplicate keys — confirm input
 * bodies are guaranteed duplicate-free (case-insensitive mode widens collisions).
 */
public Map<String, String> parse(String body) {
    final ImmutableMap.Builder<String, String> newLookupBuilder = ImmutableMap.builder();
    final String[] lines = body.split(lineSeparator);
    for (String line : lines) {
        // Skip comment lines.
        if (line.startsWith(this.ignorechar)) {
            continue;
        }
        final String[] values = line.split(this.splitPattern);
        // Skip lines lacking the key column (and value column, unless key-only mode).
        if (values.length <= Math.max(keyColumn, keyOnly ? 0 : valueColumn)) {
            continue;
        }
        final String key = this.caseInsensitive ? values[keyColumn].toLowerCase(Locale.ENGLISH) : values[keyColumn];
        final String value = this.keyOnly ? "" : values[valueColumn].trim();
        // Strip one leading and/or trailing quote character, when configured.
        final String finalKey = Strings.isNullOrEmpty(quoteChar) ? key.trim() : key.trim().replaceAll("^" + quoteChar + "|" + quoteChar + "$", "");
        final String finalValue = Strings.isNullOrEmpty(quoteChar) ? value.trim() : value.trim().replaceAll("^" + quoteChar + "|" + quoteChar + "$", "");
        newLookupBuilder.put(finalKey, finalValue);
    }
    return newLookupBuilder.build();
}
|
// A colon-separated file with a comment line should parse into exactly the three
// key/value pairs, in order, with the comment skipped.
@Test
public void parseSimpleFile() throws Exception {
    final String input = "# Sample file for testing\n" +
        "foo:23\n" +
        "bar:42\n" +
        "baz:17";
    final DSVParser dsvParser = new DSVParser("#", "\n", ":", "", false, false, 0, Optional.of(1));
    final Map<String, String> result = dsvParser.parse(input);
    assertThat(result)
        .isNotNull()
        .isNotEmpty()
        .hasSize(3)
        .containsExactly(
            new AbstractMap.SimpleEntry<>("foo", "23"),
            new AbstractMap.SimpleEntry<>("bar", "42"),
            new AbstractMap.SimpleEntry<>("baz", "17")
        );
}
|
/**
 * Wraps a possibly-null value in an immutable set: empty for {@code null}, otherwise
 * a singleton containing the value.
 */
public static <T> Set<T> ofNullable(@Nullable T obj) {
    if (obj == null) {
        return Collections.emptySet();
    }
    return Collections.singleton(obj);
}
|
// Null input must map to an empty set, never to a set containing null.
@Test
void testOfNullableWithNull() {
    assertThat(CollectionUtil.ofNullable(null)).isEmpty();
}
|
/**
 * Builds a Parquet {@link MessageType} from the flat Thrift schema-element list.
 * The first element is the root message; the remaining elements are consumed
 * sequentially by {@code buildChildren} according to each element's child count.
 */
MessageType fromParquetSchema(List<SchemaElement> schema, List<ColumnOrder> columnOrders) {
    Iterator<SchemaElement> iterator = schema.iterator();
    SchemaElement root = iterator.next();
    Types.MessageTypeBuilder builder = Types.buildMessage();
    // The field id is optional in the Thrift representation.
    if (root.isSetField_id()) {
        builder.id(root.field_id);
    }
    buildChildren(builder, iterator, root.getNum_children(), columnOrders, 0);
    return builder.named(root.name);
}
|
// A legacy schema using ConvertedType.MAP / MAP_KEY_VALUE (no logical types) must
// still convert into the expected map message type.
@Test
public void testMapConvertedTypeReadWrite() throws Exception {
    List<SchemaElement> oldConvertedTypeSchemaElements = new ArrayList<>();
    oldConvertedTypeSchemaElements.add(new SchemaElement("example").setNum_children(1));
    oldConvertedTypeSchemaElements.add(new SchemaElement("testMap")
        .setRepetition_type(FieldRepetitionType.REQUIRED)
        .setNum_children(1)
        .setConverted_type(ConvertedType.MAP)
        .setLogicalType(null));
    oldConvertedTypeSchemaElements.add(new SchemaElement("map")
        .setRepetition_type(FieldRepetitionType.REPEATED)
        .setNum_children(2)
        .setConverted_type(ConvertedType.MAP_KEY_VALUE)
        .setLogicalType(null));
    oldConvertedTypeSchemaElements.add(new SchemaElement("key")
        .setType(Type.BYTE_ARRAY)
        .setRepetition_type(FieldRepetitionType.REQUIRED)
        .setConverted_type(ConvertedType.UTF8)
        .setLogicalType(null));
    oldConvertedTypeSchemaElements.add(new SchemaElement("value")
        .setType(Type.INT64)
        .setRepetition_type(FieldRepetitionType.REQUIRED)
        .setConverted_type(null)
        .setLogicalType(null));
    ParquetMetadataConverter parquetMetadataConverter = new ParquetMetadataConverter();
    MessageType messageType = parquetMetadataConverter.fromParquetSchema(oldConvertedTypeSchemaElements, null);
    verifyMapMessageType(messageType, "map");
}
|
/**
 * Builds the URL to hit for authentication.
 *
 * @param credential the credential in use, may be {@code null}
 * @param repositoryScopes repository name to scope mappings
 * @return the realm URL; for OAuth2 the bare realm (parameters travel in the
 *     POST body), otherwise the realm with service/scope query parameters
 * @throws MalformedURLException if the realm does not form a valid URL
 */
@VisibleForTesting
URL getAuthenticationUrl(@Nullable Credential credential, Map<String, String> repositoryScopes)
    throws MalformedURLException {
  if (isOAuth2Auth(credential)) {
    // Required parameters will be sent via POST.
    return new URL(realm);
  }
  return new URL(realm + "?" + getServiceScopeRequestParameters(repositoryScopes));
}
|
@Test
public void getAuthenticationUrl_basicAuth() throws MalformedURLException {
    // With no credential (basic-auth path) the service and scope must be
    // encoded into the realm URL's query string.
    final URL expected =
        new URL("https://somerealm?service=someservice&scope=repository:someimage:scope");
    final URL actual =
        registryAuthenticator.getAuthenticationUrl(null, Collections.singletonMap("someimage", "scope"));
    Assert.assertEquals(expected, actual);
}
|
/**
 * Resolves an environment value, preferring interpreter properties over the
 * process environment; logs a warning if neither yields a non-blank value.
 *
 * @param envName the variable name to look up
 * @param context the launch context whose properties are consulted first
 * @return the resolved value, possibly blank or {@code null}
 */
private String getEnv(String envName, InterpreterLaunchContext context) {
    String value = context.getProperties().getProperty(envName);
    if (StringUtils.isBlank(value)) {
        // Fall back to the process environment.
        value = System.getenv(envName);
    }
    if (StringUtils.isBlank(value)) {
        LOGGER.warn("environment variable: {} is empty", envName);
    }
    return value;
}
|
/**
 * Launches a Spark interpreter in yarn-cluster mode and verifies the generated
 * process environment: SPARK_HOME passthrough, the ZEPPELIN_SPARK_YARN_CLUSTER
 * marker, and the full set of spark-submit conf entries assembled into
 * ZEPPELIN_SPARK_CONF.
 */
@Test
void testYarnClusterMode_1() throws IOException {
    SparkInterpreterLauncher launcher = new SparkInterpreterLauncher(zConf, null);
    Properties properties = new Properties();
    properties.setProperty("SPARK_HOME", sparkHome);
    properties.setProperty("property_1", "value_1");
    properties.setProperty("spark.master", "yarn-cluster");
    properties.setProperty("spark.files", "file_1");
    properties.setProperty("spark.jars", "jar_1");
    InterpreterOption option = new InterpreterOption();
    InterpreterLaunchContext context = new InterpreterLaunchContext(properties, option, null, "user1", "intpGroupId", "groupId", "spark", "spark", 0, "host");
    InterpreterClient client = launcher.launch(context);
    assertTrue( client instanceof ExecRemoteInterpreterProcess);
    try (ExecRemoteInterpreterProcess interpreterProcess = (ExecRemoteInterpreterProcess) client) {
      assertEquals("spark", interpreterProcess.getInterpreterSettingName());
      assertTrue(interpreterProcess.getInterpreterDir().endsWith("/interpreter/spark"));
      assertTrue(interpreterProcess.getLocalRepoDir().endsWith("/local-repo/groupId"));
      assertEquals(zConf.getInterpreterRemoteRunnerPath(), interpreterProcess.getInterpreterRunner());
      assertTrue(interpreterProcess.getEnv().size() >= 3);
      assertEquals(sparkHome, interpreterProcess.getEnv().get("SPARK_HOME"));
      assertEquals("true", interpreterProcess.getEnv().get("ZEPPELIN_SPARK_YARN_CLUSTER"));
      // Expected jar list: the user jar plus Zeppelin's spark-scala and
      // interpreter-shaded jars shipped to the cluster.
      String sparkJars = "jar_1," +
          zeppelinHome + "/interpreter/spark/scala-2.12/spark-scala-2.12-" + Util.getVersion()
          + ".jar," +
          zeppelinHome + "/interpreter/zeppelin-interpreter-shaded-" + Util.getVersion() + ".jar";
      String sparkrZip = sparkHome + "/R/lib/sparkr.zip#sparkr";
      String sparkFiles = "file_1," + zeppelinHome + "/conf/log4j_yarn_cluster.properties";
      String expected = "--conf|spark.yarn.dist.archives=" + sparkrZip +
          "|--conf|spark.yarn.maxAppAttempts=1" +
          "|--conf|spark.files=" + sparkFiles +
          "|--conf|spark.jars=" + sparkJars +
          "|--conf|spark.yarn.isPython=true" +
          "|--conf|spark.yarn.submit.waitAppCompletion=false" +
          "|--conf|spark.app.name=intpGroupId" +
          "|--conf|spark.master=yarn-cluster";
      // Entry order is not guaranteed, so compare as unordered collections.
      assertTrue(CollectionUtils.isEqualCollection(Arrays.asList(expected.split("\\|")),
          Arrays.asList(interpreterProcess.getEnv().get("ZEPPELIN_SPARK_CONF").split("\\|"))));
    }
}
|
/**
 * Parses runtime options from a property map.
 *
 * @param properties the property map to read from
 * @return a builder populated from the map
 */
public RuntimeOptionsBuilder parse(Map<String, String> properties) {
    // Adapt the map to the function-based overload.
    return parse(key -> properties.get(key));
}
|
@Test
void should_parse_object_factory() {
    // Setting the object-factory property must surface as the parsed options'
    // object factory class.
    properties.put(Constants.OBJECT_FACTORY_PROPERTY_NAME, CustomObjectFactory.class.getName());
    final RuntimeOptions runtimeOptions = cucumberPropertiesParser.parse(properties).build();
    assertThat(runtimeOptions.getObjectFactoryClass(), equalTo(CustomObjectFactory.class));
}
|
/**
 * Incrementally decodes SPDY frames from the given buffer, invoking the
 * {@code delegate} callbacks as frames (or frame fragments) become available.
 *
 * <p>This is a resumable state machine: {@code state}, {@code streamId},
 * {@code flags}, {@code length} and {@code numSettings} are fields that
 * persist across calls, so a frame split over several buffers is decoded
 * correctly. Each {@code return} below means "not enough bytes yet" — the
 * caller invokes this method again once more data has arrived.
 */
public void decode(ByteBuf buffer) {
    boolean last;
    int statusCode;
    while (true) {
        switch(state) {
        case READ_COMMON_HEADER:
            // Wait for the complete fixed-size frame header.
            if (buffer.readableBytes() < SPDY_HEADER_SIZE) {
                return;
            }
            int frameOffset = buffer.readerIndex();
            int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET;
            int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET;
            buffer.skipBytes(SPDY_HEADER_SIZE);
            // Top bit of the first byte distinguishes control from data frames.
            boolean control = (buffer.getByte(frameOffset) & 0x80) != 0;
            int version;
            int type;
            if (control) {
                // Decode control frame common header
                version = getUnsignedShort(buffer, frameOffset) & 0x7FFF;
                type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET);
                streamId = 0; // Default to session Stream-ID
            } else {
                // Decode data frame common header
                version = spdyVersion; // Default to expected version
                type = SPDY_DATA_FRAME;
                streamId = getUnsignedInt(buffer, frameOffset);
            }
            flags = buffer.getByte(flagsOffset);
            length = getUnsignedMedium(buffer, lengthOffset);
            // Check version first then validity
            if (version != spdyVersion) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid SPDY Version");
            } else if (!isValidFrameHeader(streamId, type, flags, length)) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid Frame Error");
            } else {
                state = getNextState(type, length);
            }
            break;
        case READ_DATA_FRAME:
            if (length == 0) {
                // Zero-length data frame: emit immediately with an empty buffer.
                state = State.READ_COMMON_HEADER;
                delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0));
                break;
            }
            // Generate data frames that do not exceed maxChunkSize
            int dataLength = Math.min(maxChunkSize, length);
            // Wait until entire frame is readable
            if (buffer.readableBytes() < dataLength) {
                return;
            }
            ByteBuf data = buffer.alloc().buffer(dataLength);
            data.writeBytes(buffer, dataLength);
            length -= dataLength;
            if (length == 0) {
                state = State.READ_COMMON_HEADER;
            }
            // FIN is only reported on the last chunk of the frame.
            last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN);
            delegate.readDataFrame(streamId, last, data);
            break;
        case READ_SYN_STREAM_FRAME:
            if (buffer.readableBytes() < 10) {
                return;
            }
            int offset = buffer.readerIndex();
            streamId = getUnsignedInt(buffer, offset);
            int associatedToStreamId = getUnsignedInt(buffer, offset + 4);
            // Priority lives in the top 3 bits of the 9th byte.
            byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07);
            last = hasFlag(flags, SPDY_FLAG_FIN);
            boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL);
            buffer.skipBytes(10);
            length -= 10;
            if (streamId == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid SYN_STREAM Frame");
            } else {
                state = State.READ_HEADER_BLOCK;
                delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional);
            }
            break;
        case READ_SYN_REPLY_FRAME:
            if (buffer.readableBytes() < 4) {
                return;
            }
            streamId = getUnsignedInt(buffer, buffer.readerIndex());
            last = hasFlag(flags, SPDY_FLAG_FIN);
            buffer.skipBytes(4);
            length -= 4;
            if (streamId == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid SYN_REPLY Frame");
            } else {
                state = State.READ_HEADER_BLOCK;
                delegate.readSynReplyFrame(streamId, last);
            }
            break;
        case READ_RST_STREAM_FRAME:
            if (buffer.readableBytes() < 8) {
                return;
            }
            streamId = getUnsignedInt(buffer, buffer.readerIndex());
            statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
            buffer.skipBytes(8);
            if (streamId == 0 || statusCode == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid RST_STREAM Frame");
            } else {
                state = State.READ_COMMON_HEADER;
                delegate.readRstStreamFrame(streamId, statusCode);
            }
            break;
        case READ_SETTINGS_FRAME:
            if (buffer.readableBytes() < 4) {
                return;
            }
            boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR);
            numSettings = getUnsignedInt(buffer, buffer.readerIndex());
            buffer.skipBytes(4);
            length -= 4;
            // Validate frame length against number of entries. Each ID/Value entry is 8 bytes.
            if ((length & 0x07) != 0 || length >> 3 != numSettings) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid SETTINGS Frame");
            } else {
                state = State.READ_SETTING;
                delegate.readSettingsFrame(clear);
            }
            break;
        case READ_SETTING:
            // Entries are consumed one at a time; numSettings counts down to 0.
            if (numSettings == 0) {
                state = State.READ_COMMON_HEADER;
                delegate.readSettingsEnd();
                break;
            }
            if (buffer.readableBytes() < 8) {
                return;
            }
            byte settingsFlags = buffer.getByte(buffer.readerIndex());
            int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1);
            int value = getSignedInt(buffer, buffer.readerIndex() + 4);
            boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE);
            boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED);
            buffer.skipBytes(8);
            --numSettings;
            delegate.readSetting(id, value, persistValue, persisted);
            break;
        case READ_PING_FRAME:
            if (buffer.readableBytes() < 4) {
                return;
            }
            int pingId = getSignedInt(buffer, buffer.readerIndex());
            buffer.skipBytes(4);
            state = State.READ_COMMON_HEADER;
            delegate.readPingFrame(pingId);
            break;
        case READ_GOAWAY_FRAME:
            if (buffer.readableBytes() < 8) {
                return;
            }
            int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex());
            statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
            buffer.skipBytes(8);
            state = State.READ_COMMON_HEADER;
            delegate.readGoAwayFrame(lastGoodStreamId, statusCode);
            break;
        case READ_HEADERS_FRAME:
            if (buffer.readableBytes() < 4) {
                return;
            }
            streamId = getUnsignedInt(buffer, buffer.readerIndex());
            last = hasFlag(flags, SPDY_FLAG_FIN);
            buffer.skipBytes(4);
            length -= 4;
            if (streamId == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid HEADERS Frame");
            } else {
                state = State.READ_HEADER_BLOCK;
                delegate.readHeadersFrame(streamId, last);
            }
            break;
        case READ_WINDOW_UPDATE_FRAME:
            if (buffer.readableBytes() < 8) {
                return;
            }
            streamId = getUnsignedInt(buffer, buffer.readerIndex());
            int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4);
            buffer.skipBytes(8);
            if (deltaWindowSize == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid WINDOW_UPDATE Frame");
            } else {
                state = State.READ_COMMON_HEADER;
                delegate.readWindowUpdateFrame(streamId, deltaWindowSize);
            }
            break;
        case READ_HEADER_BLOCK:
            if (length == 0) {
                state = State.READ_COMMON_HEADER;
                delegate.readHeaderBlockEnd();
                break;
            }
            if (!buffer.isReadable()) {
                return;
            }
            // Forward whatever compressed header bytes are available so far.
            int compressedBytes = Math.min(buffer.readableBytes(), length);
            ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes);
            headerBlock.writeBytes(buffer, compressedBytes);
            length -= compressedBytes;
            delegate.readHeaderBlock(headerBlock);
            break;
        case DISCARD_FRAME:
            // Skip the remainder of a frame whose payload is being ignored.
            int numBytes = Math.min(buffer.readableBytes(), length);
            buffer.skipBytes(numBytes);
            length -= numBytes;
            if (length == 0) {
                state = State.READ_COMMON_HEADER;
                break;
            }
            return;
        case FRAME_ERROR:
            // After an error, all remaining input is discarded.
            buffer.skipBytes(buffer.readableBytes());
            return;
        default:
            throw new Error("Shouldn't reach here.");
        }
    }
}
|
@Test
public void testIllegalSpdyDataFrameStreamId() throws Exception {
    // A data frame with stream id 0 is invalid: the decoder must report a
    // frame error and then discard the remainder of the buffer.
    final byte flags = 0;
    final int length = 0;
    final int illegalStreamId = 0;
    final ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length);
    encodeDataFrameHeader(buf, illegalStreamId, flags, length);
    decoder.decode(buf);
    verify(delegate).readFrameError((String) any());
    assertFalse(buf.isReadable());
    buf.release();
}
|
/**
 * Sends an HTTP GET request.
 *
 * @param url the target URL
 * @param request the request to send
 * @return the response
 * @throws IOException if sending the request fails
 */
public Response get(URL url, Request request) throws IOException {
    return call(HttpMethods.GET, url, request);
}
|
/**
 * For an explicitly secure (HTTPS) client, a connection failure must NOT
 * trigger a fallback to HTTP: the original ConnectException is propagated and
 * only the secure request is ever attempted.
 */
@Test
public void testGet_secureClientOnNonListeningServerAndNoPortSpecified() throws IOException {
    FailoverHttpClient httpClient = newHttpClient(false, false);
    Mockito.when(mockHttpRequest.execute())
        .thenThrow(new ConnectException("my exception")); // server not listening on 443
    try (Response response = httpClient.get(new URL("https://insecure"), fakeRequest(null))) {
        Assert.fail("Should not fall back to HTTP if secure client");
    } catch (ConnectException ex) {
        // The original failure surfaces unchanged, with exactly one attempt
        // and no interaction with the insecure request or the logger.
        Assert.assertEquals("my exception", ex.getMessage());
        verifyCapturedUrls("https://insecure");
        Mockito.verify(mockHttpRequest, Mockito.times(1)).execute();
        Mockito.verifyNoInteractions(mockInsecureHttpRequest, logger);
    }
}
|
/**
 * Decodes a raw syslog message into a {@link Message}, timing the parse.
 *
 * @param rawMessage the raw message; its payload is decoded with the
 *     configured charset
 * @return the parsed message, or {@code null} if parsing yields none
 */
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
    final String msg = new String(rawMessage.getPayload(), charset);
    try (Timer.Context ignored = this.decodeTime.time()) {
        // Resolve the sender's address if the raw message carries one;
        // either level may be absent, in which case null is passed on.
        final ResolvableInetSocketAddress address = rawMessage.getRemoteAddress();
        final InetSocketAddress remoteAddress = address == null ? null : address.getInetSocketAddress();
        final java.net.InetAddress sender = remoteAddress == null ? null : remoteAddress.getAddress();
        return parse(msg, sender, rawMessage.getTimestamp());
    }
}
|
/**
 * Decodes an RFC 5424 structured syslog message with full-message storage
 * enabled, checking both the standard syslog fields and the fields lifted
 * from the structured-data element.
 */
@Test
public void testDecodeStructuredWithFullMessage() throws Exception {
    // Enable storing the original raw message in "full_message".
    when(configuration.getBoolean(SyslogCodec.CK_STORE_FULL_MESSAGE)).thenReturn(true);
    final Message message = codec.decode(buildRawMessage(STRUCTURED));
    assertNotNull(message);
    assertEquals("BOMAn application event log entry", message.getMessage());
    assertEquals(new DateTime("2012-12-25T22:14:15.003Z", DateTimeZone.UTC), ((DateTime) message.getField("timestamp")).withZone(DateTimeZone.UTC));
    assertEquals("mymachine.example.com", message.getField("source"));
    assertEquals(5, message.getField("level"));
    assertEquals("local4", message.getField("facility"));
    assertEquals(STRUCTURED, message.getField("full_message"));
    // Fields parsed out of the structured-data element.
    assertEquals("Application", message.getField("eventSource"));
    assertEquals("1011", message.getField("eventID"));
    assertEquals("3", message.getField("iut"));
    assertEquals("evntslog", message.getField("application_name"));
    assertEquals(20, message.getField("facility_num"));
}
|
/**
 * Formats a byte count using the largest unit (B/KB/MB/GB) that keeps the
 * value at least 1, truncating via integer division rather than rounding.
 *
 * @param byteNum the number of bytes; negative values yield an error string
 * @return the formatted size, e.g. {@code "3 MB"}
 */
public static String byte2FitMemoryString(final long byteNum) {
    if (byteNum < 0) {
        return "shouldn't be less than zero!";
    }
    if (byteNum >= MemoryConst.GB) {
        return String.format("%d GB", byteNum / MemoryConst.GB);
    }
    if (byteNum >= MemoryConst.MB) {
        return String.format("%d MB", byteNum / MemoryConst.MB);
    }
    if (byteNum >= MemoryConst.KB) {
        return String.format("%d KB", byteNum / MemoryConst.KB);
    }
    return String.format("%d B", byteNum);
}
|
@Test
public void byte2FitMemoryStringGB() {
    // 1024^3 bytes formats as exactly one gigabyte.
    final long oneGigabyte = 1024L * 1024L * 1024L;
    Assert.assertEquals("1 GB", ConvertKit.byte2FitMemoryString(oneGigabyte));
}
|
/**
 * Incrementally decodes SPDY frames from the given buffer, invoking the
 * {@code delegate} callbacks as frames (or frame fragments) become available.
 *
 * <p>This is a resumable state machine: {@code state}, {@code streamId},
 * {@code flags}, {@code length} and {@code numSettings} are fields that
 * persist across calls, so a frame split over several buffers is decoded
 * correctly. Each {@code return} below means "not enough bytes yet" — the
 * caller invokes this method again once more data has arrived.
 */
public void decode(ByteBuf buffer) {
    boolean last;
    int statusCode;
    while (true) {
        switch(state) {
        case READ_COMMON_HEADER:
            // Wait for the complete fixed-size frame header.
            if (buffer.readableBytes() < SPDY_HEADER_SIZE) {
                return;
            }
            int frameOffset = buffer.readerIndex();
            int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET;
            int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET;
            buffer.skipBytes(SPDY_HEADER_SIZE);
            // Top bit of the first byte distinguishes control from data frames.
            boolean control = (buffer.getByte(frameOffset) & 0x80) != 0;
            int version;
            int type;
            if (control) {
                // Decode control frame common header
                version = getUnsignedShort(buffer, frameOffset) & 0x7FFF;
                type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET);
                streamId = 0; // Default to session Stream-ID
            } else {
                // Decode data frame common header
                version = spdyVersion; // Default to expected version
                type = SPDY_DATA_FRAME;
                streamId = getUnsignedInt(buffer, frameOffset);
            }
            flags = buffer.getByte(flagsOffset);
            length = getUnsignedMedium(buffer, lengthOffset);
            // Check version first then validity
            if (version != spdyVersion) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid SPDY Version");
            } else if (!isValidFrameHeader(streamId, type, flags, length)) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid Frame Error");
            } else {
                state = getNextState(type, length);
            }
            break;
        case READ_DATA_FRAME:
            if (length == 0) {
                // Zero-length data frame: emit immediately with an empty buffer.
                state = State.READ_COMMON_HEADER;
                delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0));
                break;
            }
            // Generate data frames that do not exceed maxChunkSize
            int dataLength = Math.min(maxChunkSize, length);
            // Wait until entire frame is readable
            if (buffer.readableBytes() < dataLength) {
                return;
            }
            ByteBuf data = buffer.alloc().buffer(dataLength);
            data.writeBytes(buffer, dataLength);
            length -= dataLength;
            if (length == 0) {
                state = State.READ_COMMON_HEADER;
            }
            // FIN is only reported on the last chunk of the frame.
            last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN);
            delegate.readDataFrame(streamId, last, data);
            break;
        case READ_SYN_STREAM_FRAME:
            if (buffer.readableBytes() < 10) {
                return;
            }
            int offset = buffer.readerIndex();
            streamId = getUnsignedInt(buffer, offset);
            int associatedToStreamId = getUnsignedInt(buffer, offset + 4);
            // Priority lives in the top 3 bits of the 9th byte.
            byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07);
            last = hasFlag(flags, SPDY_FLAG_FIN);
            boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL);
            buffer.skipBytes(10);
            length -= 10;
            if (streamId == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid SYN_STREAM Frame");
            } else {
                state = State.READ_HEADER_BLOCK;
                delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional);
            }
            break;
        case READ_SYN_REPLY_FRAME:
            if (buffer.readableBytes() < 4) {
                return;
            }
            streamId = getUnsignedInt(buffer, buffer.readerIndex());
            last = hasFlag(flags, SPDY_FLAG_FIN);
            buffer.skipBytes(4);
            length -= 4;
            if (streamId == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid SYN_REPLY Frame");
            } else {
                state = State.READ_HEADER_BLOCK;
                delegate.readSynReplyFrame(streamId, last);
            }
            break;
        case READ_RST_STREAM_FRAME:
            if (buffer.readableBytes() < 8) {
                return;
            }
            streamId = getUnsignedInt(buffer, buffer.readerIndex());
            statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
            buffer.skipBytes(8);
            if (streamId == 0 || statusCode == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid RST_STREAM Frame");
            } else {
                state = State.READ_COMMON_HEADER;
                delegate.readRstStreamFrame(streamId, statusCode);
            }
            break;
        case READ_SETTINGS_FRAME:
            if (buffer.readableBytes() < 4) {
                return;
            }
            boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR);
            numSettings = getUnsignedInt(buffer, buffer.readerIndex());
            buffer.skipBytes(4);
            length -= 4;
            // Validate frame length against number of entries. Each ID/Value entry is 8 bytes.
            if ((length & 0x07) != 0 || length >> 3 != numSettings) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid SETTINGS Frame");
            } else {
                state = State.READ_SETTING;
                delegate.readSettingsFrame(clear);
            }
            break;
        case READ_SETTING:
            // Entries are consumed one at a time; numSettings counts down to 0.
            if (numSettings == 0) {
                state = State.READ_COMMON_HEADER;
                delegate.readSettingsEnd();
                break;
            }
            if (buffer.readableBytes() < 8) {
                return;
            }
            byte settingsFlags = buffer.getByte(buffer.readerIndex());
            int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1);
            int value = getSignedInt(buffer, buffer.readerIndex() + 4);
            boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE);
            boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED);
            buffer.skipBytes(8);
            --numSettings;
            delegate.readSetting(id, value, persistValue, persisted);
            break;
        case READ_PING_FRAME:
            if (buffer.readableBytes() < 4) {
                return;
            }
            int pingId = getSignedInt(buffer, buffer.readerIndex());
            buffer.skipBytes(4);
            state = State.READ_COMMON_HEADER;
            delegate.readPingFrame(pingId);
            break;
        case READ_GOAWAY_FRAME:
            if (buffer.readableBytes() < 8) {
                return;
            }
            int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex());
            statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
            buffer.skipBytes(8);
            state = State.READ_COMMON_HEADER;
            delegate.readGoAwayFrame(lastGoodStreamId, statusCode);
            break;
        case READ_HEADERS_FRAME:
            if (buffer.readableBytes() < 4) {
                return;
            }
            streamId = getUnsignedInt(buffer, buffer.readerIndex());
            last = hasFlag(flags, SPDY_FLAG_FIN);
            buffer.skipBytes(4);
            length -= 4;
            if (streamId == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid HEADERS Frame");
            } else {
                state = State.READ_HEADER_BLOCK;
                delegate.readHeadersFrame(streamId, last);
            }
            break;
        case READ_WINDOW_UPDATE_FRAME:
            if (buffer.readableBytes() < 8) {
                return;
            }
            streamId = getUnsignedInt(buffer, buffer.readerIndex());
            int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4);
            buffer.skipBytes(8);
            if (deltaWindowSize == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid WINDOW_UPDATE Frame");
            } else {
                state = State.READ_COMMON_HEADER;
                delegate.readWindowUpdateFrame(streamId, deltaWindowSize);
            }
            break;
        case READ_HEADER_BLOCK:
            if (length == 0) {
                state = State.READ_COMMON_HEADER;
                delegate.readHeaderBlockEnd();
                break;
            }
            if (!buffer.isReadable()) {
                return;
            }
            // Forward whatever compressed header bytes are available so far.
            int compressedBytes = Math.min(buffer.readableBytes(), length);
            ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes);
            headerBlock.writeBytes(buffer, compressedBytes);
            length -= compressedBytes;
            delegate.readHeaderBlock(headerBlock);
            break;
        case DISCARD_FRAME:
            // Skip the remainder of a frame whose payload is being ignored.
            int numBytes = Math.min(buffer.readableBytes(), length);
            buffer.skipBytes(numBytes);
            length -= numBytes;
            if (length == 0) {
                state = State.READ_COMMON_HEADER;
                break;
            }
            return;
        case FRAME_ERROR:
            // After an error, all remaining input is discarded.
            buffer.skipBytes(buffer.readableBytes());
            return;
        default:
            throw new Error("Shouldn't reach here.");
        }
    }
}
|
/**
 * Undefined flag bits on a SYN_STREAM frame must be ignored: the frame is
 * decoded normally with FIN and UNIDIRECTIONAL both reported as false.
 */
@Test
public void testUnknownSpdySynStreamFrameFlags() throws Exception {
    short type = 1;
    byte flags = (byte) 0xFC; // undefined flags
    int length = 10;
    // Random odd (client-initiated) positive stream id.
    int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01;
    int associatedToStreamId = RANDOM.nextInt() & 0x7FFFFFFF;
    byte priority = (byte) (RANDOM.nextInt() & 0x07);
    ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length);
    encodeControlFrameHeader(buf, type, flags, length);
    buf.writeInt(streamId);
    buf.writeInt(associatedToStreamId);
    // Priority occupies the top 3 bits of the 9th payload byte.
    buf.writeByte(priority << 5);
    buf.writeByte(0);
    decoder.decode(buf);
    verify(delegate).readSynStreamFrame(streamId, associatedToStreamId, priority, false, false);
    verify(delegate).readHeaderBlockEnd();
    assertFalse(buf.isReadable());
    buf.release();
}
|
/**
 * Loads the sink configuration from a YAML file.
 *
 * @param yamlFile path to the YAML configuration file
 * @return the deserialized configuration
 * @throws IOException if the file cannot be read or parsed
 */
public static MongoSinkConfig load(String yamlFile) throws IOException {
    final ObjectMapper yamlMapper = new ObjectMapper(new YAMLFactory());
    return yamlMapper.readValue(new File(yamlFile), MongoSinkConfig.class);
}
|
/**
 * Loads the sink configuration from a map (rather than a YAML file) and
 * verifies both the common settings and the batch overrides.
 */
@Test
public void testLoadMapConfig() throws IOException {
    final Map<String, Object> commonConfigMap = TestHelper.createCommonConfigMap();
    // Override the batching defaults.
    commonConfigMap.put("batchSize", TestHelper.BATCH_SIZE);
    commonConfigMap.put("batchTimeMs", TestHelper.BATCH_TIME);
    SinkContext sinkContext = Mockito.mock(SinkContext.class);
    final MongoSinkConfig cfg = MongoSinkConfig.load(commonConfigMap, sinkContext);
    assertEquals(cfg.getMongoUri(), TestHelper.URI);
    assertEquals(cfg.getDatabase(), TestHelper.DB);
    assertEquals(cfg.getCollection(), TestHelper.COLL);
    assertEquals(cfg.getBatchSize(), TestHelper.BATCH_SIZE);
    assertEquals(cfg.getBatchTimeMs(), TestHelper.BATCH_TIME);
}
|
/**
 * Returns the ABI type string for this dynamic array, i.e. the element type
 * followed by {@code []}.
 *
 * <p>An empty array cannot inspect an element instance, so the element type is
 * derived from the component class instead; a populated array uses its first
 * element (needed for struct elements, whose type string depends on the
 * instance).
 */
@Override
public String getTypeAsString() {
    final String elementType;
    if (value.isEmpty()) {
        // Handle dynamic array of zero length. This will fail if the dynamic
        // array is an array of structs.
        if (StructType.class.isAssignableFrom(getComponentType())) {
            elementType = Utils.getStructType(getComponentType());
        } else {
            elementType = AbiTypes.getTypeAString(getComponentType());
        }
    } else if (StructType.class.isAssignableFrom(value.get(0).getClass())) {
        elementType = value.get(0).getTypeAsString();
    } else {
        elementType = AbiTypes.getTypeAString(getComponentType());
    }
    return elementType + "[]";
}
|
@Test
public void testDynamicArrayWithDynamicStruct() {
    // A dynamic array holding one (empty) dynamic struct renders as "()[]".
    final DynamicArray<DynamicStruct> array =
        new DynamicArray<>(DynamicStruct.class, Collections.singletonList(new DynamicStruct()));
    assertEquals("()[]", array.getTypeAsString());
}
|
/**
 * Returns this table's name, delegating to the underlying Iceberg table's
 * string representation.
 */
@Override
public String name() {
    return icebergTable.toString();
}
|
/**
 * Two SparkTable instances loaded separately for the same identifier must be
 * distinct objects yet compare equal.
 */
@Test
public void testTableEquality() throws NoSuchTableException {
    CatalogManager catalogManager = spark.sessionState().catalogManager();
    TableCatalog catalog = (TableCatalog) catalogManager.catalog(catalogName);
    Identifier identifier = Identifier.of(tableIdent.namespace().levels(), tableIdent.name());
    // Load the same identifier twice to obtain two separate instances.
    SparkTable table1 = (SparkTable) catalog.loadTable(identifier);
    SparkTable table2 = (SparkTable) catalog.loadTable(identifier);
    // different instances pointing to the same table must be equivalent
    Assert.assertNotSame("References must be different", table1, table2);
    Assert.assertEquals("Tables must be equivalent", table1, table2);
}
|
/**
 * Returns the column length of a binary bytea value, which is simply the
 * length of the underlying byte array.
 *
 * @param value the value; expected to be a {@code byte[]}
 * @return the number of bytes in the value
 */
@Override
public int getColumnLength(final Object value) {
    final byte[] bytes = (byte[]) value;
    return bytes.length;
}
|
@Test
void assertGetColumnLength() {
    // The reported column length must equal the byte array's length.
    final byte[] value = new byte[10];
    assertThat(new PostgreSQLByteaBinaryProtocolValue().getColumnLength(value), is(10));
}
|
/**
 * Parses an {@code application/x-www-form-urlencoded} body into named parts.
 * Repeated names accumulate their values in a single {@code FormPart}.
 *
 * @param body the raw urlencoded body
 * @return a map from decoded part name to its part (with all decoded values)
 */
public static Map<String, FormUrlEncoded.FormPart> read(String body) {
    var parts = new HashMap<String, FormUrlEncoded.FormPart>();
    for (var s : body.split("&")) {
        if (s.isBlank()) {
            continue;
        }
        // Split on the FIRST '=' only, so values that themselves contain '='
        // (e.g. base64 payloads) are preserved intact. With limit 2, a
        // trailing '=' ("name=") yields an explicit empty-string value.
        var pair = s.split("=", 2);
        var name = URLDecoder.decode(pair[0].trim(), StandardCharsets.UTF_8);
        var part = parts.computeIfAbsent(name, n -> new FormUrlEncoded.FormPart(n, new ArrayList<>()));
        if (pair.length > 1) {
            var value = URLDecoder.decode(pair[1].trim(), StandardCharsets.UTF_8);
            part.values().add(value);
        }
    }
    return parts;
}
|
@Test
void test() {
    // Repeated parameter names must collapse into one part holding both values.
    var encodedBody = "val1=2112&val1=3232&val2=test";
    var parts = FormUrlEncodedServerRequestMapper.read(encodedBody);
    assertThat(parts)
        .hasSize(2)
        .hasEntrySatisfying("val1", part -> assertThat(part.values()).containsExactly("2112", "3232"))
        .hasEntrySatisfying("val2", part -> assertThat(part.values()).containsExactly("test"));
}
|
/**
 * Removes the last occurrence (by reference identity, {@code ==}) of
 * {@code elementToRemove} from the array, returning a new shortened array.
 * If the element is not present, delegates with {@code UNKNOWN_INDEX}.
 *
 * @param oldElements the source array; not modified
 * @param elementToRemove the element reference to remove
 * @param <T> the component type
 * @return a new array without the element, or per the index-based overload's
 *     behavior when the element is absent
 */
public static <T> T[] remove(final T[] oldElements, final T elementToRemove)
{
    int index = UNKNOWN_INDEX;
    // Scan backwards so the first hit IS the last occurrence, allowing an
    // early break instead of always traversing the whole array.
    for (int i = oldElements.length - 1; i >= 0; i--)
    {
        if (oldElements[i] == elementToRemove)
        {
            index = i;
            break;
        }
    }
    return remove(oldElements, index);
}
|
@Test
void shouldRemovePresentElementAtEnd()
{
    // Removing the trailing element leaves only the leading one.
    final Integer[] remaining = ArrayUtil.remove(values, TWO);
    assertArrayEquals(new Integer[]{ ONE }, remaining);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.