focal_method (string, 13 – 60.9k chars) | test_case (string, 25 – 109k chars)
|---|---|
/**
 * Transactional sends are not supported by this producer implementation.
 *
 * @throws RuntimeException always, directing callers to use TransactionMQProducer instead
 */
@Override
public TransactionSendResult sendMessageInTransaction(Message msg,
    Object arg) throws MQClientException {
    final String unsupported =
        "sendMessageInTransaction not implement, please use TransactionMQProducer class";
    throw new RuntimeException(unsupported);
}
|
/**
 * Verifies that the non-transactional producer rejects transactional sends.
 * The previous version asserted on the return value after the call, but with
 * {@code expected = RuntimeException.class} that assertion was unreachable dead
 * code, so it has been removed.
 */
@Test(expected = RuntimeException.class)
public void assertSendMessageInTransaction() throws MQClientException {
    producer.sendMessageInTransaction(message, 1);
}
|
/**
 * Converts a provider file name to the corresponding PVFS connection file name.
 *
 * @param providerFileName the provider-scheme file name; its uri is assumed normalized
 * @param details the connection details used to compute the connection root provider uri
 * @return the PVFS file name for the path under the connection root
 * @throws KettleException if building the PVFS file name fails
 * @throws IllegalArgumentException if {@code providerFileName} is not under the connection root
 */
@NonNull
@Override
public ConnectionFileName toPvfsFileName( @NonNull FileName providerFileName, @NonNull T details )
  throws KettleException {
  // Determine the part of provider file name following the connection "root".
  // Use the transformer to generate the connection root provider uri.
  // Both uris are assumed to be normalized.
  // Examples:
  // - connectionRootProviderUri: "hcp://domain.my:443/root/path/" | "s3://" | "local://"
  // - providerUri: "hcp://domain.my:443/root/path/rest/path" | "s3://rest/path"
  // Example: "pvfs://my-connection"
  String connectionRootProviderUri = getConnectionRootProviderUriPrefix( details );
  String providerUri = providerFileName.getURI();
  // Reject uris outside the connection root; a plain substring below would silently
  // produce a wrong rest path otherwise.
  if ( !connectionFileNameUtils.isDescendantOrSelf( providerUri, connectionRootProviderUri ) ) {
    throw new IllegalArgumentException(
      String.format(
        "Provider file name '%s' is not a descendant of the connection root '%s'.",
        providerUri,
        connectionRootProviderUri ) );
  }
  String restUriPath = providerUri.substring( connectionRootProviderUri.length() );
  // Examples: "/rest/path" or "rest/path"
  return buildPvfsFileName( details, restUriPath, providerFileName.getType() );
}
|
// Verifies that for bucketed connections (e.g. S3) the provider rest path is appended
// directly to "pvfs://<connection-name>" and that the connection root provider uri is
// NOT run through kettleVFS.resolveURI normalization.
@Test
public void testToPvfsFileNameHandlesConnectionsWithBuckets() throws Exception {
  // Example: S3
  when( details1.hasBuckets() ).thenReturn( true );
  String connectionRootProviderUriPrefix = "scheme1://";
  String restPath = "/rest/path";
  FileName providerFileName = mockFileNameWithUri( FileName.class, connectionRootProviderUriPrefix + restPath );
  ConnectionFileName pvfsFileName = transformer.toPvfsFileName( providerFileName, details1 );
  assertEquals( "pvfs://connection-name1" + restPath, pvfsFileName.getURI() );
  // Should NOT do connection root provider uri normalization.
  verify( kettleVFS, never() ).resolveURI( connectionRootProviderUriPrefix );
}
|
/**
 * Deletes the given files from Dropbox, invoking {@code callback.delete} before each
 * deletion. Dropbox API failures are mapped to {@link BackgroundException}s per file.
 *
 * @param files the paths to delete, with their transfer status
 * @param prompt password callback (unused here)
 * @param callback notified before each file is deleted
 * @throws BackgroundException when the Dropbox API reports an error for a file
 */
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    for(Path file : files.keySet()) {
        try {
            callback.delete(file);
            // Delete the file or folder at a given path. If the path is a folder, all its contents will be deleted too.
            // NOTE(review): isDuplicate() presumably marks a previous file revision, which is
            // removed permanently by version id rather than moved to trash — confirm.
            if(file.attributes().isDuplicate()) {
                new DbxUserFilesRequests(session.getClient(file)).permanentlyDelete(containerService.getKey(file),
                    file.attributes().getVersionId());
            }
            else {
                new DbxUserFilesRequests(session.getClient(file)).deleteV2(containerService.getKey(file));
            }
        }
        catch(DbxException e) {
            throw new DropboxExceptionMappingService().map("Cannot delete {0}", e, file);
        }
    }
}
|
// Deleting a path that does not exist on the server must surface as NotfoundException.
@Test(expected = NotfoundException.class)
public void testDeleteNotFound() throws Exception {
    final Path test = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    new DropboxDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Splits a glyph id sequence into sub-sequences by first encoding the ids as text,
 * tokenizing that text with the compound character tokenizer, and then decoding each
 * token back into a glyph id list.
 *
 * @param glyphIds the glyph ids to split
 * @return one glyph id list per token, in token order
 */
@Override
public List<List<Integer>> split(List<Integer> glyphIds)
{
    List<String> tokens = compoundCharacterTokenizer.tokenize(
            convertGlyphIdsToString(glyphIds));
    List<List<Integer>> result = new ArrayList<>(tokens.size());
    for (String token : tokens)
    {
        result.add(convertGlyphIdsToList(token));
    }
    return result;
}
|
// A matcher hit in the middle of the sequence must yield three tokens:
// the prefix, the matched group, and the suffix.
@Test
void testSplit_3()
{
    // given
    Set<List<Integer>> matchers = new HashSet<>(
            Arrays.asList(Arrays.asList(67, 112, 96), Arrays.asList(74, 112, 76)));
    GlyphArraySplitter testClass = new GlyphArraySplitterRegexImpl(matchers);
    List<Integer> glyphIds = Arrays.asList(94, 67, 112, 96, 112, 91, 103);
    // when
    List<List<Integer>> tokens = testClass.split(glyphIds);
    // then
    assertEquals(Arrays.asList(Arrays.asList(94), Arrays.asList(67, 112, 96),
            Arrays.asList(112, 91, 103)), tokens);
}
|
/**
 * Schedules {@code command} after {@code delay}, propagating the caller's MDC map and
 * the configured context propagators into the worker thread. The MDC is always cleared
 * on the worker thread after the command runs.
 */
@Override
public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
    final Map<String, String> capturedMdc = getMdcContextMap();
    final Runnable mdcAware = () -> {
        try {
            setMDCContext(capturedMdc);
            command.run();
        } finally {
            // Never leak the caller's MDC entries into subsequent tasks on this thread.
            MDC.clear();
        }
    };
    return super.schedule(ContextPropagator.decorateRunnable(contextPropagators, mdcAware), delay, unit);
}
|
// A value placed in the thread-local context before scheduling must be visible inside
// the scheduled runnable; the future completing without exception proves it was found.
@Test
public void testScheduleRunnablePropagatesContext() {
    TestThreadLocalContextHolder.put("ValueShouldCrossThreadBoundary");
    final ScheduledFuture<?> schedule = schedulerService.schedule(() -> {
        TestThreadLocalContextHolder.get().orElseThrow(() -> new RuntimeException("Found No Context"));
    }, 0, TimeUnit.MILLISECONDS);
    assertThatCode(() -> schedule.get()).doesNotThrowAnyException();
}
|
/**
 * Serializes this element to XML within the given environment.
 *
 * @param enclosingXmlEnvironment the enclosing XML environment (namespaces in scope)
 * @return a StringBuilder holding the serialized XML
 */
@Override
public StringBuilder toXML(XmlEnvironment enclosingXmlEnvironment) {
    // This is only the potential length, since the actual length depends on the given XmlEnvironment.
    int potentialLength = length();
    StringBuilder res = new StringBuilder(potentialLength);
    // Method reference replaces the equivalent lambda csq -> res.append(csq).
    appendXmlTo(res::append, enclosingXmlEnvironment);
    return res;
}
|
// Two inner elements sharing the same namespace must each carry their xmlns declaration,
// and re-serializing the produced XmlStringBuilder must yield the same XML.
@Test
public void equalInnerNamespaceTest() {
    StandardExtensionElement innerOne = StandardExtensionElement.builder("inner", "inner-namespace").build();
    StandardExtensionElement innerTwo = StandardExtensionElement.builder("inner", "inner-namespace").build();
    StandardExtensionElement outer = StandardExtensionElement.builder("outer", "outer-namespace").addElement(
            innerOne).addElement(innerTwo).build();
    String expectedXml = "<outer xmlns='outer-namespace'><inner xmlns='inner-namespace'></inner><inner xmlns='inner-namespace'></inner></outer>";
    XmlStringBuilder actualXml = outer.toXML(XmlEnvironment.EMPTY);
    XmlAssertUtil.assertXmlSimilar(expectedXml, actualXml);
    StringBuilder actualXmlTwo = actualXml.toXML(XmlEnvironment.EMPTY);
    XmlAssertUtil.assertXmlSimilar(expectedXml, actualXmlTwo);
}
|
/**
 * FEEL union(): returns the distinct elements of all given lists, preserving first-seen
 * order. A non-collection argument is treated as a single element.
 *
 * @param lists the lists (or scalar values) to union
 * @return a new list without duplicates, or an error result when {@code lists} is null
 */
public FEELFnResult<List<Object>> invoke(@ParameterName("list") Object[] lists) {
    if ( lists == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "lists", "cannot be null"));
    }
    // LinkedHashSet removes duplicates while keeping insertion order.
    final Set<Object> resultSet = new LinkedHashSet<>();
    for ( final Object list : lists ) {
        if ( list instanceof Collection ) {
            // Wildcard cast avoids the raw-type warning; the element type is unknown here.
            resultSet.addAll((Collection<?>) list);
        } else {
            resultSet.add(list);
        }
    }
    // spec requires us to return a new list
    return FEELFnResult.ofResult( new ArrayList<>(resultSet) );
}
|
// A null argument array must produce an InvalidParametersEvent error result.
@Test
void invokeNull() {
    FunctionTestUtil.assertResultError(unionFunction.invoke(null), InvalidParametersEvent.class);
}
|
/**
 * Fills in default values for any property not explicitly set. When clustering is
 * disabled, search defaults to the loopback address on port 9001, and zero/unset
 * port values are fixed up.
 *
 * @param props the properties to complete in place
 */
public void completeDefaults(Props props) {
    // init string properties
    for (Map.Entry<Object, Object> entry : defaults().entrySet()) {
        props.setDefault(entry.getKey().toString(), entry.getValue().toString());
    }
    // Cluster mode manages search hosts/ports itself, so only apply these defaults standalone.
    boolean clusterEnabled = props.valueAsBoolean(CLUSTER_ENABLED.getKey(), false);
    if (!clusterEnabled) {
        props.setDefault(SEARCH_HOST.getKey(), InetAddress.getLoopbackAddress().getHostAddress());
        props.setDefault(SEARCH_PORT.getKey(), "9001");
        fixPortIfZero(props, Property.SEARCH_HOST.getKey(), SEARCH_PORT.getKey());
        fixEsTransportPortIfNull(props);
    }
}
|
// In cluster mode the zero-port fix-up must not run: sonar.es.port stays 0.
@Test
public void completeDefaults_does_not_set_the_transport_port_of_elasticsearch_if_value_is_zero_in_search_node_in_cluster() {
    Properties p = new Properties();
    p.setProperty("sonar.cluster.enabled", "true");
    p.setProperty("sonar.es.port", "0");
    Props props = new Props(p);
    processProperties.completeDefaults(props);
    assertThat(props.valueAsInt("sonar.es.port")).isZero();
}
|
/**
 * Makes the file publicly readable on Google Drive and returns its shareable web link.
 *
 * @param file the file to share
 * @param sharee ignored here; the permission is always granted to "anyone"
 * @param options unused
 * @param callback unused
 * @return the HTTP download url for the now-public file
 * @throws BackgroundException when the permission cannot be created
 */
@Override
public DescriptiveUrl toDownloadUrl(final Path file, final Sharee sharee, final Object options, final PasswordCallback callback) throws BackgroundException {
    final Permission permission = new Permission();
    // To make a file public you will need to assign the role reader to the type anyone
    permission.setRole("reader");
    permission.setType("anyone");
    try {
        session.getClient().permissions().create(fileid.getFileId(file), permission)
            .setSupportsAllDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable")).execute();
    }
    catch(IOException e) {
        throw new DriveExceptionMappingService(fileid).map("Failure to write attributes of {0}", e, file);
    }
    return new DriveUrlProvider().toUrl(file).find(DescriptiveUrl.Type.http);
}
|
// Integration test: creates a file in My Drive, shares it world-readable, and checks a
// non-empty download url is produced; cleans the file up afterwards.
@Test
public void toDownloadUrl() throws Exception {
    final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
    final Path test = new DriveTouchFeature(session, fileid).touch(
        new Path(DriveHomeFinderService.MYDRIVE_FOLDER, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)), new TransferStatus().withMime("x-application/cyberduck"));
    final DriveSharingUrlProvider provider = new DriveSharingUrlProvider(session, fileid);
    // Set web view link
    test.setAttributes(new DriveAttributesFinderFeature(session, fileid).find(test));
    assertFalse(provider.isSupported(test, Share.Type.upload));
    assertTrue(provider.isSupported(test, Share.Type.download));
    final DescriptiveUrl url = provider.toDownloadUrl(test, Share.Sharee.world, null, new DisabledPasswordCallback());
    assertNotEquals(DescriptiveUrl.EMPTY, url);
    assertNotNull(url.getUrl());
    new DriveDeleteFeature(session, fileid).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Strips the document id prefix, if present, to recover the entity uuid.
 *
 * @param id the document id, with or without the prefix
 * @return the id without {@code ID_PREFIX}, or the argument unchanged when the prefix is absent
 */
public static String entityUuidOf(String id) {
    return id.startsWith(ID_PREFIX)
        ? id.substring(ID_PREFIX.length())
        : id;
}
|
// Ids without the prefix (including the empty string) are returned unchanged.
@Test
public void projectUuidOf_returns_argument_if_does_not_starts_with_id_prefix() {
    String id = randomAlphabetic(1 + new Random().nextInt(10));
    assertThat(AuthorizationDoc.entityUuidOf(id)).isEqualTo(id);
    assertThat(AuthorizationDoc.entityUuidOf("")).isEmpty();
}
|
/** Returns the intermediate logical schema computed for this projection node. */
public LogicalSchema getIntermediateSchema() {
    return intermediateSchema;
}
|
// For a non-windowed pull query selecting a value column, the intermediate schema must
// be the input schema with pseudo and key columns removed from the value.
@Test
public void shouldBuildPullQueryIntermediateSchemaSelectValueNonWindowed() {
    // Given:
    selects = ImmutableList.of(new SingleColumn(COL0_REF, Optional.of(ALIAS)));
    when(keyFormat.isWindowed()).thenReturn(false);
    when(analysis.getSelectColumnNames()).thenReturn(ImmutableSet.of(ALIAS));
    // When:
    final QueryProjectNode projectNode = new QueryProjectNode(
        NODE_ID,
        source,
        selects,
        metaStore,
        ksqlConfig,
        analysis,
        false,
        plannerOptions,
        false
    );
    // Then:
    assertThat(INPUT_SCHEMA.withoutPseudoAndKeyColsInValue(),
        is(projectNode.getIntermediateSchema()));
}
|
/**
 * Builds a filtered table from the given step, using the default SqlPredicate factory.
 * Delegates to the overload that accepts an explicit predicate factory.
 */
public static <K> KTableHolder<K> build(
    final KTableHolder<K> table,
    final TableFilter<K> step,
    final RuntimeBuildContext buildContext) {
    return build(table, step, buildContext, SqlPredicate::new);
}
|
// The processing logger must be requested with the step's query context.
@Test
public void shouldUseCorrectNameForProcessingLogger() {
    // When:
    step.build(planBuilder, planInfo);
    // Then:
    verify(buildContext).getProcessingLogger(queryContext);
}
|
/**
 * Builds an aggregated table from a grouped stream, using a fresh default
 * AggregateParamsFactory. Delegates to the overload taking an explicit factory.
 */
public static KTableHolder<GenericKey> build(
    final KGroupedStreamHolder groupedStream,
    final StreamAggregate aggregate,
    final RuntimeBuildContext buildContext,
    final MaterializedFactory materializedFactory) {
    return build(
        groupedStream,
        aggregate,
        buildContext,
        materializedFactory,
        new AggregateParamsFactory()
    );
}
|
// Unwindowed aggregates must materialize with exactly the provided key/value serdes.
@Test
public void shouldBuildMaterializedWithCorrectSerdesForUnwindowedAggregate() {
    // Given:
    givenUnwindowedAggregate();
    // When:
    aggregate.build(planBuilder, planInfo);
    // Then:
    verify(materializedFactory).create(same(keySerde), same(valueSerde), any());
}
|
/**
 * Maps a binlog event type code to its human-readable name.
 *
 * The returned strings are part of the observable output (tests assert on them), so
 * their exact spelling and casing must not be changed.
 * NOTE(review): casing is inconsistent across entries ("RAND", "view_change",
 * "transaction_payload" vs. "Xid", "Query") — presumably mirroring upstream MySQL
 * naming; confirm against the server source before normalizing.
 *
 * @param type the binlog event type code
 * @return the event name, or "Unknown type:&lt;type&gt;" for unrecognized codes
 */
public static String getTypeName(final int type) {
    switch (type) {
        case START_EVENT_V3:
            return "Start_v3";
        case STOP_EVENT:
            return "Stop";
        case QUERY_EVENT:
            return "Query";
        case ROTATE_EVENT:
            return "Rotate";
        case INTVAR_EVENT:
            return "Intvar";
        case LOAD_EVENT:
            return "Load";
        case NEW_LOAD_EVENT:
            return "New_load";
        case SLAVE_EVENT:
            return "Slave";
        case CREATE_FILE_EVENT:
            return "Create_file";
        case APPEND_BLOCK_EVENT:
            return "Append_block";
        case DELETE_FILE_EVENT:
            return "Delete_file";
        case EXEC_LOAD_EVENT:
            return "Exec_load";
        case RAND_EVENT:
            return "RAND";
        case XID_EVENT:
            return "Xid";
        case USER_VAR_EVENT:
            return "User var";
        case FORMAT_DESCRIPTION_EVENT:
            return "Format_desc";
        case TABLE_MAP_EVENT:
            return "Table_map";
        case PRE_GA_WRITE_ROWS_EVENT:
            return "Write_rows_event_old";
        case PRE_GA_UPDATE_ROWS_EVENT:
            return "Update_rows_event_old";
        case PRE_GA_DELETE_ROWS_EVENT:
            return "Delete_rows_event_old";
        case WRITE_ROWS_EVENT_V1:
            return "Write_rows_v1";
        case UPDATE_ROWS_EVENT_V1:
            return "Update_rows_v1";
        case DELETE_ROWS_EVENT_V1:
            return "Delete_rows_v1";
        case BEGIN_LOAD_QUERY_EVENT:
            return "Begin_load_query";
        case EXECUTE_LOAD_QUERY_EVENT:
            return "Execute_load_query";
        case INCIDENT_EVENT:
            return "Incident";
        // Deliberate fall-through: both heartbeat variants share one name.
        case HEARTBEAT_LOG_EVENT:
        case HEARTBEAT_LOG_EVENT_V2:
            return "Heartbeat";
        case IGNORABLE_LOG_EVENT:
            return "Ignorable";
        case ROWS_QUERY_LOG_EVENT:
            return "Rows_query";
        case WRITE_ROWS_EVENT:
            return "Write_rows";
        case UPDATE_ROWS_EVENT:
            return "Update_rows";
        case DELETE_ROWS_EVENT:
            return "Delete_rows";
        case GTID_LOG_EVENT:
            return "Gtid";
        case ANONYMOUS_GTID_LOG_EVENT:
            return "Anonymous_Gtid";
        case PREVIOUS_GTIDS_LOG_EVENT:
            return "Previous_gtids";
        case PARTIAL_UPDATE_ROWS_EVENT:
            return "Update_rows_partial";
        case TRANSACTION_CONTEXT_EVENT :
            return "Transaction_context";
        case VIEW_CHANGE_EVENT :
            return "view_change";
        case XA_PREPARE_LOG_EVENT :
            return "Xa_prepare";
        case TRANSACTION_PAYLOAD_EVENT :
            return "transaction_payload";
        default:
            return "Unknown type:" + type;
    }
}
|
// Event type code 6 must map to "Load".
@Test
public void getTypeNameInputPositiveOutputNotNull6() {
    // Arrange
    final int type = 6;
    // Act
    final String actual = LogEvent.getTypeName(type);
    // Assert result
    Assert.assertEquals("Load", actual);
}
|
/**
 * Parses a configuration qualifier string into {@code out}, applying the default
 * (strict) mode of the three-argument overload.
 *
 * @param str the qualifier string, e.g. "night"
 * @param out receives the parsed configuration
 * @return true on a successful parse
 */
public static boolean parse(final String str, ResTable_config out) {
    return parse(str, out, true);
}
|
// "night" must set the UI_MODE_NIGHT_YES bit on the parsed config.
@Test
public void parse_uiModeNight_night() {
    ResTable_config config = new ResTable_config();
    ConfigDescription.parse("night", config);
    assertThat(config.uiMode).isEqualTo(UI_MODE_NIGHT_YES);
}
|
/**
 * Translates a DataMap to an Avro GenericRecord, deriving the Avro schema from the
 * given Pegasus record schema and delegating to the four-argument overload.
 *
 * @param map the data to translate
 * @param dataSchema the Pegasus record schema describing {@code map}
 * @return the translated Avro record
 * @throws DataTranslationException when translation fails
 */
public static GenericRecord dataMapToGenericRecord(DataMap map, RecordDataSchema dataSchema) throws DataTranslationException
{
    Schema avroSchema = SchemaTranslator.dataToAvroSchema(dataSchema);
    return dataMapToGenericRecord(map, dataSchema, avroSchema, null);
}
|
// String encodings of Infinity/-Infinity/NaN for double and float fields must translate
// to the corresponding IEEE-754 special values in the Avro record.
@Test
public void testInfinityAndNan() throws IOException {
    String schemaText =
        "{\n" +
        "  \"type\" : \"record\",\n" +
        "  \"name\" : \"Foo\",\n" +
        "  \"fields\" : [\n" +
        "    { \"name\" : \"doubleRequired\", \"type\" : \"double\" },\n" +
        "    { \"name\" : \"floatRequired\", \"type\" : \"float\" }\n" +
        "  ]\n" +
        "}\n";
    // First element is the input, second element is the expected output
    Object[][] inputs = {
        {
            "{ \"doubleRequired\" : \"Infinity\", \"floatRequired\" : \"Infinity\"}",
            Double.POSITIVE_INFINITY,
            Float.POSITIVE_INFINITY
        }, {
            "{ \"doubleRequired\" : \"NaN\", \"floatRequired\" : \"NaN\"}",
            Double.NaN,
            Float.NaN
        }, {
            "{ \"doubleRequired\" : \"-Infinity\", \"floatRequired\" : \"-Infinity\"}",
            Double.NEGATIVE_INFINITY,
            Float.NEGATIVE_INFINITY
        }
    };
    RecordDataSchema recordDataSchema = (RecordDataSchema) TestUtil.dataSchemaFromString(schemaText);
    for (Object[] input : inputs) {
        DataMap dataMap = TestUtil.dataMapFromString((String) input[0]);
        Schema avroSchema = SchemaTranslator.dataToAvroSchema(recordDataSchema);
        GenericRecord avroRecord = DataTranslator.dataMapToGenericRecord(dataMap, recordDataSchema, avroSchema);
        assertEquals(avroRecord.get("doubleRequired"), input[1]);
        assertEquals(avroRecord.get("floatRequired"), input[2]);
    }
}
|
/**
 * Releases all conductor resources on shutdown. The ordering here is deliberate:
 * async tasks are stopped first, then the name resolver, then publications/images are
 * freed, then the driver heartbeat is invalidated, and finally the CnC file is forced
 * to disk before the context closes. Do not reorder without care.
 */
public void onClose()
{
    // Only executor-backed task runners can be shut down; other Executor types are left alone.
    if (asyncTaskExecutor instanceof ExecutorService)
    {
        try
        {
            final ExecutorService executor = (ExecutorService)asyncTaskExecutor;
            executor.shutdownNow();
            // Bounded wait: report (rather than hang) if tasks do not terminate in time.
            if (!executor.awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT_SECONDS, TimeUnit.SECONDS))
            {
                ctx.errorHandler().onError(new AeronEvent("failed to shutdown async task executor"));
            }
        }
        catch (final Exception e)
        {
            ctx.errorHandler().onError(e);
        }
    }
    CloseHelper.close(ctx.errorHandler(), nameResolver);
    publicationImages.forEach(PublicationImage::free);
    networkPublications.forEach(NetworkPublication::free);
    ipcPublications.forEach(IpcPublication::free);
    freeEndOfLifeResources(Integer.MAX_VALUE);
    // Signal to clients that the driver is no longer heartbeating.
    toDriverCommands.consumerHeartbeatTime(Aeron.NULL_VALUE);
    // Flush the command-and-control buffer to disk before tearing the context down.
    ctx.cncByteBuffer().force();
    ctx.close();
}
|
// onClose must invalidate the heartbeat and then force the CnC buffer, in that order.
@Test
void onCloseShouldCallForceOnTheCncByteBuffer(@TempDir final Path dir)
{
    final MappedByteBuffer cncByteBuffer = spy(IoUtil.mapNewFile(dir.resolve("test.cnc").toFile(), 1024));
    final DriverConductor conductor = new DriverConductor(ctx.clone().cncByteBuffer(cncByteBuffer));
    conductor.onClose();
    final InOrder inOrder = inOrder(toDriverCommands, cncByteBuffer);
    inOrder.verify(toDriverCommands).consumerHeartbeatTime(Aeron.NULL_VALUE);
    inOrder.verify(cncByteBuffer).force();
}
|
/**
 * Discovers member addresses via AWS ECS, optionally falling back to EC2 discovery.
 * ECS task lookup is skipped when any EC2-specific property is configured; the EC2
 * fallback only applies in client discovery mode with no ECS-specific properties set.
 *
 * @return map of private to public addresses, or an empty map when nothing is found
 */
@Override
public Map<String, String> getAddresses() {
    AwsCredentials credentials = awsCredentialsProvider.credentials();
    List<String> taskAddresses = emptyList();
    if (!awsConfig.anyOfEc2PropertiesConfigured()) {
        taskAddresses = awsEcsApi.listTaskPrivateAddresses(cluster, credentials);
        LOGGER.fine("AWS ECS DescribeTasks found the following addresses: %s", taskAddresses);
    }
    if (!taskAddresses.isEmpty()) {
        return awsEc2Api.describeNetworkInterfaces(taskAddresses, credentials);
    } else if (DiscoveryMode.Client == awsConfig.getDiscoveryMode() && !awsConfig.anyOfEcsPropertiesConfigured()) {
        LOGGER.fine("No tasks found in ECS cluster: '%s'. Trying AWS EC2 Discovery.", cluster);
        return awsEc2Api.describeInstances(credentials);
    }
    return emptyMap();
}
|
// No ECS tasks and no EC2 fallback configured: result must be empty.
@Test
public void getAddressesNoTasks() {
    // given
    given(awsEcsApi.listTaskPrivateAddresses(CLUSTER, CREDENTIALS)).willReturn(emptyList());
    // when
    Map<String, String> result = awsEcsClient.getAddresses();
    // then
    assertTrue(result.isEmpty());
}
|
/**
 * Matches raw issues against non-closed base issues using progressively weaker keys.
 * The pass order matters: each pass only considers issues left unmatched by earlier,
 * stricter passes, so do not reorder the match calls.
 *
 * @param rawInput the issues from the current analysis
 * @param baseInput the non-closed issues from the previous analysis
 * @return the tracking result pairing raw issues with their base counterparts
 */
public NonClosedTracking<RAW, BASE> trackNonClosed(Input<RAW> rawInput, Input<BASE> baseInput) {
    NonClosedTracking<RAW, BASE> tracking = NonClosedTracking.of(rawInput, baseInput);
    // 1. match by rule, line, line hash and message
    match(tracking, LineAndLineHashAndMessage::new);
    // 2. match issues with same rule, same line and same line hash, but not necessarily with same message
    match(tracking, LineAndLineHashKey::new);
    // 3. detect code moves by comparing blocks of codes
    detectCodeMoves(rawInput, baseInput, tracking);
    // 4. match issues with same rule, same message and same line hash
    match(tracking, LineHashAndMessageKey::new);
    // 5. match issues with same rule, same line and same message
    match(tracking, LineAndMessageKey::new);
    // 6. match issues with same rule and same line hash but different line and different message.
    // See SONAR-2812
    match(tracking, LineHashKey::new);
    return tracking;
}
|
// Issues identical except for the message must still match (pass 2: rule + line + hash).
@Test
public void similar_issues_except_message_match() {
    FakeInput baseInput = new FakeInput("H1");
    Issue base = baseInput.createIssueOnLine(1, RULE_SYSTEM_PRINT, "msg1");
    FakeInput rawInput = new FakeInput("H1");
    Issue raw = rawInput.createIssueOnLine(1, RULE_SYSTEM_PRINT, "msg2");
    Tracking<Issue, Issue> tracking = tracker.trackNonClosed(rawInput, baseInput);
    assertThat(tracking.baseFor(raw)).isSameAs(base);
}
|
/**
 * Returns a previously purchased product: removes it from the purchases list, deletes
 * the purchase record, and refunds the sale price. Logs an error (without changing
 * state) when the product was never purchased or when the DAO delete fails.
 *
 * @param product the product to return
 */
public void returnProduct(Product product) {
    LOGGER.info(
        String.format(
            "%s want to return %s($%.2f)...",
            name, product.getName(), product.getSalePrice().getAmount()));
    if (purchases.contains(product)) {
        try {
            // Delete the record first; state is only mutated after the DAO succeeds.
            customerDao.deleteProduct(product, this);
            purchases.remove(product);
            receiveMoney(product.getSalePrice());
            LOGGER.info(String.format("%s returned %s!", name, product.getName()));
        } catch (SQLException ex) {
            LOGGER.error(ex.getMessage());
        }
    } else {
        LOGGER.error(String.format("%s didn't buy %s...", name, product.getName()));
    }
}
|
// Returning a product removes it and refunds money; a second return of the same product
// must be a no-op (no double refund).
@Test
void shouldRemoveProductFromPurchases() {
    customer.setPurchases(new ArrayList<>(Arrays.asList(product)));
    customer.returnProduct(product);
    assertEquals(new ArrayList<>(), customer.getPurchases());
    assertEquals(Money.of(USD, 200), customer.getMoney());
    customer.returnProduct(product);
    assertEquals(new ArrayList<>(), customer.getPurchases());
    assertEquals(Money.of(USD, 200), customer.getMoney());
}
|
/**
 * Determines a file type from the hex string of its leading bytes. First consults the
 * user-extensible FILE_TYPE_MAP by case-insensitive prefix, then falls back to the
 * built-in magic-number table.
 *
 * @param fileStreamHexHead hex string of the file header bytes
 * @return the file extension, or null when the input is blank
 */
public static String getType(String fileStreamHexHead) {
    if(StrUtil.isBlank(fileStreamHexHead)){
        return null;
    }
    if (MapUtil.isNotEmpty(FILE_TYPE_MAP)) {
        for (final Entry<String, String> fileTypeEntry : FILE_TYPE_MAP.entrySet()) {
            if (StrUtil.startWithIgnoreCase(fileStreamHexHead, fileTypeEntry.getKey())) {
                return fileTypeEntry.getValue();
            }
        }
    }
    // Fallback: decode the hex and use the built-in magic-number detection.
    byte[] bytes = HexUtil.decodeHex(fileStreamHexHead);
    return FileMagicNumber.getMagicNumber(bytes).getExtension();
}
|
// Manual check for webp detection against a local file; disabled because it depends on
// a file on the developer's machine.
@Test
@Disabled
public void webpTest(){
    // https://gitee.com/dromara/hutool/issues/I5BGTF
    final File file = FileUtil.file("d:/test/a.webp");
    final BufferedInputStream inputStream = FileUtil.getInputStream(file);
    final String type = FileTypeUtil.getType(inputStream);
    Console.log(type);
}
|
/**
 * Creates, initializes and starts the audit manager: the active implementation when
 * auditing is enabled in the configuration, otherwise a stub.
 *
 * @param conf configuration consulted for AUDIT_ENABLED and passed to init()
 * @param iostatistics statistics store; must be non-null when auditing is enabled
 * @return a started audit manager
 */
public static AuditManagerS3A createAndStartAuditManager(
    Configuration conf,
    IOStatisticsStore iostatistics) {
    AuditManagerS3A auditManager;
    if (conf.getBoolean(AUDIT_ENABLED, AUDIT_ENABLED_DEFAULT)) {
        auditManager = new ActiveAuditManagerS3A(
            requireNonNull(iostatistics));
    } else {
        LOG.debug("auditing is disabled");
        auditManager = stubAuditManager();
    }
    // Lifecycle: init before start, for either implementation.
    auditManager.init(conf);
    auditManager.start();
    LOG.debug("Started Audit Manager {}", auditManager);
    return auditManager;
}
|
// A configured execution interceptor class must be loaded alongside the built-in one.
@Test
public void testRequestHandlerLoading() throws Throwable {
    Configuration conf = noopAuditConfig();
    conf.setClassLoader(this.getClass().getClassLoader());
    conf.set(AUDIT_EXECUTION_INTERCEPTORS,
        SimpleAWSExecutionInterceptor.CLASS);
    AuditManagerS3A manager = AuditIntegration.createAndStartAuditManager(
        conf,
        ioStatistics);
    assertThat(manager.createExecutionInterceptors())
        .hasSize(2)
        .hasAtLeastOneElementOfType(SimpleAWSExecutionInterceptor.class);
}
|
/**
 * Finds the email recipients subscribed to the given dispatcher for a project,
 * filtered down to subscribers authorized on that project.
 *
 * @param dispatcherKey the notification dispatcher key
 * @param projectKey the project key; validated before use
 * @param subscriberPermissionsOnProject permissions required to keep a subscriber
 * @return the authorized subscribed recipients
 */
@Override
public Set<EmailRecipient> findSubscribedEmailRecipients(String dispatcherKey, String projectKey,
    SubscriberPermissionsOnProject subscriberPermissionsOnProject) {
    verifyProjectKey(projectKey);
    try (DbSession dbSession = dbClient.openSession(false)) {
        Set<EmailSubscriberDto> emailSubscribers = dbClient.propertiesDao().findEmailSubscribersForNotification(
            dbSession, dispatcherKey, EmailNotificationChannel.class.getSimpleName(), projectKey);
        return keepAuthorizedEmailSubscribers(dbSession, projectKey, subscriberPermissionsOnProject, emailSubscribers);
    }
}
|
// When the DAO reports no subscribers, the result is empty and no authorization
// filtering must be attempted.
@Test
public void findSubscribedEmailRecipients_with_logins_returns_empty_if_no_email_recipients_in_project_for_dispatcher_key() {
    String dispatcherKey = randomAlphabetic(12);
    String globalPermission = randomAlphanumeric(4);
    String projectPermission = randomAlphanumeric(5);
    String projectKey = randomAlphabetic(6);
    Set<String> logins = IntStream.range(0, 1 + new Random().nextInt(10))
        .mapToObj(i -> "login_" + i)
        .collect(Collectors.toSet());
    when(propertiesDao.findEmailSubscribersForNotification(dbSession, dispatcherKey, "EmailNotificationChannel", projectKey, logins))
        .thenReturn(Collections.emptySet());
    Set<EmailRecipient> emailRecipients = underTest.findSubscribedEmailRecipients(dispatcherKey, projectKey, logins,
        new SubscriberPermissionsOnProject(globalPermission, projectPermission));
    assertThat(emailRecipients).isEmpty();
    verify(authorizationDao, times(0)).keepAuthorizedLoginsOnEntity(any(DbSession.class), anySet(), anyString(), anyString());
}
|
/** Identifies this mapper as targeting the embedded Derby data source. */
@Override
public String getDataSource() {
    return DataSourceConstant.DERBY;
}
|
// The Derby mapper must report the Derby data source constant.
@Test
void testGetDataSource() {
    String dataSource = tenantCapacityMapperByDerby.getDataSource();
    assertEquals(DataSourceConstant.DERBY, dataSource);
}
|
/**
 * Cleans up Kafka resources: deletes dynamically-created topics (static topics are
 * left alone) and then delegates to the superclass cleanup.
 *
 * NOTE(review): when topic deletion fails, the exception is thrown BEFORE
 * super.cleanupAll() runs, so superclass resources are presumably not released on that
 * path — confirm whether that is intentional or whether super.cleanupAll() should run
 * in a finally block.
 *
 * @throws KafkaResourceManagerException when topic deletion failed
 */
@Override
public synchronized void cleanupAll() throws KafkaResourceManagerException {
    LOG.info("Attempting to cleanup Kafka manager.");
    boolean producedError = false;
    // First, delete kafka topics if it was not given as a static argument
    try {
        if (!usingStaticTopic) {
            kafkaClient.deleteTopics(topicNames).all().get();
        }
    } catch (Exception e) {
        LOG.error("Failed to delete kafka topic.", e);
        producedError = true;
    }
    // Throw Exception at the end if there were any errors
    if (producedError) {
        throw new KafkaResourceManagerException(
            "Failed to delete resources. Check above for errors.");
    }
    super.cleanupAll();
    LOG.info("Kafka manager successfully cleaned up.");
}
|
// A failed topic-deletion future must surface as KafkaResourceManagerException.
@Test
public void testCleanupAllShouldThrowErrorWhenKafkaClientFailsToDeleteTopic()
    throws ExecutionException, InterruptedException {
    when(kafkaClient.deleteTopics(anyCollection()).all().get())
        .thenThrow(new ExecutionException(new RuntimeException("delete topic future fails")));
    assertThrows(KafkaResourceManagerException.class, () -> testManager.cleanupAll());
}
|
/**
 * Fans a getContainers call out to all active sub-clusters and merges the results.
 * Validates the appId/appAttemptId first; metric and audit records are written on
 * every success and failure path.
 *
 * @param req the servlet request, forwarded to sub-clusters
 * @param res the servlet response, forwarded to sub-clusters
 * @param appId the application id; must be well-formed
 * @param appAttemptId the application attempt id; must be well-formed
 * @return the merged containers info from all active sub-clusters
 * @throws IllegalArgumentException when appId/appAttemptId fail validation
 * @throws RuntimeException when the fan-out fails
 */
@Override
public ContainersInfo getContainers(HttpServletRequest req,
    HttpServletResponse res, String appId, String appAttemptId) {
  // Check that the appId/appAttemptId format is accurate
  try {
    RouterServerUtil.validateApplicationId(appId);
    RouterServerUtil.validateApplicationAttemptId(appAttemptId);
  } catch (IllegalArgumentException e) {
    RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_CONTAINERS,
        UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage());
    routerMetrics.incrGetContainersFailedRetrieved();
    throw e;
  }
  try {
    long startTime = clock.getTime();
    ContainersInfo containersInfo = new ContainersInfo();
    Collection<SubClusterInfo> subClustersActive = federationFacade.getActiveSubClusters();
    Class[] argsClasses = new Class[]{
        HttpServletRequest.class, HttpServletResponse.class, String.class, String.class};
    Object[] args = new Object[]{req, res, appId, appAttemptId};
    ClientMethod remoteMethod = new ClientMethod("getContainers", argsClasses, args);
    Map<SubClusterInfo, ContainersInfo> containersInfoMap =
        invokeConcurrent(subClustersActive, remoteMethod, ContainersInfo.class);
    if (containersInfoMap != null && !containersInfoMap.isEmpty()) {
      containersInfoMap.values().forEach(containers ->
          containersInfo.addAll(containers.getContainers()));
    }
    // containersInfo was constructed above and can never be null here; the former
    // "if (containersInfo != null)" guard was dead code and has been removed.
    long stopTime = clock.getTime();
    RouterAuditLogger.logSuccess(getUser().getShortUserName(), GET_CONTAINERS,
        TARGET_WEB_SERVICE);
    routerMetrics.succeededGetContainersRetrieved(stopTime - startTime);
    return containersInfo;
  } catch (NotFoundException e) {
    routerMetrics.incrGetContainersFailedRetrieved();
    RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_CONTAINERS,
        UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage());
    RouterServerUtil.logAndThrowRunTimeException(e, "getContainers error, appId = %s, " +
        " appAttemptId = %s, Probably getActiveSubclusters error.", appId, appAttemptId);
  } catch (IOException | YarnException e) {
    routerMetrics.incrGetContainersFailedRetrieved();
    RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_CONTAINERS,
        UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage());
    RouterServerUtil.logAndThrowRunTimeException(e, "getContainers error, appId = %s, " +
        " appAttemptId = %s.", appId, appAttemptId);
  }
  // Reachable only if the helpers above return instead of throwing.
  routerMetrics.incrGetContainersFailedRetrieved();
  RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_CONTAINERS,
      UNKNOWN, TARGET_WEB_SERVICE, "getContainers failed.");
  throw RouterServerUtil.logAndReturnRunTimeException(
      "getContainers failed, appId: %s, appAttemptId: %s.", appId, appAttemptId);
}
|
// A null appAttemptId must be rejected by validation with a descriptive message.
@Test
public void testGetContainersNotExists() throws Exception {
    ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
    LambdaTestUtils.intercept(IllegalArgumentException.class,
        "Parameter error, the appAttemptId is empty or null.",
        () -> interceptor.getContainers(null, null, appId.toString(), null));
}
|
/** Returns the singleton rewrite coordinator instance. */
public static FileRewriteCoordinator get() {
    return INSTANCE;
}
|
// End-to-end check that two independently staged compaction file sets can be rewritten
// and committed in a single rewriteFiles operation: 4 original files collapse to 2,
// snapshot count and row count stay consistent.
@Test
public void testCommitMultipleRewrites() throws NoSuchTableException, IOException {
    sql("CREATE TABLE %s (id INT, data STRING) USING iceberg", tableName);
    Dataset<Row> df = newDF(1000);
    // add first two files
    df.coalesce(1).writeTo(tableName).append();
    df.coalesce(1).writeTo(tableName).append();
    Table table = validationCatalog.loadTable(tableIdent);
    String firstFileSetID = UUID.randomUUID().toString();
    long firstFileSetSnapshotId = table.currentSnapshot().snapshotId();
    ScanTaskSetManager taskSetManager = ScanTaskSetManager.get();
    try (CloseableIterable<FileScanTask> tasks = table.newScan().planFiles()) {
        // stage first 2 files for compaction
        taskSetManager.stageTasks(table, firstFileSetID, Lists.newArrayList(tasks));
    }
    // add two more files
    df.coalesce(1).writeTo(tableName).append();
    df.coalesce(1).writeTo(tableName).append();
    table.refresh();
    String secondFileSetID = UUID.randomUUID().toString();
    try (CloseableIterable<FileScanTask> tasks =
        table.newScan().appendsAfter(firstFileSetSnapshotId).planFiles()) {
        // stage 2 more files for compaction
        taskSetManager.stageTasks(table, secondFileSetID, Lists.newArrayList(tasks));
    }
    ImmutableSet<String> fileSetIDs = ImmutableSet.of(firstFileSetID, secondFileSetID);
    for (String fileSetID : fileSetIDs) {
        // read and pack 2 files into 1 split
        Dataset<Row> scanDF =
            spark
                .read()
                .format("iceberg")
                .option(SparkReadOptions.SCAN_TASK_SET_ID, fileSetID)
                .option(SparkReadOptions.SPLIT_SIZE, Long.MAX_VALUE)
                .load(tableName);
        // write the combined data as one file
        scanDF
            .writeTo(tableName)
            .option(SparkWriteOptions.REWRITTEN_FILE_SCAN_TASK_SET_ID, fileSetID)
            .append();
    }
    // commit both rewrites at the same time
    FileRewriteCoordinator rewriteCoordinator = FileRewriteCoordinator.get();
    Set<DataFile> rewrittenFiles =
        fileSetIDs.stream()
            .flatMap(fileSetID -> taskSetManager.fetchTasks(table, fileSetID).stream())
            .map(t -> t.asFileScanTask().file())
            .collect(Collectors.toSet());
    Set<DataFile> addedFiles =
        fileSetIDs.stream()
            .flatMap(fileSetID -> rewriteCoordinator.fetchNewFiles(table, fileSetID).stream())
            .collect(Collectors.toSet());
    table.newRewrite().rewriteFiles(rewrittenFiles, addedFiles).commit();
    table.refresh();
    Assert.assertEquals("Should produce 5 snapshots", 5, Iterables.size(table.snapshots()));
    Map<String, String> summary = table.currentSnapshot().summary();
    Assert.assertEquals("Deleted files count must match", "4", summary.get("deleted-data-files"));
    Assert.assertEquals("Added files count must match", "2", summary.get("added-data-files"));
    Object rowCount = scalarSql("SELECT count(*) FROM %s", tableName);
    Assert.assertEquals("Row count must match", 4000L, rowCount);
}
|
/**
 * Resolves the effective client address for a request. The X-Forwarded-For header is
 * honoured only when the direct peer address belongs to one of the trusted subnets;
 * in every other case the direct peer address is returned.
 *
 * @param request the incoming request
 * @param trustedSubnets subnets whose proxies may set X-Forwarded-For
 * @return the forwarded address when trusted, otherwise the direct remote address
 */
public static String getRemoteAddrFromRequest(Request request, Set<IpSubnet> trustedSubnets) {
    final String remoteAddr = request.getRemoteAddr();
    final String forwardedFor = request.getHeader("X-Forwarded-For");
    if (forwardedFor == null) {
        // No proxy header present; the peer address is all we have.
        return remoteAddr;
    }
    for (IpSubnet subnet : trustedSubnets) {
        try {
            if (subnet.contains(remoteAddr)) {
                // Request came from trusted source, trust X-Forwarded-For and return it
                return forwardedFor;
            }
        } catch (UnknownHostException e) {
            // ignore silently, probably not worth logging
        }
    }
    // Peer is not a trusted proxy: fall back to the direct address.
    return remoteAddr;
}
|
// An IPv6 peer inside an IPv6 trusted subnet must have its X-Forwarded-For honoured.
@Test
public void getRemoteAddrFromRequestWorksWithIPv6IfSubnetsContainsOnlyIPv6() throws Exception {
    final Request request = mock(Request.class);
    when(request.getRemoteAddr()).thenReturn("2001:DB8::42");
    when(request.getHeader("X-Forwarded-For")).thenReturn("2001:DB8::1:2:3:4:5:6");
    final String s = RestTools.getRemoteAddrFromRequest(request, Collections.singleton(new IpSubnet("2001:DB8::/32")));
    assertThat(s).isEqualTo("2001:DB8::1:2:3:4:5:6");
}
|
/**
 * Returns the getter for {@code attributeName} on the target object's class, creating
 * it on demand and caching it when the created getter reports itself as cacheable.
 *
 * @param targetObject the object whose class is used as the cache key
 * @param attributeName the attribute to resolve
 * @param failOnMissingReflectiveAttribute whether a missing reflective attribute is fatal
 * @return the resolved getter
 */
Getter getGetter(Object targetObject, String attributeName, boolean failOnMissingReflectiveAttribute) {
    final Class<?> targetClass = targetObject.getClass();
    final Getter cached = getterCache.getGetter(targetClass, attributeName);
    if (cached != null) {
        return cached;
    }
    final Getter created = instantiateGetter(targetObject, attributeName, failOnMissingReflectiveAttribute);
    // Only getters that declare themselves cacheable may be reused across calls.
    if (created.isCacheable()) {
        getterCache.putGetter(targetClass, attributeName, created);
    }
    return created;
}
|
// The second lookup of the same attribute must return the identical cached Getter.
@Test
public void when_getGetterByReflection_then_getterInCache() {
    // GIVEN
    Extractors extractors = createExtractors(null);
    // WHEN
    Getter getterFirstInvocation = extractors.getGetter(bond, "car.power", true);
    Getter getterSecondInvocation = extractors.getGetter(bond, "car.power", true);
    // THEN
    assertThat(getterFirstInvocation)
        .isInstanceOf(FieldGetter.class)
        .isSameAs(getterSecondInvocation);
}
|
@Override
public List<RuleNode> findRuleNodesByTenantIdAndType(TenantId tenantId, String type, String configurationSearch) {
    // Thin delegate: run the repository query and map JPA entities to data-layer objects.
    return DaoUtil.convertDataList(ruleNodeRepository.findRuleNodesByTenantIdAndType(tenantId.getId(), type, configurationSearch));
}
|
@Test
public void testFindRuleNodesByTenantIdAndType() {
    // Filtering by tenant + type + configuration search string returns that tenant's nodes only.
    List<RuleNode> ruleNodes1 = ruleNodeDao.findRuleNodesByTenantIdAndType(tenantId1, "A", PREFIX_FOR_RULE_NODE_NAME);
    assertEquals(20, ruleNodes1.size());
    List<RuleNode> ruleNodes2 = ruleNodeDao.findRuleNodesByTenantIdAndType(tenantId2, "B", PREFIX_FOR_RULE_NODE_NAME);
    assertEquals(20, ruleNodes2.size());
    // A null configuration search must behave like "no extra filter".
    ruleNodes1 = ruleNodeDao.findRuleNodesByTenantIdAndType(tenantId1, "A", null);
    assertEquals(20, ruleNodes1.size());
    ruleNodes2 = ruleNodeDao.findRuleNodesByTenantIdAndType(tenantId2, "B", null);
    assertEquals(20, ruleNodes2.size());
}
|
/**
 * Creates a new Hazelcast instance from the given configuration.
 *
 * @param config configuration to use; when {@code null}, the default {@link Config#load()}
 *               lookup is used instead
 * @return the started Hazelcast instance
 */
public static HazelcastInstance newHazelcastInstance(Config config) {
    final Config effectiveConfig = (config == null) ? Config.load() : config;
    return newHazelcastInstance(effectiveConfig, effectiveConfig.getInstanceName(), new DefaultNodeContext());
}
|
@Test
public void test_NewInstance_configLoaded() {
    // Passing null must trigger the default Config.load() lookup instead of failing.
    hazelcastInstance = HazelcastInstanceFactory.newHazelcastInstance(null);
    assertNotNull(hazelcastInstance.getConfig());
}
|
/**
 * Builds a {@link QueryDescription} for the given query metadata.
 *
 * <p>Persistent queries additionally carry their sink name and, when the result topic's
 * key format is windowed, the window type. Transient queries have neither.
 *
 * @param queryMetadata       the query to describe
 * @param ksqlHostQueryStatus per-host status of the query
 * @return the description
 */
public static QueryDescription forQueryMetadata(
    final QueryMetadata queryMetadata,
    final Map<KsqlHostInfoEntity, KsqlQueryStatus> ksqlHostQueryStatus
) {
  if (!(queryMetadata instanceof PersistentQueryMetadata)) {
    // Transient query: no sink, no windowing information.
    return create(
        queryMetadata,
        Optional.empty(),
        Optional.empty(),
        ksqlHostQueryStatus
    );
  }
  final PersistentQueryMetadata persistentQuery = (PersistentQueryMetadata) queryMetadata;
  return create(
      persistentQuery,
      persistentQuery.getResultTopic().map(t -> t.getKeyFormat().getWindowType())
          .orElse(Optional.empty()),
      persistentQuery.getSinkName(),
      ksqlHostQueryStatus
  );
}
|
@Test
public void shouldReportPersistentQueriesStatus() {
    // Sanity check: the baseline description reports RUNNING.
    assertThat(persistentQueryDescription.getState(), is(Optional.of("RUNNING")));
    // When any host reports ERROR, the aggregated state must become ERROR.
    final Map<KsqlHostInfoEntity, KsqlQueryStatus> updatedStatusMap = new HashMap<>(STATUS_MAP);
    updatedStatusMap.put(new KsqlHostInfoEntity("anotherhost", 8080), KsqlQueryStatus.ERROR);
    final QueryDescription updatedPersistentQueryDescription =
        QueryDescriptionFactory.forQueryMetadata(persistentQuery, updatedStatusMap);
    assertThat(updatedPersistentQueryDescription.getState(), is(Optional.of("ERROR")));
}
|
/**
 * Returns a serializable function that parses serialized protobuf bytes of the given
 * message type into a Beam {@link Row}.
 *
 * <p>The descriptor set is loaded once from {@code fileDescriptorPath}; the returned
 * function captures the resulting domain and schema so it can be shipped to workers.
 *
 * @param fileDescriptorPath path to the compiled file descriptor set
 * @param messageName        fully qualified protobuf message name
 * @return function mapping protobuf payload bytes to rows
 */
public static SerializableFunction<byte[], Row> getProtoBytesToRowFunction(
    String fileDescriptorPath, String messageName) {
  ProtoSchemaInfo dynamicProtoDomain = getProtoDomain(fileDescriptorPath, messageName);
  ProtoDomain protoDomain = dynamicProtoDomain.getProtoDomain();
  @SuppressWarnings("unchecked")
  ProtoDynamicMessageSchema<DynamicMessage> protoDynamicMessageSchema =
      ProtoDynamicMessageSchema.forDescriptor(protoDomain, messageName);
  return new SimpleFunction<byte[], Row>() {
    @Override
    public Row apply(byte[] input) {
      try {
        // The descriptor is looked up by the simple (unqualified) message name.
        List<String> messageElements = Splitter.on('.').splitToList(messageName);
        String messageTypeByName = messageElements.get(messageElements.size() - 1);
        final Descriptors.Descriptor descriptor =
            protoDomain
                .getFileDescriptor(dynamicProtoDomain.getFileName())
                .findMessageTypeByName(messageTypeByName);
        DynamicMessage dynamicMessage = DynamicMessage.parseFrom(descriptor, input);
        SerializableFunction<DynamicMessage, Row> res =
            protoDynamicMessageSchema.getToRowFunction();
        return res.apply(dynamicMessage);
      } catch (InvalidProtocolBufferException e) {
        // Malformed payloads are logged and surfaced as a runtime failure.
        LOG.error("Error parsing to DynamicMessage", e);
        throw new RuntimeException(e);
      }
    }
  };
}
|
@Test
public void testProtoBytesToRowFunctionReturnsRowSuccess() {
  // Create a proto bytes to row function
  SerializableFunction<byte[], Row> protoBytesToRowFunction =
      ProtoByteUtils.getProtoBytesToRowFunction(DESCRIPTOR_PATH, MESSAGE_NAME);
  // Pre-serialized protobuf payload whose "name" field is "Doe".
  byte[] byteArray = {
    8, -46, 9, 18, 3, 68, 111, 101, 34, 35, 10, 7, 115, 101, 97, 116, 116, 108, 101, 18, 11, 102,
    97, 107, 101, 32, 115, 116, 114, 101, 101, 116, 26, 2, 119, 97, 34, 7, 84, 79, 45, 49, 50, 51,
    52
  };
  Row row = protoBytesToRowFunction.apply(byteArray);
  Assert.assertEquals("Doe", row.getValue("name"));
}
|
@Override
public <VR> KStream<K, VR> mapValues(final ValueMapper<? super V, ? extends VR> valueMapper) {
    // Delegate to the key-aware overload by wrapping the mapper; null checks happen there.
    return mapValues(withKey(valueMapper));
}
|
@Test
public void shouldNotAllowNullMapperOnMapValuesWithNamed() {
    // A null mapper must be rejected eagerly with a descriptive NPE.
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.mapValues((ValueMapper<Object, Object>) null, Named.as("valueMapper")));
    assertThat(exception.getMessage(), equalTo("valueMapper can't be null"));
}
|
@Override
public <T> T convert(DataTable dataTable, Type type) {
    // Delegate with transposed=false; the 3-arg overload does the actual conversion.
    return convert(dataTable, type, false);
}
|
@Test
void to_map_of_primitive_to_list_of_unknown__throws_exception() {
    // No transformer is registered for java.util.Date, so conversion must fail
    // with a message listing both ways to fix it.
    DataTable table = parse("",
        " | Annie M. G. | 1995-03-21 | 1911-03-20 |",
        " | Roald | 1990-09-13 | 1916-09-13 |",
        " | Astrid | 1907-10-14 | 1907-11-14 |");
    CucumberDataTableException exception = assertThrows(
        CucumberDataTableException.class,
        () -> converter.convert(table, MAP_OF_STRING_TO_LIST_OF_DATE));
    assertThat(exception.getMessage(), is("" +
        "Can't convert DataTable to Map<java.lang.String, java.util.List<java.util.Date>>.\n" +
        "Please review these problems:\n" +
        "\n" +
        " - There was no table cell transformer registered for java.util.Date.\n" +
        "   Please consider registering a table cell transformer.\n" +
        "\n" +
        " - There was no default table cell transformer registered to transform java.util.Date.\n" +
        "   Please consider registering a default table cell transformer.\n" +
        "\n" +
        "Note: Usually solving one is enough"));
}
|
/**
 * Looks up a house table by primary key via the HTS REST API.
 *
 * <p>The call is wrapped in a retry template that retries on transient failures
 * (unknown repository state, illegal state) and blocks up to the request timeout.
 *
 * @param houseTablePrimaryKey database id + table id pair
 * @return the table if found, otherwise {@link Optional#empty()}
 */
@Override
public Optional<HouseTable> findById(HouseTablePrimaryKey houseTablePrimaryKey) {
  return getHtsRetryTemplate(
          Arrays.asList(
              HouseTableRepositoryStateUnkownException.class, IllegalStateException.class))
      .execute(
          context ->
              apiInstance
                  .getUserTable(
                      houseTablePrimaryKey.getDatabaseId(), houseTablePrimaryKey.getTableId())
                  .map(EntityResponseBodyUserTable::getEntity)
                  .map(houseTableMapper::toHouseTable)
                  .switchIfEmpty(Mono.empty())
                  // HTTP errors are translated to domain exceptions here.
                  .onErrorResume(this::handleHtsHttpError)
                  .blockOptional(Duration.ofSeconds(REQUEST_TIMEOUT_SECONDS)));
}
|
@Test
public void testFindByIdWithErrors() {
  // Each HTTP status code must be translated into its specific domain exception.
  // Use a parameterized Class<? extends Exception> instead of a raw Class type.
  Map<Integer, Class<? extends Exception>> map = new HashMap<>();
  map.put(404, HouseTableNotFoundException.class);
  map.put(409, HouseTableConcurrentUpdateException.class);
  map.put(400, HouseTableCallerException.class);
  for (Map.Entry<Integer, Class<? extends Exception>> entry : map.entrySet()) {
    mockHtsServer.enqueue(
        new MockResponse()
            .setResponseCode(entry.getKey())
            .setBody("")
            .addHeader("Content-Type", "application/json"));
    Assertions.assertThrowsExactly(
        entry.getValue(),
        () ->
            htsRepo
                .findById(
                    HouseTablePrimaryKey.builder()
                        .tableId(HOUSE_TABLE.getTableId())
                        .databaseId(HOUSE_TABLE.getDatabaseId())
                        .build())
                .get(),
        entry.getKey().toString());
  }
}
|
/**
 * Handles the prepared statement if it is a query, otherwise reports it as unhandled
 * so other handlers can pick it up.
 */
@SuppressWarnings("unchecked")
public QueryMetadataHolder handleStatement(
    final ServiceContext serviceContext,
    final Map<String, Object> configOverrides,
    final Map<String, Object> requestProperties,
    final PreparedStatement<?> statement,
    final Optional<Boolean> isInternalRequest,
    final MetricsCallbackHolder metricsCallbackHolder,
    final Context context,
    final boolean excludeTombstones
) {
  // Guard clause: only Query statements are handled here.
  if (!(statement.getStatement() instanceof Query)) {
    return QueryMetadataHolder.unhandled();
  }
  return handleQuery(
      serviceContext,
      (PreparedStatement<Query>) statement,
      isInternalRequest,
      metricsCallbackHolder,
      configOverrides,
      requestProperties,
      context,
      excludeTombstones
  );
}
|
@Test
public void shouldRunScalablePushQuery_error() {
  // Given: the engine throws while executing a scalable push query
  when(ksqlEngine.executeScalablePushQuery(any(), any(), any(), any(), any(), any(), any(),
      any()))
      .thenThrow(new RuntimeException("Error executing!"));
  // When:
  Exception e = assertThrows(RuntimeException.class,
      () -> queryExecutor.handleStatement(
          serviceContext, ImmutableMap.of(
              KsqlConfig.KSQL_QUERY_PUSH_V2_ENABLED, true,
              ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest"),
          ImmutableMap.of(),
          pushQuery, Optional.empty(), metricsCallbackHolder, context, false));
  // Should be no metrics reported for push queries
  metricsCallbackHolder.reportMetrics(500, 1000L, 5000L, 20000L);
  // Then: the error propagates and is recorded on the scalable-push metrics only.
  assertThat(e.getMessage(), is("Error executing!"));
  verifyNoMoreInteractions(pullQueryExecutorMetrics);
  verify(scalablePushQueryMetrics).recordStatusCode(500);
  verify(scalablePushQueryMetrics).recordRequestSize(1000L);
  verify(scalablePushQueryMetrics).recordResponseSizeForError(5000L);
  verify(scalablePushQueryMetrics).recordConnectionDurationForError(20000L);
  verify(scalablePushQueryMetrics).recordZeroRowsProcessedForError();
  verify(scalablePushQueryMetrics).recordZeroRowsReturnedForError();
}
|
/**
 * Runs a short-selling momentum strategy over the Bitstamp sample series and prints
 * the borrowing and transaction costs of each closed position.
 *
 * @param args unused
 */
public static void main(String[] args) {
    // Getting the bar series
    BarSeries series = CsvTradesLoader.loadBitstampSeries();
    // Building the short selling trading strategy
    Strategy strategy = buildShortSellingMomentumStrategy(series);
    // Setting the trading cost models (fees are fractions per trade/period)
    double feePerTrade = 0.0005;
    double borrowingFee = 0.00001;
    CostModel transactionCostModel = new LinearTransactionCostModel(feePerTrade);
    CostModel borrowingCostModel = new LinearBorrowingCostModel(borrowingFee);
    // Running the strategy; entry type SELL makes every position a short position.
    BarSeriesManager seriesManager = new BarSeriesManager(series, transactionCostModel, borrowingCostModel);
    Trade.TradeType entryTrade = Trade.TradeType.SELL;
    TradingRecord tradingRecord = seriesManager.run(strategy, entryTrade);
    DecimalFormat df = new DecimalFormat("##.##");
    System.out.println("------------ Borrowing Costs ------------");
    tradingRecord.getPositions()
            .forEach(position -> System.out.println("Borrowing cost for "
                    + df.format(position.getExit().getIndex() - position.getEntry().getIndex()) + " periods is: "
                    + df.format(position.getHoldingCost().doubleValue())));
    System.out.println("------------ Transaction Costs ------------");
    tradingRecord.getPositions()
            .forEach(position -> System.out.println("Transaction cost for selling: "
                    + df.format(position.getEntry().getCost().doubleValue()) + " -- Transaction cost for buying: "
                    + df.format(position.getExit().getCost().doubleValue())));
}
|
@Test
public void test() {
    // Smoke test: the example must run end-to-end without throwing.
    TradeCost.main(null);
}
|
/**
 * Converts {@code cleanValue} into an instance of {@code className}.
 *
 * <p>The literal string {@code "null"} is treated as {@code null}. Non-string values are
 * accepted only if they are already instances of the target type. Strings are parsed for
 * a fixed set of natively supported types (primitives and their wrappers, BigDecimal/
 * BigInteger, java.time local date/time types, and enums).
 *
 * @param className   fully qualified target type name
 * @param cleanValue  raw value, usually a String
 * @param classLoader loader used to resolve {@code className}
 * @return the converted value (may be {@code null} for non-primitive targets)
 * @throws IllegalArgumentException if the value cannot be parsed or the type is unsupported
 */
public static Object convertValue(String className, Object cleanValue, ClassLoader classLoader) {
    // "null" string is converted to null
    cleanValue = "null".equals(cleanValue) ? null : cleanValue;
    if (!isPrimitive(className) && cleanValue == null) {
        return null;
    }
    Class<?> clazz = loadClass(className, classLoader);
    // if it is not a String, it has to be an instance of the desired type
    if (!(cleanValue instanceof String)) {
        if (clazz.isInstance(cleanValue)) {
            return cleanValue;
        }
        throw new IllegalArgumentException(new StringBuilder().append("Object ").append(cleanValue)
                .append(" is not a String or an instance of ").append(className).toString());
    }
    String value = (String) cleanValue;
    try {
        // Dispatch on the target type; number parsing goes through
        // cleanStringForNumberParsing to normalize the textual form first.
        if (clazz.isAssignableFrom(String.class)) {
            return value;
        } else if (clazz.isAssignableFrom(BigDecimal.class)) {
            return parseBigDecimal(value);
        } else if (clazz.isAssignableFrom(BigInteger.class)) {
            return parseBigInteger(value);
        } else if (clazz.isAssignableFrom(Boolean.class) || clazz.isAssignableFrom(boolean.class)) {
            return parseBoolean(value);
        } else if (clazz.isAssignableFrom(Byte.class) || clazz.isAssignableFrom(byte.class)) {
            return Byte.parseByte(value);
        } else if (clazz.isAssignableFrom(Character.class) || clazz.isAssignableFrom(char.class)) {
            return parseChar(value);
        } else if (clazz.isAssignableFrom(Double.class) || clazz.isAssignableFrom(double.class)) {
            return Double.parseDouble(cleanStringForNumberParsing(value));
        } else if (clazz.isAssignableFrom(Float.class) || clazz.isAssignableFrom(float.class)) {
            return Float.parseFloat(cleanStringForNumberParsing(value));
        } else if (clazz.isAssignableFrom(Integer.class) || clazz.isAssignableFrom(int.class)) {
            return Integer.parseInt(cleanStringForNumberParsing(value));
        } else if (clazz.isAssignableFrom(LocalDate.class)) {
            return LocalDate.parse(value, DateTimeFormatter.ISO_LOCAL_DATE);
        } else if (clazz.isAssignableFrom(LocalDateTime.class)) {
            return LocalDateTime.parse(value, DateTimeFormatter.ISO_LOCAL_DATE_TIME);
        } else if (clazz.isAssignableFrom(LocalTime.class)) {
            return LocalTime.parse(value, DateTimeFormatter.ISO_LOCAL_TIME);
        } else if (clazz.isAssignableFrom(Long.class) || clazz.isAssignableFrom(long.class)) {
            return Long.parseLong(cleanStringForNumberParsing(value));
        } else if (clazz.isAssignableFrom(Short.class) || clazz.isAssignableFrom(short.class)) {
            return Short.parseShort(cleanStringForNumberParsing(value));
        } else if (Enum.class.isAssignableFrom(clazz)) {
            return Enum.valueOf(((Class<? extends Enum>) clazz), value);
        }
    } catch (RuntimeException e) {
        // Wrap any parse failure with the value and target type for diagnostics.
        throw new IllegalArgumentException(new StringBuilder().append("Impossible to parse '")
                .append(value).append("' as ").append(className).append(" [")
                .append(e.getMessage()).append("]").toString());
    }
    throw new IllegalArgumentException(new StringBuilder().append("Class ").append(className)
            .append(" is not natively supported. Please use an MVEL expression" +
                    " to use it.").toString());
}
|
@Test
public void convertValueFailUnsupportedTest() {
    // A type outside the natively supported set must be rejected with the MVEL hint.
    assertThatThrownBy(() -> convertValue(RuleScenarioRunnerHelperTest.class.getCanonicalName(), "Test", classLoader))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessageEndingWith("Please use an MVEL expression to use it.");
}
|
/**
 * Returns a {@link Read} transform that decodes Pub/Sub message payloads as UTF-8 strings.
 */
public static Read<String> readStrings() {
  return Read.newBuilder(
          (PubsubMessage message) -> new String(message.getPayload(), StandardCharsets.UTF_8))
      .setCoder(StringUtf8Coder.of())
      .build();
}
|
@Test
public void testNullTopic() {
  // Reading from a subscription must leave the topic provider unset.
  String subscription = "projects/project/subscriptions/subscription";
  PubsubIO.Read<String> read =
      PubsubIO.readStrings().fromSubscription(StaticValueProvider.of(subscription));
  assertNull(read.getTopicProvider());
  assertNotNull(read.getSubscriptionProvider());
  // Display data must still be constructible without a topic.
  assertNotNull(DisplayData.from(read));
}
|
@Override
protected DefaultDispatcherResourceManagerComponentFactory
        createDispatcherResourceManagerComponentFactory(Configuration configuration)
                throws IOException {
    // Per-job factory: the job graph is retrieved from a file, optionally resolving
    // user code against the YARN usrlib directory when it is configured/present.
    return DefaultDispatcherResourceManagerComponentFactory.createJobComponentFactory(
            YarnResourceManagerFactory.getInstance(),
            FileJobGraphRetriever.createFrom(
                    configuration,
                    YarnEntrypointUtils.getUsrLibDir(configuration).orElse(null)));
}
|
@Test
void testCreateDispatcherResourceManagerComponentFactoryFailIfUsrLibDirDoesNotExist() {
    // With user-jar inclusion disabled the usrlib directory is required; its absence
    // must fail fast with an IllegalStateException.
    final Configuration configuration = new Configuration();
    configuration.set(
            YarnConfigOptions.CLASSPATH_INCLUDE_USER_JAR,
            YarnConfigOptions.UserJarInclusion.DISABLED);
    final YarnJobClusterEntrypoint yarnJobClusterEntrypoint =
            new YarnJobClusterEntrypoint(configuration);
    assertThatThrownBy(
                    () ->
                            yarnJobClusterEntrypoint
                                    .createDispatcherResourceManagerComponentFactory(
                                            configuration))
            .isInstanceOf(IllegalStateException.class)
            .hasMessageContaining("the usrlib directory does not exist.");
}
|
/**
 * Returns the metric name from the message header, or {@code defaultValue} when unset.
 */
public String getMetricsName(Message in, String defaultValue) {
    return getStringHeader(in, MetricsConstants.HEADER_METRIC_NAME, defaultValue);
}
|
@Test
public void testGetMetricsNameNotSet() {
    // Header absent -> the supplied default must be returned, after exactly one header read.
    when(in.getHeader(HEADER_METRIC_NAME, String.class)).thenReturn(null);
    assertThat(okProducer.getMetricsName(in, "name"), is("name"));
    inOrder.verify(in, times(1)).getHeader(HEADER_METRIC_NAME, String.class);
    inOrder.verifyNoMoreInteractions();
}
|
/** Returns a new builder for this response type. */
public static Builder builder() {
  return new Builder();
}
|
@Test
public void testRoundTripSerDe() throws JsonProcessingException {
  // Populated and empty responses must serialize/deserialize symmetrically.
  String fullJson = "{\"namespaces\":[[\"accounting\"],[\"tax\"]],\"next-page-token\":null}";
  ListNamespacesResponse fullValue = ListNamespacesResponse.builder().addAll(NAMESPACES).build();
  assertRoundTripSerializesEquallyFrom(fullJson, fullValue);
  String emptyNamespaces = "{\"namespaces\":[],\"next-page-token\":null}";
  assertRoundTripSerializesEquallyFrom(emptyNamespaces, ListNamespacesResponse.builder().build());
}
|
/**
 * Applies updated component configuration and restarts the controller if any
 * of the applied settings requires it. All setters are always invoked.
 */
public void setConfigParams(Dictionary<?, ?> properties) {
    final boolean portsChanged = setOpenFlowPorts(properties);
    final boolean threadsChanged = setWorkerThreads(properties);
    final boolean tlsChanged = setTlsParameters(properties);
    if (portsChanged || threadsChanged || tlsChanged) {
        restart();
    }
}
|
@Test
public void testConfiguration() {
    // Configured port list and worker-thread count must be parsed and applied.
    Dictionary<String, String> properties = new Hashtable<>();
    properties.put("openflowPorts", "1,2,3,4,5");
    properties.put("workerThreads", "5");
    controller.setConfigParams(properties);
    IntStream.rangeClosed(1, 5)
            .forEach(i -> assertThat(controller.openFlowPorts, hasItem(i)));
    assertThat(controller.workerThreads, is(5));
}
|
/**
 * Parses schema files from the given sources (files, directories, or JARs) and returns
 * the combined result.
 *
 * <p>Sources are sorted for deterministic output, then grouped by file extension; each
 * extension's file-format parser processes its group. Directories are scanned recursively;
 * JARs are handed to every parser, which extracts only files matching its extension.
 * Non-existent sources and files with unrecognized extensions are silently skipped.
 *
 * @param rawSources file/directory/JAR paths to parse
 * @return aggregated parse result across all file formats
 * @throws IOException if reading any source fails
 */
public DataSchemaParser.ParseResult parseSources(String[] rawSources) throws IOException
{
  Set<String> fileExtensions = _parserByFileExtension.keySet();
  Map<String, List<String>> byExtension = new HashMap<>(fileExtensions.size());
  for (String fileExtension : fileExtensions)
  {
    byExtension.put(fileExtension, new ArrayList<>());
  }
  // Sort a copy so the caller's array is untouched and results are deterministic.
  String[] sortedSources = Arrays.copyOf(rawSources, rawSources.length);
  Arrays.sort(sortedSources);
  // Extract all schema files from the given source paths and group by extension (JARs are handled specially)
  for (String source : sortedSources)
  {
    final File sourceFile = new File(source);
    if (sourceFile.exists())
    {
      if (sourceFile.isDirectory())
      {
        // Source path is a directory, so recursively find all schema files contained therein
        final FileExtensionFilter filter = new FileExtensionFilter(fileExtensions);
        final List<File> sourceFilesInDirectory = FileUtil.listFiles(sourceFile, filter);
        // Add each schema to the corresponding extension's source list
        for (File f : sourceFilesInDirectory)
        {
          String ext = FilenameUtils.getExtension(f.getName());
          List<String> filesForExtension = byExtension.get(ext);
          if (filesForExtension != null)
          {
            filesForExtension.add(f.getAbsolutePath());
          }
        }
      }
      else if (sourceFile.getName().endsWith(".jar"))
      {
        // Source path is a JAR, so add it to each extension's source list.
        // The file-based parser for each extension will extract the JAR and process only files matching the extension
        byExtension.values().forEach(files -> files.add(sourceFile.getAbsolutePath()));
      }
      else
      {
        // Source path is a non-JAR file, so add it to the corresponding extension's source list
        String ext = FilenameUtils.getExtension(sourceFile.getName());
        List<String> filesForExtension = byExtension.get(ext);
        if (filesForExtension != null)
        {
          filesForExtension.add(sourceFile.getAbsolutePath());
        }
      }
    }
  }
  // Parse all schema files and JARs using the appropriate file format parser
  final ParseResult result = new ParseResult();
  for (Map.Entry<String, List<String>> entry : byExtension.entrySet())
  {
    String ext = entry.getKey();
    List<String> files = entry.getValue();
    _parserByFileExtension.get(ext).parseSources(files.toArray(new String[files.size()]), result);
  }
  return result;
}
|
@Test
public void testCustomResolverSchemaDirectory() throws Exception
{
  // Builds a JAR containing a schema in a custom resolver directory plus an extension
  // schema that references it, then verifies both resolve when the custom directory
  // is registered as a resolver directory.
  String tempDirectoryPath = _tempDir.getAbsolutePath();
  String jarFile = tempDirectoryPath + FS + "test.jar";
  String schemaDir = TEST_RESOURCES_DIR + FS + "extensionSchemas";
  SchemaDirectory customSchemaDirectory = () -> "custom";
  Map<String, String> entryToFileMap = new HashMap<>();
  // FooExtensions is in "extensions" directory and references "Foo" from "custom" directory.
  entryToFileMap.put(schemaDir + FS + "pegasus/Foo.pdl", "custom/Foo.pdl");
  entryToFileMap.put(schemaDir + FS + "extensions/FooExtensions.pdl", "extensions/FooExtensions.pdl");
  createTempJarFile(entryToFileMap, jarFile);
  List<SchemaDirectory> resolverDirectories = Arrays.asList(
      SchemaDirectoryName.EXTENSIONS, customSchemaDirectory);
  List<SchemaDirectory> sourceDirectories = Collections.singletonList(SchemaDirectoryName.EXTENSIONS);
  DataSchemaParser parser = new DataSchemaParser.Builder(jarFile)
      .setResolverDirectories(resolverDirectories)
      .setSourceDirectories(sourceDirectories)
      .build();
  DataSchemaParser.ParseResult parseResult = parser.parseSources(new String[]{jarFile});
  // NOTE(review): parseSources is invoked twice and the first result is discarded —
  // possibly intentional (re-parse should be stable), possibly leftover; confirm.
  parseResult = parser.parseSources(new String[]{jarFile});
  // Foo and FooExtensions
  assertEquals(parseResult.getSchemaAndLocations().size(), 2);
  Set<String> schemaNames = parseResult.getSchemaAndLocations().keySet().stream().map(DataSchema::getUnionMemberKey).collect(
      Collectors.toSet());
  assertTrue(schemaNames.contains("FooExtensions"));
  assertTrue(schemaNames.contains("Foo"));
  parseResult.getSchemaAndLocations().values().forEach(loc -> assertEquals(loc.getSourceFile().getAbsolutePath(), jarFile));
}
|
/**
 * Creates the {@link CircuitBreakerRegistry} bean: builds the registry from the
 * configured properties, attaches the event consumer registry, and eagerly
 * initializes all configured circuit breaker instances.
 */
@Bean
public CircuitBreakerRegistry circuitBreakerRegistry(
    EventConsumerRegistry<CircuitBreakerEvent> eventConsumerRegistry,
    RegistryEventConsumer<CircuitBreaker> circuitBreakerRegistryEventConsumer,
    @Qualifier("compositeCircuitBreakerCustomizer") CompositeCustomizer<CircuitBreakerConfigCustomizer> compositeCircuitBreakerCustomizer) {
    CircuitBreakerRegistry circuitBreakerRegistry = createCircuitBreakerRegistry(
        circuitBreakerProperties, circuitBreakerRegistryEventConsumer,
        compositeCircuitBreakerCustomizer);
    registerEventConsumer(circuitBreakerRegistry, eventConsumerRegistry);
    // then pass the map here
    initCircuitBreakerRegistry(circuitBreakerRegistry, compositeCircuitBreakerCustomizer);
    return circuitBreakerRegistry;
}
|
@Test
public void testCreateCircuitBreakerRegistryWithSharedConfigs() {
    // Two named configs ("default" and "sharedConfig") plus two instances that each
    // inherit one of them while overriding permittedNumberOfCallsInHalfOpenState.
    InstanceProperties defaultProperties = new InstanceProperties();
    defaultProperties.setSlidingWindowSize(1000);
    defaultProperties.setPermittedNumberOfCallsInHalfOpenState(100);
    InstanceProperties sharedProperties = new InstanceProperties();
    sharedProperties.setSlidingWindowSize(1337);
    sharedProperties.setPermittedNumberOfCallsInHalfOpenState(1000);
    InstanceProperties backendWithDefaultConfig = new InstanceProperties();
    backendWithDefaultConfig.setBaseConfig("default");
    backendWithDefaultConfig.setPermittedNumberOfCallsInHalfOpenState(99);
    InstanceProperties backendWithSharedConfig = new InstanceProperties();
    backendWithSharedConfig.setBaseConfig("sharedConfig");
    backendWithSharedConfig.setPermittedNumberOfCallsInHalfOpenState(999);
    CircuitBreakerConfigurationProperties circuitBreakerConfigurationProperties = new CircuitBreakerConfigurationProperties();
    circuitBreakerConfigurationProperties.getConfigs().put("default", defaultProperties);
    circuitBreakerConfigurationProperties.getConfigs().put("sharedConfig", sharedProperties);
    circuitBreakerConfigurationProperties.getInstances()
        .put("backendWithDefaultConfig", backendWithDefaultConfig);
    circuitBreakerConfigurationProperties.getInstances()
        .put("backendWithSharedConfig", backendWithSharedConfig);
    CircuitBreakerConfiguration circuitBreakerConfiguration = new CircuitBreakerConfiguration(
        circuitBreakerConfigurationProperties);
    DefaultEventConsumerRegistry<CircuitBreakerEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>();
    CircuitBreakerRegistry circuitBreakerRegistry = circuitBreakerConfiguration
        .circuitBreakerRegistry(eventConsumerRegistry,
            new CompositeRegistryEventConsumer<>(emptyList()),
            compositeCircuitBreakerCustomizerTestInstance());
    assertThat(circuitBreakerRegistry.getAllCircuitBreakers().size()).isEqualTo(2);
    // Should get default config and overwrite setPermittedNumberOfCallsInHalfOpenState
    CircuitBreaker circuitBreaker1 = circuitBreakerRegistry
        .circuitBreaker("backendWithDefaultConfig");
    assertThat(circuitBreaker1).isNotNull();
    assertThat(circuitBreaker1.getCircuitBreakerConfig().getSlidingWindowSize())
        .isEqualTo(1000);
    assertThat(
        circuitBreaker1.getCircuitBreakerConfig().getPermittedNumberOfCallsInHalfOpenState())
        .isEqualTo(99);
    // Should get shared config and overwrite setPermittedNumberOfCallsInHalfOpenState
    CircuitBreaker circuitBreaker2 = circuitBreakerRegistry
        .circuitBreaker("backendWithSharedConfig");
    assertThat(circuitBreaker2).isNotNull();
    assertThat(circuitBreaker2.getCircuitBreakerConfig().getSlidingWindowSize())
        .isEqualTo(1337);
    assertThat(
        circuitBreaker2.getCircuitBreakerConfig().getPermittedNumberOfCallsInHalfOpenState())
        .isEqualTo(999);
    // Unknown backend should get default config of Registry
    CircuitBreaker circuitBreaker3 = circuitBreakerRegistry.circuitBreaker("unknownBackend");
    assertThat(circuitBreaker3).isNotNull();
    assertThat(circuitBreaker3.getCircuitBreakerConfig().getSlidingWindowSize())
        .isEqualTo(1000);
    assertThat(eventConsumerRegistry.getAllEventConsumer()).hasSize(3);
}
|
/**
 * Presence matcher: a field "matches" when it is present on the message, where a
 * string field additionally must contain non-whitespace characters. The rule's
 * inverted flag flips the result.
 *
 * @param msg  message to inspect
 * @param rule stream rule naming the field and inversion flag
 * @return whether the rule matches the message
 */
@Override
public boolean match(Message msg, StreamRule rule) {
    Object rawField = msg.getField(rule.getField());
    if (rawField == null) {
        // Absent field: no match unless the rule is inverted.
        return rule.getInverted();
    }
    if (rawField instanceof String) {
        // Avoid the boxed Boolean temporary of the previous implementation.
        return rule.getInverted() ^ !((String) rawField).trim().isEmpty();
    }
    // Non-string values (numbers, booleans, ...) always count as present.
    return !rule.getInverted();
}
|
@Test
public void testRandomNumberFieldNonMatch() throws Exception {
    // NOTE(review): despite the "NonMatch" name, a non-null numeric field counts as
    // present, so the (non-inverted) presence rule matches and the assertion is true.
    String fieldName = "sampleField";
    Integer randomNumber = 4;
    StreamRule rule = getSampleRule();
    rule.setField(fieldName);
    rule.setType(StreamRuleType.PRESENCE);
    rule.setInverted(false);
    Message message = getSampleMessage();
    message.addField(fieldName, randomNumber);
    StreamRuleMatcher matcher = getMatcher(rule);
    Boolean result = matcher.match(message, rule);
    assertTrue(result);
}
|
/**
 * Writes one gauge sample line in Prometheus text exposition format:
 * {@code name{label="value",...} value}.
 *
 * <p>Label values are escaped per the exposition format: backslash, double quote
 * and newline must all be escaped (previously only double quotes were handled).
 *
 * @param metricName           metric name (also used for the TYPE header)
 * @param value                sample value
 * @param labelsAndValuesArray alternating label names and label values
 */
void writeSample(String metricName, Number value, String... labelsAndValuesArray) {
    SimpleTextOutputStream stream = initGaugeType(metricName);
    stream.write(metricName).write('{');
    for (int i = 0; i < labelsAndValuesArray.length; i += 2) {
        String labelValue = labelsAndValuesArray[i + 1];
        if (labelValue != null) {
            // Escape backslash first so the escapes added afterwards are not doubled.
            labelValue = labelValue.replace("\\", "\\\\")
                    .replace("\"", "\\\"")
                    .replace("\n", "\\n");
        }
        stream.write(labelsAndValuesArray[i]).write("=\"").write(labelValue).write('\"');
        if (i + 2 != labelsAndValuesArray.length) {
            stream.write(',');
        }
    }
    stream.write("} ").write(value).write('\n');
}
|
@Test
public void canWriteSampleWithoutLabels() {
    // A sample with no labels still emits the TYPE header and an empty label set "{}".
    underTest.writeSample("my-metric", 123);
    String actual = writeToString();
    assertTrue(actual.startsWith("# TYPE my-metric gauge"), "Gauge type line missing");
    assertTrue(actual.contains("my-metric{} 123"), "Metric line missing");
}
|
/**
 * Appends environment descriptors (OS, container/orchestrator, cloud platform) to the
 * user-agent info list.
 *
 * <p>Docker and Kubernetes are independent flags; GCE and EC2 are mutually exclusive —
 * EC2 detection only runs when the host is not on GCE.
 *
 * @param info list to append environment descriptors to
 */
@VisibleForTesting
public static void addUserAgentEnvironments(List<String> info) {
    info.add(String.format(OS_FORMAT, OSUtils.OS_NAME));
    if (EnvironmentUtils.isDocker()) {
        info.add(DOCKER_KEY);
    }
    if (EnvironmentUtils.isKubernetes()) {
        info.add(KUBERNETES_KEY);
    }
    if (EnvironmentUtils.isGoogleComputeEngine()) {
        info.add(GCE_KEY);
    } else {
        addEC2Info(info);
    }
}
|
@Test
public void userAgentEnvironmentStringEmpty() {
    // When the EC2 metadata service is unreachable, only the OS entry is added.
    List<String> info = new ArrayList<>();
    Mockito.when(EC2MetadataUtils.getUserData())
        .thenThrow(new SdkClientException("Unable to contact EC2 metadata service."));
    UpdateCheckUtils.addUserAgentEnvironments(info);
    Assert.assertEquals(1, info.size());
    Assert.assertEquals(String.format(UpdateCheckUtils.OS_FORMAT, OSUtils.OS_NAME),
        info.get(0));
}
|
/**
 * Normalizes an instance name into a valid label value: {@code null} becomes the
 * empty string; otherwise the name is truncated to 63 characters and any trailing
 * {@code '.'}, {@code '-'} or {@code '_'} characters are stripped.
 *
 * @param instance instance name, may be {@code null}
 * @return a valid (possibly empty) label value
 */
static String getOrValidInstanceLabelValue(String instance) {
    if (instance == null) {
        return "";
    }
    int end = Math.min(instance.length(), 63);
    // Trim trailing separator characters left over after truncation.
    while (end > 0) {
        char trailing = instance.charAt(end - 1);
        if (trailing != '.' && trailing != '-' && trailing != '_') {
            break;
        }
        end--;
    }
    return instance.substring(0, end);
}
|
@Test
public void testValidInstanceNamesAsLabelValues() {
    // null/empty pass through as "", valid names are unchanged, long names are cut
    // to 63 chars, and trailing '.', '-', '_' left after the cut are stripped.
    assertThat(Labels.getOrValidInstanceLabelValue(null), is(""));
    assertThat(Labels.getOrValidInstanceLabelValue(""), is(""));
    assertThat(Labels.getOrValidInstanceLabelValue("valid-label-value"), is("valid-label-value"));
    assertThat(Labels.getOrValidInstanceLabelValue("too-long-012345678901234567890123456789012345678901234567890123456789"), is("too-long-012345678901234567890123456789012345678901234567890123"));
    assertThat(Labels.getOrValidInstanceLabelValue("too-long-01234567890123456789012345678901234567890123456789012-456789"), is("too-long-01234567890123456789012345678901234567890123456789012"));
    assertThat(Labels.getOrValidInstanceLabelValue("too-long-01234567890123456789012345678901234567890123456789.---456789"), is("too-long-01234567890123456789012345678901234567890123456789"));
}
|
/**
 * Returns a {@link LinkExtractorParser} for the given class name, serving reusable
 * parsers from a shared cache.
 *
 * <p>On a cache miss the class is instantiated reflectively; if it declares itself
 * reusable it is cached with putIfAbsent, and when another thread won that race its
 * instance is returned instead to keep the cache consistent.
 *
 * @param parserClassName fully qualified parser class name
 * @return a parser instance (cached when reusable)
 * @throws LinkExtractorParseException if the class cannot be instantiated or does not
 *         implement {@link LinkExtractorParser}
 */
public static LinkExtractorParser getParser(String parserClassName)
        throws LinkExtractorParseException {
    // Is there a cached parser?
    LinkExtractorParser parser = PARSERS.get(parserClassName);
    if (parser != null) {
        LOG.debug("Fetched {}", parserClassName);
        return parser;
    }
    try {
        Object clazz = Class.forName(parserClassName).getDeclaredConstructor().newInstance();
        if (clazz instanceof LinkExtractorParser) {
            parser = (LinkExtractorParser) clazz;
        } else {
            throw new LinkExtractorParseException(new ClassCastException(parserClassName));
        }
    } catch (IllegalArgumentException | ReflectiveOperationException | SecurityException e) {
        throw new LinkExtractorParseException(e);
    }
    LOG.info("Created {}", parserClassName);
    if (parser.isReusable()) {
        LinkExtractorParser currentParser = PARSERS.putIfAbsent(
                parserClassName, parser);// cache the parser if not already
        // done by another thread
        if (currentParser != null) {
            return currentParser;
        }
    }
    return parser;
}
|
@Test
public void testReusableCache() throws Exception {
    // A reusable parser must be cached: two lookups yield the same instance.
    assertSame(BaseParser.getParser(ReusableParser.class.getCanonicalName()), BaseParser.getParser(ReusableParser.class.getCanonicalName()));
}
|
/**
 * Handles a broker heartbeat that timed out in the controller event queue: after
 * validating the broker epoch, the heartbeat manager is still "touched" so the broker
 * is not considered dead, and the overload condition is logged at error level.
 *
 * @param request the expired heartbeat request
 */
void processExpiredBrokerHeartbeat(BrokerHeartbeatRequestData request) {
    int brokerId = request.brokerId();
    clusterControl.checkBrokerEpoch(brokerId, request.brokerEpoch());
    clusterControl.heartbeatManager().touch(brokerId,
        clusterControl.brokerRegistrations().get(brokerId).fenced(),
        request.currentMetadataOffset());
    log.error("processExpiredBrokerHeartbeat: controller event queue overloaded. Timed out " +
        "heartbeat from broker {}.", brokerId);
}
|
@Test
public void testProcessExpiredBrokerHeartbeat() {
    MockTime mockTime = new MockTime(0, 0, 0);
    ReplicationControlTestContext ctx = new ReplicationControlTestContext.Builder().
        setMockTime(mockTime).
        build();
    ctx.registerBrokers(0, 1, 2);
    ctx.unfenceBrokers(0, 1, 2);
    BrokerHeartbeatRequestData heartbeatRequest = new BrokerHeartbeatRequestData().
        setBrokerId(0).
        setBrokerEpoch(100).
        setCurrentMetadataOffset(123).
        setWantShutDown(false);
    mockTime.sleep(100);
    ctx.replicationControl.processExpiredBrokerHeartbeat(heartbeatRequest);
    // Even though the heartbeat expired, the broker's last-contact time and metadata
    // offset must have been updated (100 ms = 100000000 ns).
    Optional<BrokerHeartbeatState> state =
        ctx.clusterControl.heartbeatManager().brokers().stream().
            filter(broker -> broker.id() == 0).findFirst();
    assertTrue(state.isPresent());
    assertEquals(0, state.get().id());
    assertEquals(100000000L, state.get().lastContactNs);
    assertEquals(123, state.get().metadataOffset);
}
|
/**
 * Computes the seeded 64-bit CityHash of the given bytes.
 *
 * @param data bytes to hash
 * @param seed hash seed
 * @return 64-bit hash value
 */
public static long cityHash64(byte[] data, long seed) {
    return CityHash.hash64(data, seed);
}
|
@Test
public void cityHash64Test(){
    // Known-answer test against the reference CityHash64 value of a UTF-8 string.
    String s="Google发布的Hash计算算法:CityHash64 与 CityHash128";
    final long hash = HashUtil.cityHash64(StrUtil.utf8Bytes(s));
    assertEquals(0x1d408f2bbf967e2aL, hash);
}
|
/**
 * Returns whether the given columns contain every primary-key column of this table.
 *
 * <p>An exact (case-sensitive) containment check is tried first; if it fails, both
 * sides are upper-cased and compared again, making the check case-insensitive.
 *
 * @param cols column names to check, may be {@code null}
 * @return {@code true} when all PK columns are covered; {@code false} for a
 *         {@code null} list or a table without a primary key
 */
public boolean containsPK(List<String> cols) {
    if (cols == null) {
        return false;
    }
    List<String> pk = getPrimaryKeyOnlyName();
    if (pk.isEmpty()) {
        return false;
    }
    if (cols.containsAll(pk)) {
        return true;
    }
    // Case-insensitive fallback.
    return CollectionUtils.toUpperList(cols).containsAll(CollectionUtils.toUpperList(pk));
}
|
@Test
public void testContainsPKWithNullList() {
    // A null column list can never contain the primary key.
    assertFalse(tableMeta.containsPK(null));
}
|
/**
 * Executes a DROP DATABASE statement: validates permissions, clears the session's
 * current database when it is the one being dropped, and removes the database from
 * metadata if it exists.
 *
 * @return an update response header for the statement
 * @throws SQLException if the drop fails
 */
@Override
public ResponseHeader execute() throws SQLException {
    check(sqlStatement, connectionSession.getConnectionContext().getGrantee());
    if (isDropCurrentDatabase(sqlStatement.getDatabaseName())) {
        checkSupportedDropCurrentDatabase(connectionSession);
        // Detach the session from the database before it disappears.
        connectionSession.setCurrentDatabaseName(null);
    }
    if (ProxyContext.getInstance().databaseExists(sqlStatement.getDatabaseName())) {
        ProxyContext.getInstance().getContextManager().getPersistServiceFacade().getMetaDataManagerPersistService().dropDatabase(sqlStatement.getDatabaseName());
    }
    return new UpdateResponseHeader(sqlStatement);
}
|
@Test
void assertExecuteDropWithoutCurrentDatabase() throws SQLException {
    // Dropping a database that is not the session's current one must not reset it.
    when(sqlStatement.getDatabaseName()).thenReturn("foo_db");
    ResponseHeader responseHeader = handler.execute();
    verify(connectionSession, times(0)).setCurrentDatabaseName(null);
    assertThat(responseHeader, instanceOf(UpdateResponseHeader.class));
}
|
/**
 * Plans the sequence of migrations required to transform a partition's replica
 * assignment from {@code oldReplicas} to {@code newReplicas}, invoking
 * {@code callback.migrate(...)} once per decision (remove, COPY, MOVE, SHIFT UP
 * or SHIFT DOWN). {@code state} tracks the in-progress assignment; the loop is
 * order-sensitive and must leave {@code state} equal to {@code newReplicas}.
 */
@SuppressWarnings({"checkstyle:npathcomplexity", "checkstyle:cyclomaticcomplexity", "checkstyle:methodlength"})
void planMigrations(int partitionId, PartitionReplica[] oldReplicas, PartitionReplica[] newReplicas,
        MigrationDecisionCallback callback) {
    assert oldReplicas.length == newReplicas.length : "Replica addresses with different lengths! Old: "
            + Arrays.toString(oldReplicas) + ", New: " + Arrays.toString(newReplicas);
    if (logger.isFinestEnabled()) {
        logger.finest("partitionId=%d, Initial state: %s", partitionId, Arrays.toString(oldReplicas));
        logger.finest("partitionId=%d, Final state: %s", partitionId, Arrays.toString(newReplicas));
    }
    initState(oldReplicas);
    assertNoDuplicate(partitionId, oldReplicas, newReplicas);
    // fix cyclic partition replica movements
    if (fixCycle(oldReplicas, newReplicas)) {
        if (logger.isFinestEnabled()) {
            logger.finest("partitionId=%d, Final state (after cycle fix): %s", partitionId,
                    Arrays.toString(newReplicas));
        }
    }
    int currentIndex = 0;
    while (currentIndex < oldReplicas.length) {
        if (logger.isFinestEnabled()) {
            logger.finest("partitionId=%d, Current index: %d, state: %s", partitionId, currentIndex,
                    Arrays.toString(state));
        }
        assertNoDuplicate(partitionId, oldReplicas, newReplicas);
        if (newReplicas[currentIndex] == null) {
            if (state[currentIndex] != null) {
                // replica owner is removed and no one will own this replica
                logger.finest("partitionId=%d, New address is null at index: %d", partitionId, currentIndex);
                callback.migrate(state[currentIndex], currentIndex, -1, null, -1, -1);
                state[currentIndex] = null;
            }
            currentIndex++;
            continue;
        }
        if (state[currentIndex] == null) {
            int i = getReplicaIndex(state, newReplicas[currentIndex]);
            if (i == -1) {
                // fresh replica copy is needed, so COPY replica to newReplicas[currentIndex] from partition owner
                logger.finest("partitionId=%d, COPY %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex);
                callback.migrate(null, -1, -1, newReplicas[currentIndex], -1, currentIndex);
                state[currentIndex] = newReplicas[currentIndex];
                currentIndex++;
                continue;
            }
            if (i > currentIndex) {
                // SHIFT UP replica from i to currentIndex, copy data from partition owner
                logger.finest("partitionId=%d, SHIFT UP-2 %s from old addresses index: %d to index: %d", partitionId,
                        state[i], i, currentIndex);
                callback.migrate(null, -1, -1, state[i], i, currentIndex);
                state[currentIndex] = state[i];
                state[i] = null;
                // no currentIndex++: re-evaluate this index now that the shift has landed
                continue;
            }
            throw new AssertionError("partitionId=" + partitionId
                    + "Migration decision algorithm failed during SHIFT UP! INITIAL: " + Arrays.toString(oldReplicas)
                    + ", CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas));
        }
        if (newReplicas[currentIndex].equals(state[currentIndex])) {
            // no change, no action needed
            currentIndex++;
            continue;
        }
        if (getReplicaIndex(newReplicas, state[currentIndex]) == -1
                && getReplicaIndex(state, newReplicas[currentIndex]) == -1) {
            // MOVE partition replica from its old owner to new owner
            logger.finest("partitionId=%d, MOVE %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex);
            callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex);
            state[currentIndex] = newReplicas[currentIndex];
            currentIndex++;
            continue;
        }
        if (getReplicaIndex(state, newReplicas[currentIndex]) == -1) {
            int newIndex = getReplicaIndex(newReplicas, state[currentIndex]);
            assert newIndex > currentIndex : "partitionId=" + partitionId
                    + ", Migration decision algorithm failed during SHIFT DOWN! INITIAL: "
                    + Arrays.toString(oldReplicas) + ", CURRENT: " + Arrays.toString(state)
                    + ", FINAL: " + Arrays.toString(newReplicas);
            if (state[newIndex] == null) {
                // it is a SHIFT DOWN
                logger.finest("partitionId=%d, SHIFT DOWN %s to index: %d, COPY %s to index: %d", partitionId,
                        state[currentIndex], newIndex, newReplicas[currentIndex], currentIndex);
                callback.migrate(state[currentIndex], currentIndex, newIndex, newReplicas[currentIndex], -1, currentIndex);
                state[newIndex] = state[currentIndex];
            } else {
                logger.finest("partitionId=%d, MOVE-3 %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex);
                callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex);
            }
            state[currentIndex] = newReplicas[currentIndex];
            currentIndex++;
            continue;
        }
        // Remaining case: overloaded recursion resolves the conflicting shift for this index.
        planMigrations(partitionId, oldReplicas, newReplicas, callback, currentIndex);
    }
    assert Arrays.equals(state, newReplicas)
            : "partitionId=" + partitionId + ", Migration decisions failed! INITIAL: " + Arrays.toString(oldReplicas)
            + " CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas);
}
|
@Test
public void test_SHIFT_DOWN_performedAfterKnownNewReplicaOwnerKickedOutOfReplicas() throws UnknownHostException {
    // Old assignment: 5701, 5702, 5703, 5705 own replicas 0-3.
    final PartitionReplica[] oldReplicas = {
            new PartitionReplica(new Address("localhost", 5701), uuids[0]),
            new PartitionReplica(new Address("localhost", 5702), uuids[1]),
            new PartitionReplica(new Address("localhost", 5703), uuids[2]),
            new PartitionReplica(new Address("localhost", 5705), uuids[4]),
            null,
            null,
            null,
    };
    // New assignment demotes old owners (5701 -> index 5, 5702 -> index 4) while
    // promoting 5704/5706 — this forces SHIFT DOWN decisions rather than plain MOVEs.
    final PartitionReplica[] newReplicas = {
            new PartitionReplica(new Address("localhost", 5704), uuids[3]),
            new PartitionReplica(new Address("localhost", 5703), uuids[2]),
            new PartitionReplica(new Address("localhost", 5705), uuids[4]),
            new PartitionReplica(new Address("localhost", 5706), uuids[5]),
            new PartitionReplica(new Address("localhost", 5702), uuids[1]),
            new PartitionReplica(new Address("localhost", 5701), uuids[0]),
            null,
    };
    migrationPlanner.planMigrations(0, oldReplicas, newReplicas, callback);
    // Expected decisions: shift 5701 down to index 5, replace index 3, move 5705 up, shift 5702 down to index 4.
    verify(callback).migrate(new PartitionReplica(new Address("localhost", 5701), uuids[0]), 0, 5, new PartitionReplica(new Address("localhost", 5704), uuids[3]), -1, 0);
    verify(callback).migrate(new PartitionReplica(new Address("localhost", 5705), uuids[4]), 3, -1, new PartitionReplica(new Address("localhost", 5706), uuids[5]), -1, 3);
    verify(callback).migrate(new PartitionReplica(new Address("localhost", 5703), uuids[2]), 2, -1, new PartitionReplica(new Address("localhost", 5705), uuids[4]), -1, 2);
    verify(callback).migrate(new PartitionReplica(new Address("localhost", 5702), uuids[1]), 1, 4, new PartitionReplica(new Address("localhost", 5703), uuids[2]), -1, 1);
}
|
/**
 * Creates the DDL command for the given statement by dispatching on the statement's
 * concrete class. Unknown statement types fail fast with a KsqlException that lists
 * the supported classes.
 */
@Override
public DdlCommand create(
    final String sqlExpression,
    final DdlStatement ddlStatement,
    final SessionConfig config
) {
    // The fallback factory is only invoked when no entry exists for the statement class.
    return FACTORIES
        .getOrDefault(ddlStatement.getClass(), (statement, cf, ci) -> {
            throw new KsqlException(
                "Unable to find ddl command factory for statement:"
                    + statement.getClass()
                    + " valid statements:"
                    + FACTORIES.keySet()
            );
        })
        .handle(
            this,
            new CallInfo(sqlExpression, config),
            ddlStatement);
}
|
@Test
public void shouldCreateCommandForCreateSourceStream() {
    // Given: a CREATE SOURCE STREAM statement with two columns.
    final CreateStream statement = new CreateStream(SOME_NAME,
        TableElements.of(
            tableElement("COL1", new Type(SqlTypes.BIGINT)),
            tableElement("COL2", new Type(SqlTypes.STRING))),
        false, true, withProperties, true);
    // When: the factory dispatch runs for the statement.
    final DdlCommand result = commandFactories
        .create(sqlExpression, statement, SessionConfig.of(ksqlConfig, emptyMap()));
    // Then: the stream-specific factory produced the command.
    assertThat(result, is(createStreamCommand));
    verify(createSourceFactory).createStreamCommand(statement, ksqlConfig);
}
|
/**
 * Returns a new PeriodStats holding the sum of this instance and {@code toAdd}.
 * Neither operand is modified.
 *
 * @param toAdd the stats to add to this instance
 * @return a freshly allocated aggregate of both operands
 */
public PeriodStats plus(PeriodStats toAdd) {
    PeriodStats result = new PeriodStats();
    // Accumulate both operands into the fresh result instead of duplicating
    // the field-by-field addition twice inline.
    accumulateInto(result, this);
    accumulateInto(result, toAdd);
    return result;
}

/** Adds every counter and latency histogram of {@code source} into {@code target}. */
private static void accumulateInto(PeriodStats target, PeriodStats source) {
    target.messagesSent += source.messagesSent;
    target.messageSendErrors += source.messageSendErrors;
    target.bytesSent += source.bytesSent;
    target.messagesReceived += source.messagesReceived;
    target.bytesReceived += source.bytesReceived;
    target.totalMessagesSent += source.totalMessagesSent;
    target.totalMessageSendErrors += source.totalMessageSendErrors;
    target.totalMessagesReceived += source.totalMessagesReceived;
    target.publishLatency.add(source.publishLatency);
    target.publishDelayLatency.add(source.publishDelayLatency);
    target.endToEndLatency.add(source.endToEndLatency);
}
|
@Test
void zeroPlus() {
    // Adding stats to a zero-valued instance must yield exactly the added values.
    PeriodStats one = new PeriodStats();
    PeriodStats two = new PeriodStats();
    two.messagesSent = 10;
    two.messageSendErrors = 20;
    two.bytesSent = 30;
    two.messagesReceived = 40;
    two.bytesReceived = 50;
    two.totalMessagesSent = 60;
    two.totalMessageSendErrors = 70;
    two.totalMessagesReceived = 80;
    PeriodStats result = one.plus(two);
    assertThat(result)
        .satisfies(
            r -> {
                assertThat(r.messagesSent).isEqualTo(10);
                assertThat(r.messageSendErrors).isEqualTo(20);
                assertThat(r.bytesSent).isEqualTo(30);
                assertThat(r.messagesReceived).isEqualTo(40);
                assertThat(r.bytesReceived).isEqualTo(50);
                assertThat(r.totalMessagesSent).isEqualTo(60);
                assertThat(r.totalMessageSendErrors).isEqualTo(70);
                assertThat(r.totalMessagesReceived).isEqualTo(80);
                assertThat(r.publishLatency).isEqualTo(two.publishLatency);
                assertThat(r.publishDelayLatency).isEqualTo(two.publishDelayLatency);
                assertThat(r.endToEndLatency).isEqualTo(two.endToEndLatency);
            });
}
|
/**
 * No-op registration: validates the metric but never stores it and fires no
 * listener notifications.
 *
 * @param name   the metric name (ignored by this registry)
 * @param metric the metric to "register"; must not be null
 * @return the metric passed in, unchanged
 * @throws NullPointerException if {@code metric} is null
 */
@Override
public <T extends Metric> T register(String name, T metric) throws IllegalArgumentException {
    // requireNonNull throws the same NullPointerException with the same message
    // as the previous hand-rolled check.
    return java.util.Objects.requireNonNull(metric, "metric == null");
}
|
@Test
public void registeringATimerTriggersNoNotification() {
    // The no-op registry returns the metric untouched and never notifies listeners.
    assertThat(registry.register("thing", timer)).isEqualTo(timer);
    verify(listener, never()).onTimerAdded("thing", timer);
}
|
/**
 * Processes one input row: appends the configured constant values to it and
 * forwards the widened row downstream. Returns false when the input is exhausted.
 */
@Override
public boolean processRow( StepMetaInterface smi, StepDataInterface sdi ) throws KettleException {
    Object[] r = null;
    r = getRow();
    if ( r == null ) { // no more rows to be expected from the previous step(s)
        setOutputDone();
        return false;
    }
    if ( data.firstRow ) {
        // The output meta is the original input meta + the
        // additional constant fields.
        data.firstRow = false;
        data.outputMeta = getInputRowMeta().clone();
        meta.getFields( data.outputMeta, getStepname(), null, null, this, repository, metaStore );
    }
    // Add the constant data to the end of the row.
    r = RowDataUtil.addRowData( r, getInputRowMeta().size(), data.getConstants().getData() );
    putRow( data.outputMeta, r );
    // Row-level and periodic feedback logging only; no effect on the data flow.
    if ( log.isRowLevel() ) {
        logRowlevel( BaseMessages.getString(
            PKG, "Constant.Log.Wrote.Row", Long.toString( getLinesWritten() ), getInputRowMeta().getString( r ) ) );
    }
    if ( checkFeedback( getLinesWritten() ) ) {
        if ( log.isBasic() ) {
            logBasic( BaseMessages.getString( PKG, "Constant.Log.LineNr", Long.toString( getLinesWritten() ) ) );
        }
    }
    return true;
}
|
@Test
public void testProcessRow_success() throws Exception {
    // With an available input row and valid metadata, processRow must report success.
    doReturn( new Object[1] ).when( constantSpy ).getRow();
    doReturn( new RowMeta() ).when( constantSpy ).getInputRowMeta();
    doReturn( new Object[1] ).when( rowMetaAndData ).getData();
    boolean success = constantSpy.processRow( constantMeta, constantData );
    assertTrue( success );
}
|
/**
 * Computes the lower-case hex SHA-512 digest of the given string.
 *
 * @param data the input text; may be null or empty
 * @return the hex digest, or null when {@code data} is null or empty
 * @throws ShenyuException if digest computation fails unexpectedly
 */
public static String sha512Hex(final String data) {
    // Use the lambda parameter instead of re-capturing the outer variable, and the
    // single-argument idiom isNotEmpty instead of the varargs isNoneEmpty.
    return Optional.ofNullable(data).filter(StringUtils::isNotEmpty).map(item -> {
        try {
            return org.apache.commons.codec.digest.DigestUtils.sha512Hex(item);
        } catch (Exception e) {
            throw new ShenyuException(e);
        }
    }).orElse(null);
}
|
@Test
public void testSha512Hex() {
    // Known-answer test for the SHA-512 hex digest of "123456".
    assertThat(DigestUtils.sha512Hex("123456"),
        is("ba3253876aed6bc22d4a6ff53d8406c6ad864195ed144ab5c87621b6c233b548baeae6956df346ec8c17f5ea10f35ee3cbc514797ed7ddd3145464e2a0bab413"));
}
|
/**
 * Executes a single-entity operation for every id in the request, collecting failures
 * per entity instead of aborting the batch. Audit-log writes (success or failure) are
 * best-effort: an audit exception is logged but never turns a successful operation
 * into a reported failure.
 */
@Override
public BulkOperationResponse executeBulkOperation(final BulkOperationRequest bulkOperationRequest, final C userContext, final AuditParams params) {
    if (bulkOperationRequest.entityIds() == null || bulkOperationRequest.entityIds().isEmpty()) {
        throw new BadRequestException(NO_ENTITY_IDS_ERROR);
    }
    List<BulkOperationFailure> capturedFailures = new LinkedList<>();
    for (String entityId : bulkOperationRequest.entityIds()) {
        try {
            T entityModel = singleEntityOperationExecutor.execute(entityId, userContext);
            try {
                // Audit logging is optional (params may be null) and best-effort.
                if (params != null) {
                    auditEventSender.success(getAuditActor(userContext), params.eventType(), successAuditLogContextCreator.create(entityModel, params.entityClass()));
                }
            } catch (Exception auditLogStoreException) {
                //exception on audit log storing should not result in failure report, as the operation itself is successful
                LOG.error("Failed to store in the audit log information about successful entity removal via bulk action ", auditLogStoreException);
            }
        } catch (Exception ex) {
            // The operation itself failed for this entity: record it and continue with the rest.
            capturedFailures.add(new BulkOperationFailure(entityId, ex.getMessage()));
            try {
                if (params != null) {
                    auditEventSender.failure(getAuditActor(userContext), params.eventType(), failureAuditLogContextCreator.create(params.entityIdInPathParam(), entityId));
                }
            } catch (Exception auditLogStoreException) {
                //exception on audit log storing should not result in failure report, as the operation itself is successful
                LOG.error("Failed to store in the audit log information about failed entity removal via bulk action ", auditLogStoreException);
            }
        }
    }
    return new BulkOperationResponse(
        bulkOperationRequest.entityIds().size() - capturedFailures.size(),
        capturedFailures);
}
|
@Test
void exceptionInAuditLogStoringDoesNotInfluenceResponse() throws Exception {
    // An audit-log failure after a successful operation must not surface as a bulk failure.
    mockUserContext();
    doThrow(new MongoException("MongoDB audit_log collection became anti-collection when bombed by Bozon particles")).when(auditEventSender).success(any(), anyString(), any());
    final BulkOperationResponse bulkOperationResponse = toTest.executeBulkOperation(new BulkOperationRequest(List.of("1")), context, params);
    assertThat(bulkOperationResponse.successfullyPerformed()).isEqualTo(1);
    assertThat(bulkOperationResponse.failures()).isEmpty();
    verify(singleEntityOperationExecutor).execute("1", context);
    verifyNoMoreInteractions(singleEntityOperationExecutor);
}
|
/**
 * Routes an incoming metrics sample into the alarm engine: resolves the metric's
 * scope to a concrete {@code MetaInAlarm} (service, instance, endpoint or one of the
 * relation variants) and feeds the sample to every running rule registered for the
 * metric's name. Samples from unsupported scopes are ignored.
 */
@Override
public void notify(Metrics metrics) {
    WithMetadata withMetadata = (WithMetadata) metrics;
    MetricsMetaInfo meta = withMetadata.getMeta();
    int scope = meta.getScope();
    // Early exit for unsupported scopes (the trailing else below is an unreachable
    // safety net given this guard).
    if (!DefaultScopeDefine.inServiceCatalog(scope) && !DefaultScopeDefine.inServiceInstanceCatalog(scope)
        && !DefaultScopeDefine.inEndpointCatalog(scope) && !DefaultScopeDefine.inServiceRelationCatalog(scope)
        && !DefaultScopeDefine.inServiceInstanceRelationCatalog(scope) && !DefaultScopeDefine.inEndpointRelationCatalog(scope)) {
        return;
    }
    MetaInAlarm metaInAlarm;
    if (DefaultScopeDefine.inServiceCatalog(scope)) {
        // Service scope: decode the service id to recover its name.
        final String serviceId = meta.getId();
        final IDManager.ServiceID.ServiceIDDefinition serviceIDDefinition = IDManager.ServiceID.analysisId(
            serviceId);
        ServiceMetaInAlarm serviceMetaInAlarm = new ServiceMetaInAlarm();
        serviceMetaInAlarm.setMetricsName(meta.getMetricsName());
        serviceMetaInAlarm.setId(serviceId);
        serviceMetaInAlarm.setName(serviceIDDefinition.getName());
        metaInAlarm = serviceMetaInAlarm;
    } else if (DefaultScopeDefine.inServiceInstanceCatalog(scope)) {
        // Instance scope: name is "<instance> of <service>".
        final String instanceId = meta.getId();
        final IDManager.ServiceInstanceID.InstanceIDDefinition instanceIDDefinition = IDManager.ServiceInstanceID.analysisId(
            instanceId);
        final IDManager.ServiceID.ServiceIDDefinition serviceIDDefinition = IDManager.ServiceID.analysisId(
            instanceIDDefinition.getServiceId());
        ServiceInstanceMetaInAlarm instanceMetaInAlarm = new ServiceInstanceMetaInAlarm();
        instanceMetaInAlarm.setMetricsName(meta.getMetricsName());
        instanceMetaInAlarm.setId(instanceId);
        instanceMetaInAlarm.setName(instanceIDDefinition.getName() + " of " + serviceIDDefinition.getName());
        metaInAlarm = instanceMetaInAlarm;
    } else if (DefaultScopeDefine.inEndpointCatalog(scope)) {
        // Endpoint scope: name is "<endpoint> in <service>".
        final String endpointId = meta.getId();
        final IDManager.EndpointID.EndpointIDDefinition endpointIDDefinition = IDManager.EndpointID.analysisId(
            endpointId);
        final IDManager.ServiceID.ServiceIDDefinition serviceIDDefinition = IDManager.ServiceID.analysisId(
            endpointIDDefinition.getServiceId());
        EndpointMetaInAlarm endpointMetaInAlarm = new EndpointMetaInAlarm();
        endpointMetaInAlarm.setMetricsName(meta.getMetricsName());
        endpointMetaInAlarm.setId(meta.getId());
        endpointMetaInAlarm.setName(
            endpointIDDefinition.getEndpointName() + " in " + serviceIDDefinition.getName());
        metaInAlarm = endpointMetaInAlarm;
    } else if (DefaultScopeDefine.inServiceRelationCatalog(scope)) {
        // Service relation: name is "<source> to <dest>".
        final String serviceRelationId = meta.getId();
        final IDManager.ServiceID.ServiceRelationDefine serviceRelationDefine = IDManager.ServiceID.analysisRelationId(
            serviceRelationId);
        final IDManager.ServiceID.ServiceIDDefinition sourceIdDefinition = IDManager.ServiceID.analysisId(
            serviceRelationDefine.getSourceId());
        final IDManager.ServiceID.ServiceIDDefinition destIdDefinition = IDManager.ServiceID.analysisId(
            serviceRelationDefine.getDestId());
        ServiceRelationMetaInAlarm serviceRelationMetaInAlarm = new ServiceRelationMetaInAlarm();
        serviceRelationMetaInAlarm.setMetricsName(meta.getMetricsName());
        serviceRelationMetaInAlarm.setId(serviceRelationId);
        serviceRelationMetaInAlarm.setName(sourceIdDefinition.getName() + " to " + destIdDefinition.getName());
        metaInAlarm = serviceRelationMetaInAlarm;
    } else if (DefaultScopeDefine.inServiceInstanceRelationCatalog(scope)) {
        // Instance relation: both endpoints are decoded down to instance-of-service names.
        final String instanceRelationId = meta.getId();
        final IDManager.ServiceInstanceID.ServiceInstanceRelationDefine serviceRelationDefine = IDManager.ServiceInstanceID.analysisRelationId(
            instanceRelationId);
        final IDManager.ServiceInstanceID.InstanceIDDefinition sourceIdDefinition = IDManager.ServiceInstanceID.analysisId(
            serviceRelationDefine.getSourceId());
        final IDManager.ServiceID.ServiceIDDefinition sourceServiceId = IDManager.ServiceID.analysisId(
            sourceIdDefinition.getServiceId());
        final IDManager.ServiceInstanceID.InstanceIDDefinition destIdDefinition = IDManager.ServiceInstanceID.analysisId(
            serviceRelationDefine.getDestId());
        final IDManager.ServiceID.ServiceIDDefinition destServiceId = IDManager.ServiceID.analysisId(
            destIdDefinition.getServiceId());
        ServiceInstanceRelationMetaInAlarm instanceRelationMetaInAlarm = new ServiceInstanceRelationMetaInAlarm();
        instanceRelationMetaInAlarm.setMetricsName(meta.getMetricsName());
        instanceRelationMetaInAlarm.setId(instanceRelationId);
        instanceRelationMetaInAlarm.setName(sourceIdDefinition.getName() + " of " + sourceServiceId.getName()
            + " to " + destIdDefinition.getName() + " of " + destServiceId.getName());
        metaInAlarm = instanceRelationMetaInAlarm;
    } else if (DefaultScopeDefine.inEndpointRelationCatalog(scope)) {
        // Endpoint relation: "<endpoint> in <service> to <endpoint> in <service>".
        final String endpointRelationId = meta.getId();
        final IDManager.EndpointID.EndpointRelationDefine endpointRelationDefine = IDManager.EndpointID.analysisRelationId(
            endpointRelationId);
        final IDManager.ServiceID.ServiceIDDefinition sourceService = IDManager.ServiceID.analysisId(
            endpointRelationDefine.getSourceServiceId());
        final IDManager.ServiceID.ServiceIDDefinition destService = IDManager.ServiceID.analysisId(
            endpointRelationDefine.getDestServiceId());
        EndpointRelationMetaInAlarm endpointRelationMetaInAlarm = new EndpointRelationMetaInAlarm();
        endpointRelationMetaInAlarm.setMetricsName(meta.getMetricsName());
        endpointRelationMetaInAlarm.setId(endpointRelationId);
        endpointRelationMetaInAlarm.setName(endpointRelationDefine.getSource() + " in " + sourceService.getName()
            + " to " + endpointRelationDefine.getDest() + " in " + destService.getName());
        metaInAlarm = endpointRelationMetaInAlarm;
    } else {
        return;
    }
    // Feed the sample into every rule watching this metric name; no rules means no-op.
    List<RunningRule> runningRules = core.findRunningRule(meta.getMetricsName());
    if (runningRules == null) {
        return;
    }
    runningRules.forEach(rule -> rule.in(metaInAlarm, metrics));
}
|
@Test
public void dontNotify() {
    // Smoke test: a SERVICE-scoped metric with no matching running rules
    // must pass through notify() without throwing.
    MetricsMetaInfo metadata = mock(MetricsMetaInfo.class);
    when(metadata.getScope()).thenReturn(DefaultScopeDefine.SERVICE);
    MockMetrics mockMetrics = mock(MockMetrics.class);
    when(mockMetrics.getMeta()).thenReturn(metadata);
    notifyHandler.notify(mockMetrics);
}
|
/**
 * Validates a CREATE CONNECTOR statement against the Connect cluster: rejects
 * duplicates with an HTTP 409, surfaces connector-config validation errors as a
 * KsqlException, and otherwise returns a handled response with a dummy create entity.
 */
public static StatementExecutorResponse validate(
    final ConfiguredStatement<CreateConnector> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
    final CreateConnector createConnector = statement.getStatement();
    final ConnectClient client = serviceContext.getConnectClient();
    // Duplicate connector names are a REST-level conflict, not a KSQL error.
    if (checkForExistingConnector(statement, createConnector, client)) {
        final String errorMsg = String.format(
            "Connector %s already exists", createConnector.getName());
        throw new KsqlRestException(EndpointResponse.create()
            .status(HttpStatus.SC_CONFLICT)
            .entity(new KsqlErrorMessage(Errors.toErrorCode(HttpStatus.SC_CONFLICT), errorMsg))
            .build()
        );
    }
    // Config validation errors are aggregated into a single message.
    final List<String> errors = validateConfigs(createConnector, client);
    if (!errors.isEmpty()) {
        final String errorMessage = "Validation error: " + String.join("\n", errors);
        throw new KsqlException(errorMessage);
    }
    return StatementExecutorResponse.handled(Optional.of(new CreateConnectorEntity(
        statement.getMaskedStatementText(),
        DUMMY_CREATE_RESPONSE
    )));
}
|
@Test
public void shouldThrowOnValidateIfConnectorExists() {
    // Given: a connector with the same name already exists in the Connect cluster.
    givenConnectorExists();
    // When: validation runs for the CREATE CONNECTOR statement.
    final KsqlRestException e = assertThrows(
        KsqlRestException.class,
        () -> ConnectExecutor.validate(CREATE_CONNECTOR_CONFIGURED, mock(SessionProperties.class), null, serviceContext));
    // Then: a 409 Conflict with the matching error code and message is raised.
    assertThat(e.getResponse().getStatus(), is(HttpStatus.SC_CONFLICT));
    final KsqlErrorMessage err = (KsqlErrorMessage) e.getResponse().getEntity();
    assertThat(err.getErrorCode(), is(Errors.toErrorCode(HttpStatus.SC_CONFLICT)));
    assertThat(err.getMessage(), containsString("Connector foo already exists"));
}
|
/**
 * Runs the CLI with the given arguments and returns the exit code.
 * Exposed only so tests can drive the commander directly.
 */
@VisibleForTesting
int execute(String[] args) {
    return commander.execute(args);
}
|
/**
 * Creates a token via the CLI with a given expiry and verifies the JWT's expiration
 * lands within +-5 seconds of the expected time. Captures stdout to read the token
 * and always restores the original stream.
 */
@Test(dataProvider = "desiredExpireTime")
public void commandCreateToken_WhenCreatingATokenWithExpiryTime_ShouldHaveTheDesiredExpireTime(String expireTime, int expireAsSec) throws Exception {
    PrintStream oldStream = System.out;
    try {
        //Arrange: capture stdout so the emitted token can be parsed back.
        ByteArrayOutputStream baoStream = new ByteArrayOutputStream();
        System.setOut(new PrintStream(baoStream));
        String[] command = {"create", "--secret-key",
            "data:;base64,u+FxaxYWpsTfxeEmMh8fQeS3g2jfXw4+sGIv+PTY+BY=",
            "--subject", "test",
            "--expiry-time", expireTime,
        };
        new TokensCliUtils().execute(command);
        String token = baoStream.toString();
        Instant start = (new Date().toInstant().plus(expireAsSec - 5, ChronoUnit.SECONDS));
        Instant stop = (new Date().toInstant().plus(expireAsSec + 5, ChronoUnit.SECONDS));
        //Act: parse the signed JWT claims with the same secret.
        Claims jwt = Jwts.parserBuilder()
            .setSigningKey(Decoders.BASE64.decode("u+FxaxYWpsTfxeEmMh8fQeS3g2jfXw4+sGIv+PTY+BY="))
            .build()
            .parseClaimsJws(token)
            .getBody();
        //Assert: the token expires within +-5 sec of the requested time.
        assertTrue(( ! jwt.getExpiration().toInstant().isBefore( start ) ) && ( jwt.getExpiration().toInstant().isBefore( stop ) ));
    } finally {
        // No catch-and-rethrow: the method declares throws Exception, so wrapping in a
        // RuntimeException only obscured the original failure. Just restore stdout.
        System.setOut(oldStream);
    }
}
|
/**
 * Instantiates and initializes the OperationAuditor class configured under {@code key}
 * (defaulting to LoggingAuditor) via its no-arg constructor.
 *
 * @param conf    configuration supplying the auditor class name
 * @param key     configuration key holding the class name
 * @param options options passed to the auditor's init()
 * @return the initialized auditor
 * @throws IOException if the class cannot be loaded, instantiated or initialized;
 *                     reflection and runtime failures are wrapped with class/key context
 */
public static OperationAuditor createAndInitAuditor(
    Configuration conf,
    String key,
    OperationAuditorOptions options) throws IOException {
    final Class<? extends OperationAuditor> auditClassname
        = conf.getClass(
            key,
            LoggingAuditor.class,
            OperationAuditor.class);
    try {
        LOG.debug("Auditor class is {}", auditClassname);
        final Constructor<? extends OperationAuditor> constructor
            = auditClassname.getConstructor();
        final OperationAuditor instance = constructor.newInstance();
        instance.init(options);
        return instance;
    } catch (NoSuchMethodException | InstantiationException
        | RuntimeException
        | IllegalAccessException | InvocationTargetException e) {
        // RuntimeException is caught deliberately so misconfigured classes surface
        // as an IOException naming the class and the config key.
        throw new IOException("Failed to instantiate class "
            + auditClassname
            + " defined in " + key
            + ": " + e,
            e);
    }
}
|
@Test
public void testCreateNonexistentAuditor() throws Throwable {
    // A nonexistent auditor class must fail construction with an exception.
    final Configuration conf = new Configuration();
    OperationAuditorOptions options =
        OperationAuditorOptions.builder()
            .withConfiguration(conf)
            .withIoStatisticsStore(ioStatistics);
    conf.set(AUDIT_SERVICE_CLASSNAME, "not.a.known.class");
    intercept(RuntimeException.class, () ->
        AuditIntegration.createAndInitAuditor(conf,
            AUDIT_SERVICE_CLASSNAME,
            options));
}
|
/**
 * Applies the given config overrides to a Kafka topic, returning true only when at
 * least one value actually changed. Uses incrementalAlterConfigs with retries, and
 * falls back to the legacy alterConfigs API on brokers that do not support it.
 */
@Override
public boolean addTopicConfig(final String topicName, final Map<String, ?> overrides) {
    final ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
    final Map<String, String> stringConfigs = toStringConfigs(overrides);
    try {
        // Skip the alter call entirely when every override already matches.
        final Map<String, String> existingConfig = topicConfig(topicName, false);
        final boolean changed = stringConfigs.entrySet().stream()
            .anyMatch(e -> !Objects.equals(existingConfig.get(e.getKey()), e.getValue()));
        if (!changed) {
            return false;
        }
        final Set<AlterConfigOp> entries = stringConfigs.entrySet().stream()
            .map(e -> new ConfigEntry(e.getKey(), e.getValue()))
            .map(ce -> new AlterConfigOp(ce, AlterConfigOp.OpType.SET))
            .collect(Collectors.toSet());
        final Map<ConfigResource, Collection<AlterConfigOp>> request =
            Collections.singletonMap(resource, entries);
        ExecutorUtil.executeWithRetries(
            () -> adminClient.get().incrementalAlterConfigs(request).all().get(),
            ExecutorUtil.RetryBehaviour.ON_RETRYABLE);
        return true;
    } catch (final UnsupportedVersionException e) {
        // Older brokers: fall back to the deprecated whole-config alter path.
        return addTopicConfigLegacy(topicName, stringConfigs);
    } catch (final Exception e) {
        throw new KafkaResponseGetFailedException(
            "Failed to set config for Kafka Topic " + topicName, e);
    }
}
|
@Test
public void shouldSetStringTopicConfig() {
    // Given: a topic whose current config does not include the override being applied.
    givenTopicConfigs(
        "peter",
        overriddenConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "12345"),
        defaultConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "snappy")
    );
    final Map<String, ?> configOverrides = ImmutableMap.of(
        CLEANUP_POLICY_CONFIG, CLEANUP_POLICY_COMPACT
    );
    // When: the override is applied.
    final boolean changed = kafkaTopicClient.addTopicConfig("peter", configOverrides);
    // Then: the client reports a change and issues exactly the expected SET op.
    assertThat("should return true", changed);
    verify(adminClient).incrementalAlterConfigs(ImmutableMap.of(
        topicResource("peter"),
        ImmutableSet.of(
            setConfig(CLEANUP_POLICY_CONFIG, CLEANUP_POLICY_COMPACT)
        )
    ));
}
|
/**
 * Applies this template's entries to the given child queue path.
 * Convenience overload that delegates with the extra flag set to false.
 */
public void setTemplateEntriesForChild(CapacitySchedulerConfiguration conf,
                                       QueuePath childQueuePath) {
    setTemplateEntriesForChild(conf, childQueuePath, false);
}
|
@Test
public void testTemplatePrecedence() {
    // Three templates at different specificity levels: explicit parent, single
    // wildcard and two-level wildcard.
    conf.set(getTemplateKey(TEST_QUEUE_AB, "capacity"), "6w");
    conf.set(getTemplateKey(TEST_QUEUE_A_WILDCARD, "capacity"), "4w");
    conf.set(getTemplateKey(TEST_QUEUE_TWO_LEVEL_WILDCARDS, "capacity"), "2w");
    AutoCreatedQueueTemplate template =
        new AutoCreatedQueueTemplate(conf, TEST_QUEUE_AB);
    template.setTemplateEntriesForChild(conf, TEST_QUEUE_ABC);
    // The explicit (most specific) template must win.
    Assert.assertEquals(
        "explicit template does not have the highest precedence", 6f,
        conf.getNonLabeledQueueWeight(TEST_QUEUE_ABC), 10e-6);
    // With only the wildcard template present, it applies.
    CapacitySchedulerConfiguration newConf =
        new CapacitySchedulerConfiguration();
    newConf.set(getTemplateKey(TEST_QUEUE_A_WILDCARD, "capacity"), "4w");
    template =
        new AutoCreatedQueueTemplate(newConf, TEST_QUEUE_AB);
    template.setTemplateEntriesForChild(newConf, TEST_QUEUE_ABC);
    Assert.assertEquals("precedence is invalid", 4f,
        newConf.getNonLabeledQueueWeight(TEST_QUEUE_ABC), 10e-6);
}
|
/**
 * Shuts this instance down: stops the write-behind service first so no further
 * writes are scheduled, then closes the connection manager.
 */
@Override
public void shutdown() {
    writeBehindService.stop();
    connectionManager.shutdown();
}
|
@Test
public void testShutdown() {
    // A fresh client is neither shutting down nor shut down; after shutdown() both flags flip.
    RedissonClient r = createInstance();
    Assertions.assertFalse(r.isShuttingDown());
    Assertions.assertFalse(r.isShutdown());
    r.shutdown();
    Assertions.assertTrue(r.isShuttingDown());
    Assertions.assertTrue(r.isShutdown());
}
|
/**
 * Processes the symbols for one line. Once a symbols inconsistency has been
 * detected, all subsequent lines are skipped and the same ReadError is returned.
 *
 * @param lineBuilder the line under construction
 * @return the sticky ReadError if symbols data is inconsistent, otherwise empty
 */
@Override
public Optional<ReadError> read(DbFileSources.Line.Builder lineBuilder) {
    // Sticky-error guard: after the first failure, stop processing entirely.
    if (readError != null) {
        return Optional.of(readError);
    }
    try {
        processSymbols(lineBuilder);
        return Optional.empty();
    } catch (RangeOffsetConverter.RangeOffsetConverterException e) {
        readError = new ReadError(Data.SYMBOLS, lineBuilder.getLine());
        LOG.warn(format("Inconsistency detected in Symbols data. Symbols will be ignored for file '%s'", file.getKey()), e);
        return Optional.of(readError);
    }
}
|
@Test
public void read_symbols_when_reference_line_is_before_declaration_line() {
    // The symbol is declared on line 2 but referenced on line 1; both lines must
    // still receive the correct symbol labels without a read error.
    SymbolsLineReader symbolsLineReader = newReader(newSymbol(
        newSingleLineTextRangeWithExpectedLabel(LINE_2, OFFSET_3, OFFSET_4, RANGE_LABEL_1),
        newSingleLineTextRangeWithExpectedLabel(LINE_1, OFFSET_1, OFFSET_2, RANGE_LABEL_2)));
    assertThat(symbolsLineReader.read(line1)).isEmpty();
    assertThat(symbolsLineReader.read(line2)).isEmpty();
    assertThat(line1.getSymbols()).isEqualTo(RANGE_LABEL_2 + ",1");
    assertThat(line2.getSymbols()).isEqualTo(RANGE_LABEL_1 + ",1");
}
|
/**
 * Returns the total payload size in bytes across all frames of this message.
 */
public long contentSize()
{
    return frames.stream().mapToLong(ZFrame::size).sum();
}
|
@Test
public void testContentSize()
{
    // Empty frames contribute zero; content size is the sum of frame payload sizes.
    ZMsg msg = new ZMsg();
    msg.add(new byte[0]);
    assertThat(msg.contentSize(), is(0L));
    msg.add(new byte[1]);
    assertThat(msg.contentSize(), is(1L));
}
|
/**
 * Joins the given path parts into a full gs:// URI.
 *
 * @param pathParts one or more non-null, non-empty path segments
 * @return the joined "gs://..." path
 * @throws IllegalArgumentException if no parts are given or any part is null/empty
 */
public static String getFullGcsPath(String... pathParts) {
    checkArgument(pathParts.length != 0, "Must provide at least one path part");
    for (String part : pathParts) {
        checkArgument(!Strings.isNullOrEmpty(part), "No path part can be null or empty");
    }
    return String.format("gs://%s", String.join("/", pathParts));
}
|
@Test
public void testGetFullGcsPathOneNullValue() {
    // Any null segment must be rejected, even when other segments are valid.
    assertThrows(
        IllegalArgumentException.class,
        () -> ArtifactUtils.getFullGcsPath("bucket", null, "dir2", "file"));
}
|
/** Returns the current count of failed getNewReservation retrievals. */
public int getNewReservationFailedRetrieved() {
    return numGetNewReservationFailedRetrieved.value();
}
|
@Test
public void testGetNewReservationRetrievedFailed() {
    // Recording one failure must increment the failed-retrieval counter by exactly one.
    long totalBadBefore = metrics.getNewReservationFailedRetrieved();
    badSubCluster.getNewReservationFailed();
    Assert.assertEquals(totalBadBefore + 1,
        metrics.getNewReservationFailedRetrieved());
}
|
/**
 * Joins an existing trace context, sharing the same span IDs as the remote caller.
 * When join is unsupported by the propagation format, a child span is created instead.
 *
 * @param context the extracted trace context; must not be null
 * @return a span joined to (or a child of) the given context
 * @throws NullPointerException if {@code context} is null
 */
public final Span joinSpan(TraceContext context) {
    if (context == null) throw new NullPointerException("context == null");
    if (!supportsJoin) return newChild(context);
    // set shared flag if not already done
    int flags = InternalPropagation.instance.flags(context);
    if (!context.shared()) {
        flags |= FLAG_SHARED;
        // (unshared input, shared copy) — the second argument carries the flag change.
        return toSpan(context, InternalPropagation.instance.withFlags(context, flags));
    } else {
        flags &= ~FLAG_SHARED;
        // Already shared: argument order is reversed so the unshared variant comes first.
        return toSpan(InternalPropagation.instance.withFlags(context, flags), context);
    }
}
|
@Test void joinSpan_decorates() {
    // Joining a shared context must run the propagation factory's decoration,
    // producing a new context instance with extra (baggage) state attached.
    propagationFactory = baggageFactory;
    TraceContext incoming = TraceContext.newBuilder().traceId(1L).spanId(2L).sampled(true)
        .shared(true).build();
    TraceContext joined = tracer.joinSpan(incoming).context();
    assertThat(joined).isNotSameAs(incoming);
    assertThat(joined.extra()).isNotEmpty();
}
|
/**
 * REST endpoint: queries worker groups with pagination and an optional search term.
 * Validates the page parameters and escapes the search value before delegating to
 * the worker-group service.
 */
@Operation(summary = "queryAllWorkerGroupsPaging", description = "QUERY_WORKER_GROUP_PAGING_NOTES")
@Parameters({
    @Parameter(name = "pageNo", description = "PAGE_NO", required = true, schema = @Schema(implementation = int.class, example = "1")),
    @Parameter(name = "pageSize", description = "PAGE_SIZE", required = true, schema = @Schema(implementation = int.class, example = "20")),
    @Parameter(name = "searchVal", description = "SEARCH_VAL", schema = @Schema(implementation = String.class))
})
@GetMapping()
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_WORKER_GROUP_FAIL)
public Result queryAllWorkerGroupsPaging(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                         @RequestParam("pageNo") Integer pageNo,
                                         @RequestParam("pageSize") Integer pageSize,
                                         @RequestParam(value = "searchVal", required = false) String searchVal) {
    checkPageParams(pageNo, pageSize);
    // Escape user-supplied search text before it reaches the query layer.
    searchVal = ParameterUtils.handleEscapes(searchVal);
    return workerGroupService.queryAllGroupPaging(loginUser, pageNo, pageSize, searchVal);
}
|
@Test
public void testQueryAllWorkerGroupsPaging() throws Exception {
    // Issue a paged query with a search term and expect a successful JSON Result.
    MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
    paramsMap.add("pageNo", "2");
    paramsMap.add("searchVal", "cxc");
    paramsMap.add("pageSize", "2");
    MvcResult mvcResult = mockMvc.perform(get("/worker-groups")
            .header("sessionId", sessionId)
            .params(paramsMap))
        .andExpect(status().isOk())
        .andExpect(content().contentType(MediaType.APPLICATION_JSON))
        .andReturn();
    Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
    Assertions.assertTrue(result != null && result.isSuccess());
    logger.info(mvcResult.getResponse().getContentAsString());
}
|
/**
 * Deserializes the given XML content into a config-for-edit, optionally hands it
 * to the callback, then preprocesses and validates it into the runtime config.
 *
 * @param content  the raw config XML
 * @param callback invoked with the deserialized config-for-edit; may be null
 * @return a holder pairing the validated config with the editable config
 * @throws Exception if deserialization, the callback, or validation fails
 */
public GoConfigHolder loadConfigHolder(final String content, Callback callback) throws Exception {
    LOGGER.debug("[Config Save] Loading config holder");
    final CruiseConfig configForEdit = deserializeConfig(content);
    if (callback != null) {
        callback.call(configForEdit);
    }
    return new GoConfigHolder(preprocessAndValidate(configForEdit), configForEdit);
}
|
@Test
void shouldNotAllowConfigWithEmptyEnvironmentsBlock() {
    // An <environments> element with no <environment> children must be rejected by the XSD.
    String emptyEnvironmentsBlock = "<environments>\n"
            + "</environments>";
    String content = configWithEnvironments(emptyEnvironmentsBlock, CONFIG_SCHEMA_VERSION);
    assertThatThrownBy(() -> xmlLoader.loadConfigHolder(content))
            .as("XSD should not allow empty environments block")
            .hasMessageContaining("The content of element 'environments' is not complete. One of '{environment}' is expected.");
}
|
/**
 * Stops tracking the broker with the given id, if it is currently registered.
 */
void remove(int brokerId) {
    BrokerHeartbeatState removed = brokers.remove(brokerId);
    if (removed == null) {
        return;
    }
    untrack(removed);
}
|
@Test
public void testMetadataOffsetComparator() {
    TreeSet<BrokerHeartbeatState> orderedSet =
        new TreeSet<>(BrokerHeartbeatManager.MetadataOffsetComparator.INSTANCE);
    BrokerHeartbeatState broker1 = new BrokerHeartbeatState(1);
    BrokerHeartbeatState broker2 = new BrokerHeartbeatState(2);
    BrokerHeartbeatState broker3 = new BrokerHeartbeatState(3);
    // With equal (default) metadata offsets the comparator falls back to broker id.
    orderedSet.add(broker1);
    orderedSet.add(broker2);
    orderedSet.add(broker3);
    Iterator<BrokerHeartbeatState> it = orderedSet.iterator();
    assertEquals(broker1, it.next());
    assertEquals(broker2, it.next());
    assertEquals(broker3, it.next());
    assertFalse(it.hasNext());
    for (BrokerHeartbeatState broker : new BrokerHeartbeatState[] {broker1, broker2, broker3}) {
        assertTrue(orderedSet.remove(broker));
    }
    assertTrue(orderedSet.isEmpty());
    // With distinct offsets, iteration order follows the metadata offset ascending.
    broker1.metadataOffset = 800;
    broker2.metadataOffset = 400;
    broker3.metadataOffset = 100;
    orderedSet.add(broker1);
    orderedSet.add(broker2);
    orderedSet.add(broker3);
    it = orderedSet.iterator();
    assertEquals(broker3, it.next());
    assertEquals(broker2, it.next());
    assertEquals(broker1, it.next());
    assertFalse(it.hasNext());
}
|
/**
 * Propagates any thread-local request tag onto the outgoing request headers and
 * records the (possibly tagged) request data in the thread-local store before
 * handing the request to the next exchange function.
 */
@Override
public Mono<ClientResponse> filter(ClientRequest request, ExchangeFunction exchangeFunction) {
    RequestTag requestTag = ThreadLocalUtils.getRequestTag();
    if (requestTag == null) {
        // Nothing to propagate: record the original request data and forward unchanged.
        HttpHeaders originalHeaders = request.headers();
        ThreadLocalUtils.setRequestData(new RequestData(originalHeaders, request.url().getPath(),
                request.method().name()));
        return exchangeFunction.exchange(request);
    }
    // Copy every tag entry onto a mutable copy of the request, then record and forward it.
    Builder taggedRequestBuilder = ClientRequest.from(request);
    requestTag.getTag().forEach((name, values) -> taggedRequestBuilder.header(name, values.toArray(new String[0])));
    ClientRequest taggedRequest = taggedRequestBuilder.build();
    ThreadLocalUtils.setRequestData(new RequestData(taggedRequest.headers(), taggedRequest.url().getPath(),
            taggedRequest.method().name()));
    return exchangeFunction.exchange(taggedRequest);
}
|
@Test
public void testFilterWithoutRequestTag() {
    // With no request tag in the thread-local, the filter must forward the
    // request untouched and still record the request data.
    ClientResponse clientResponse = function.filter(request, exchangeFunction).block();
    Assert.assertNotNull(clientResponse);
    HttpHeaders responseHeaders = clientResponse.headers().asHttpHeaders();
    Assert.assertEquals(1, responseHeaders.size());
    List<String> fooValues = responseHeaders.get("foo");
    Assert.assertNotNull(fooValues);
    Assert.assertEquals("foo1", fooValues.get(0));
    // The recorded request data mirrors the original request.
    RequestData recorded = ThreadLocalUtils.getRequestData();
    Assert.assertEquals("foo1", recorded.getTag().get("foo").get(0));
    Assert.assertEquals("/foo", recorded.getPath());
    Assert.assertEquals("GET", recorded.getHttpMethod());
}
|
/**
 * Builds a {@link CloudConfiguration} for storage access from the given properties.
 * Convenience overload that delegates to the two-argument variant with the boolean
 * flag set to {@code false} — presumably a strict/validation toggle; confirm
 * against the two-argument overload.
 *
 * @param properties cloud-vendor specific configuration keys and values
 */
public static CloudConfiguration buildCloudConfigurationForStorage(Map<String, String> properties) {
    return buildCloudConfigurationForStorage(properties, false);
}
|
@Test
public void testAliyunCloudConfiguration() {
    // Aliyun OSS credentials should produce an ALIYUN-typed configuration.
    Map<String, String> props = new HashMap<String, String>() {
        {
            put(CloudConfigurationConstants.ALIYUN_OSS_ACCESS_KEY, "XX");
            put(CloudConfigurationConstants.ALIYUN_OSS_SECRET_KEY, "YY");
            put(CloudConfigurationConstants.ALIYUN_OSS_ENDPOINT, "ZZ");
        }
    };
    CloudConfiguration cloudConfig = CloudConfigurationFactory.buildCloudConfigurationForStorage(props);
    Assert.assertEquals(cloudConfig.getCloudType(), CloudType.ALIYUN);
    // The thrift form carries S3-compatible properties with SSL enabled.
    TCloudConfiguration thriftConfig = new TCloudConfiguration();
    cloudConfig.toThrift(thriftConfig);
    Assert.assertEquals(thriftConfig.getCloud_properties().get(CloudConfigurationConstants.AWS_S3_ENABLE_SSL), "true");
    // Exercise the remaining conversion entry points.
    Configuration hadoopConf = new Configuration();
    cloudConfig.applyToConfiguration(hadoopConf);
    cloudConfig.toFileStoreInfo();
    Assert.assertEquals(cloudConfig.toConfString(),
            "AliyunCloudConfiguration{resources='', jars='', hdpuser='', cred=AliyunCloudCredential{accessKey='XX', " +
                    "secretKey='YY', endpoint='ZZ'}}");
}
|
/**
 * Asks the controller to schedule minion tasks, optionally restricted to a task
 * type and/or a table.
 *
 * @param taskType          task type to schedule, or null for all types
 * @param tableNameWithType table to schedule for, or null for all tables
 * @return a map of task type to generated task name, parsed from the response
 * @throws IOException   on transport or JSON parsing failure
 * @throws HttpException when the controller responds with a 4xx/5xx status
 */
public Map<String, String> scheduleMinionTasks(@Nullable String taskType, @Nullable String tableNameWithType)
    throws IOException, HttpException {
    String scheduleUrl = MinionRequestURLBuilder.baseUrl(_controllerUrl).forTaskSchedule(taskType, tableNameWithType);
    HttpPost request = createHttpPostRequest(scheduleUrl);
    try (CloseableHttpResponse response = HTTP_CLIENT.execute(request)) {
        String body = EntityUtils.toString(response.getEntity());
        int statusCode = response.getCode();
        if (statusCode >= 400) {
            throw new HttpException(
                String.format("Unable to schedule minion tasks. Error code %d, Error message: %s", statusCode, body));
        }
        return JsonUtils.stringToObject(body, TYPEREF_MAP_STRING_STRING);
    }
}
|
@Test
public void testTaskSchedule()
    throws IOException, HttpException {
    // Serve a canned schedule response and check the client parses the task name out of it.
    String responseJson =
        "{\"SegmentGenerationAndPushTask\":\"Task_SegmentGenerationAndPushTask_1607470525615\"}";
    HttpServer httpServer = startServer(14202, "/tasks/schedule", createHandler(200, responseJson, 0));
    MinionClient minionClient = new MinionClient("http://localhost:14202/", null);
    Assert.assertEquals(minionClient.scheduleMinionTasks(null, null).get("SegmentGenerationAndPushTask"),
        "Task_SegmentGenerationAndPushTask_1607470525615");
    httpServer.stop(0);
}
|
/**
 * Builds the SCM plugin info for the given plugin descriptor.
 * Fails fast with a RuntimeException when the plugin returns a null SCM
 * configuration or a null SCM view, before making any further extension calls.
 *
 * @param descriptor the plugin to build info for
 * @return the assembled SCM plugin info
 */
@Override
public SCMPluginInfo pluginInfoFor(GoPluginDescriptor descriptor) {
    SCMPropertyConfiguration scmConfiguration = extension.getSCMConfiguration(descriptor.id());
    // Check each extension response immediately so a broken plugin does not
    // trigger further (wasted) extension round-trips before being rejected.
    if (scmConfiguration == null) {
        throw new RuntimeException(format("Plugin[%s] returned null scm configuration", descriptor.id()));
    }
    SCMView scmView = extension.getSCMView(descriptor.id());
    if (scmView == null) {
        throw new RuntimeException(format("Plugin[%s] returned null scm view", descriptor.id()));
    }
    PluggableInstanceSettings pluginSettingsAndView = getPluginSettingsAndView(descriptor, extension);
    PluggableInstanceSettings scmSettings =
            new PluggableInstanceSettings(scmPluginConfigurations(scmConfiguration), new PluginView(scmView.template()));
    return new SCMPluginInfo(descriptor, scmView.displayValue(), scmSettings, pluginSettingsAndView);
}
|
@Test
public void shouldThrowAnExceptionIfScmConfigReturnedByPluginIsNull() {
    // A plugin that hands back a null SCM configuration must be rejected.
    GoPluginDescriptor pluginDescriptor = GoPluginDescriptor.builder().id("plugin1").build();
    when(extension.getSCMConfiguration("plugin1")).thenReturn(null);
    SCMPluginInfoBuilder builder = new SCMPluginInfoBuilder(extension);
    assertThatThrownBy(() -> builder.pluginInfoFor(pluginDescriptor))
            .hasMessage("Plugin[plugin1] returned null scm configuration");
}
|
/**
 * Creates a unicast mapping instruction that carries the given weight.
 *
 * @param weight the weight value for the instruction
 * @return a WEIGHT-typed unicast mapping instruction
 */
public static UnicastMappingInstruction unicastWeight(int weight) {
    return new UnicastMappingInstruction.WeightMappingInstruction(
        UnicastType.WEIGHT, weight);
}
|
@Test
public void testUnicastWeightMethod() {
    // Building a unicast-weight instruction yields a WeightMappingInstruction
    // of UNICAST type carrying the given weight.
    final MappingInstruction instruction = MappingInstructions.unicastWeight(2);
    final UnicastMappingInstruction.WeightMappingInstruction converted = checkAndConvert(
            instruction,
            UnicastMappingInstruction.Type.UNICAST,
            UnicastMappingInstruction.WeightMappingInstruction.class);
    assertThat(converted.weight(), is(equalTo(2)));
}
|
/**
 * Serializes a Pulsar record to a JSON string containing the Base64-encoded
 * payload, the record properties (when present), and the encryption context
 * (when present) needed to decrypt the payload downstream.
 *
 * @param record the record to serialize; must not be null
 * @return a JSON object string with payload, properties and encryption-context fields
 */
public static String serializeRecordToJson(Record<GenericObject> record) {
    checkNotNull(record, "record can't be null");
    final org.apache.pulsar.client.api.Message<GenericObject> recordMessage = getMessage(record);
    JsonObject result = new JsonObject();
    // Payload bytes are Base64-encoded so the JSON stays valid for arbitrary data.
    result.addProperty(PAYLOAD_FIELD, getEncoder().encodeToString(recordMessage.getData()));
    if (record.getProperties() != null) {
        JsonObject properties = new JsonObject();
        record.getProperties().forEach(properties::addProperty);
        result.add(PROPERTIES_FIELD, properties);
    }
    final Optional<EncryptionContext> optEncryptionCtx = recordMessage.getEncryptionCtx();
    if (optEncryptionCtx.isPresent()) {
        EncryptionContext encryptionCtx = optEncryptionCtx.get();
        JsonObject encryptionCtxJson = new JsonObject();
        JsonObject keyBase64Map = new JsonObject();
        JsonObject keyMetadataMap = new JsonObject();
        // For each encryption key: Base64-encode the key bytes, and attach its
        // metadata map only when it is non-empty.
        encryptionCtx.getKeys().forEach((key, value) -> {
            keyBase64Map.addProperty(key, getEncoder().encodeToString(value.getKeyValue()));
            Map<String, String> keyMetadata = value.getMetadata();
            if (keyMetadata != null && !keyMetadata.isEmpty()) {
                JsonObject metadata = new JsonObject();
                value.getMetadata().forEach(metadata::addProperty);
                keyMetadataMap.add(key, metadata);
            }
        });
        encryptionCtxJson.add(KEY_MAP_FIELD, keyBase64Map);
        encryptionCtxJson.add(KEY_METADATA_MAP_FIELD, keyMetadataMap);
        encryptionCtxJson.addProperty(ENCRYPTION_PARAM_FIELD,
                getEncoder().encodeToString(encryptionCtx.getParam()));
        encryptionCtxJson.addProperty(ALGO_FIELD, encryptionCtx.getAlgorithm());
        // Compression details are emitted only when a compression type is set.
        if (encryptionCtx.getCompressionType() != null) {
            encryptionCtxJson.addProperty(COMPRESSION_TYPE_FIELD, encryptionCtx.getCompressionType().name());
            encryptionCtxJson.addProperty(UNCPRESSED_MSG_SIZE_FIELD, encryptionCtx.getUncompressedMessageSize());
        }
        if (encryptionCtx.getBatchSize().isPresent()) {
            encryptionCtxJson.addProperty(BATCH_SIZE_FIELD, encryptionCtx.getBatchSize().get());
        }
        result.add(ENCRYPTION_CTX_FIELD, encryptionCtxJson);
    }
    return result.toString();
}
|
@Test
public void testJsonSerialization() {
    // Two encryption keys with distinct metadata, plus a payload and one property.
    final String[] keyNames = {"key1", "key2"};
    final String key1Value = "test1";
    final String key2Value = "test2";
    final byte[][] keyValues = {key1Value.getBytes(), key2Value.getBytes()};
    final String param = "param";
    final String algo = "algo";
    int batchSize = 10;
    int compressionMsgSize = 10;
    // serialize to json
    byte[] data = "payload".getBytes();
    Map<String, String> properties = Maps.newHashMap();
    properties.put("prop1", "value");
    Map<String, String> metadata1 = Maps.newHashMap();
    metadata1.put("version", "v1");
    metadata1.put("ckms", "cmks-1");
    Map<String, String> metadata2 = Maps.newHashMap();
    metadata2.put("version", "v2");
    metadata2.put("ckms", "cmks-2");
    Record<GenericObject> recordCtx =
            createRecord(data, algo, keyNames, keyValues, param.getBytes(), metadata1, metadata2,
                    batchSize, compressionMsgSize, properties, true);
    String json = Utils.serializeRecordToJson(recordCtx);
    // deserialize from json and assert that every field round-trips:
    // payload, per-key Base64 values, encryption param, algorithm, key metadata
    // and the record properties.
    KinesisMessageResponse kinesisJsonResponse = deSerializeRecordFromJson(json);
    assertEquals(data, getDecoder().decode(kinesisJsonResponse.getPayloadBase64()));
    EncryptionCtx encryptionCtxDeser = kinesisJsonResponse.getEncryptionCtx();
    assertEquals(key1Value.getBytes(),
            getDecoder().decode(encryptionCtxDeser.getKeysMapBase64().get(keyNames[0])));
    assertEquals(key2Value.getBytes(),
            getDecoder().decode(encryptionCtxDeser.getKeysMapBase64().get(keyNames[1])));
    assertEquals(param.getBytes(), getDecoder().decode(encryptionCtxDeser.getEncParamBase64()));
    assertEquals(algo, encryptionCtxDeser.getAlgorithm());
    assertEquals(metadata1, encryptionCtxDeser.getKeysMetadataMap().get(keyNames[0]));
    assertEquals(metadata2, encryptionCtxDeser.getKeysMetadataMap().get(keyNames[1]));
    assertEquals(properties, kinesisJsonResponse.getProperties());
}
|
/**
 * Loads the service state: the base load, then delay-level parsing, then
 * delay-offset correction. Short-circuits so later steps are skipped as soon
 * as any step fails.
 *
 * @return true only if all three steps succeed
 */
@Override
public boolean load() {
    return super.load() && this.parseDelayLevel() && this.correctDelayOffset();
}
|
@Test
public void testLoad() {
    // Persist non-zero delay offsets, then load them through a fresh service
    // instance and expect every offset to have been corrected back to 0.
    ConcurrentMap<Integer, Long> offsets = scheduleMessageService.getOffsetTable();
    offsets.put(1, 3L);
    offsets.put(2, 5L);
    scheduleMessageService.persist();
    ScheduleMessageService freshService = new ScheduleMessageService(brokerController);
    assertTrue(freshService.load());
    for (long offset : freshService.getOffsetTable().values()) {
        assertEquals(0, offset);
    }
}
|
/**
 * Handles an inbound SMS delivery-status callback for the given channel:
 * parses the callback payload and writes each receive result to the SMS log.
 */
@Override
public void receiveSmsStatus(String channelCode, String text) throws Throwable {
    // Resolve the SmsClient for the channel; fail if the channel is unknown.
    SmsClient smsClient = smsChannelService.getSmsClient(channelCode);
    Assert.notNull(smsClient, "短信客户端({}) 不存在", channelCode);
    // Parse the callback payload into receive results.
    List<SmsReceiveRespDTO> results = smsClient.parseSmsReceiveStatus(text);
    if (CollUtil.isEmpty(results)) {
        return;
    }
    // Volume is expected to be small, so update each log row individually.
    for (SmsReceiveRespDTO result : results) {
        smsLogService.updateSmsReceiveResult(result.getLogId(), result.getSuccess(),
                result.getReceiveTime(), result.getErrorCode(), result.getErrorMsg());
    }
}
|
@Test
public void testReceiveSmsStatus() throws Throwable {
    // Prepare arguments.
    String channelCode = randomString();
    String text = randomString();
    // Mock the channel service to return a client for the channel.
    SmsClient smsClient = spy(SmsClient.class);
    when(smsChannelService.getSmsClient(eq(channelCode))).thenReturn(smsClient);
    // Stub the client to return parsed receive results (the original never stubbed this call,
    // so the parsed list was unrelated to receiveResults).
    List<SmsReceiveRespDTO> receiveResults = randomPojoList(SmsReceiveRespDTO.class);
    when(smsClient.parseSmsReceiveStatus(eq(text))).thenReturn(receiveResults);
    // Call.
    smsSendService.receiveSmsStatus(channelCode, text);
    // Assert: each parsed result must be written to the log service. The original
    // invoked the mock directly instead of verifying it, and passed getErrorCode()
    // where getErrorMsg() belongs.
    receiveResults.forEach(result -> verify(smsLogService).updateSmsReceiveResult(
            eq(result.getLogId()), eq(result.getSuccess()),
            eq(result.getReceiveTime()), eq(result.getErrorCode()), eq(result.getErrorMsg())));
}
|
/**
 * Returns a {@link Correspondence} that compares elements using object equality
 * ({@code equals}). Returns the shared singleton via a covariant cast, which is
 * safe because equality comparison does not depend on the element type.
 */
@SuppressWarnings("unchecked") // safe covariant cast
static <T extends @Nullable Object> Correspondence<T, T> equality() {
    return (Equality<T>) Equality.INSTANCE;
}
|
@Test
public void testEquality_viaIterableSubjectContains_failure() {
    // 2.0 equals none of the listed doubles, so contains() must fail.
    expectFailure
        .whenTesting()
        .that(ImmutableList.of(1.01, 2.02, 3.03))
        .comparingElementsUsing(equality())
        .contains(2.0);
    // N.B. No "testing whether" fact: the equality correspondence adds no description.
    assertFailureKeys("expected to contain", "but was");
}
|
/**
 * Builds the bool query for the post filters, if any.
 * The bi-predicate passed to {@code toBoolQuery} accepts every entry, so all
 * registered post filters are included; an empty Optional presumably means no
 * post filters were registered — confirm against {@code toBoolQuery}.
 */
public Optional<BoolQueryBuilder> getPostFilters() {
    return toBoolQuery(postFilters, (e, v) -> true);
}
|
@Test
public void getPostFilters_returns_empty_when_no_declared_sticky_topAggregation() {
    // When every top-aggregation is non-sticky there is nothing to post-filter.
    AllFilters filters = randomNonEmptyAllFilters();
    Set<TopAggregationDefinition<?>> nonStickyTopAggregations = randomNonEmptyTopAggregations(() -> false);
    RequestFiltersComputer computer = new RequestFiltersComputer(filters, nonStickyTopAggregations);
    assertThat(computer.getPostFilters()).isEmpty();
}
|
/**
 * For a {@code DROP ... DELETE TOPIC} statement: deletes the backing Kafka topic
 * and its key/value schema subjects, then rewrites the statement without the
 * DELETE TOPIC clause so downstream execution only performs the metadata drop.
 * All other statements pass through unchanged.
 */
@SuppressWarnings({"unchecked", "UnstableApiUsage"})
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement) {
    if (!(statement.getStatement() instanceof DropStatement)) {
        return statement;
    }
    final DropStatement dropStatement = (DropStatement) statement.getStatement();
    if (!dropStatement.isDeleteTopic()) {
        return statement;
    }
    final SourceName sourceName = dropStatement.getName();
    final DataSource source = metastore.getSource(sourceName);
    if (source != null) {
        if (source.isSource()) {
            throw new KsqlException("Cannot delete topic for read-only source: " + sourceName.text());
        }
        checkTopicRefs(source);
        deleteTopic(source);
        // Closer runs both subject deletions even if one fails, collecting failures.
        final Closer closer = Closer.create();
        closer.register(() -> deleteKeySubject(source));
        closer.register(() -> deleteValueSubject(source));
        try {
            closer.close();
        } catch (final KsqlException e) {
            // Preserve KsqlExceptions as-is; wrap anything else.
            throw e;
        } catch (final Exception e) {
            throw new KsqlException(e);
        }
    } else if (!dropStatement.getIfExists()) {
        // Unknown source without IF EXISTS: fail rather than silently skipping.
        throw new KsqlException("Could not find source to delete topic for: " + statement);
    }
    // Rewrite the statement with the DELETE TOPIC clause stripped.
    final T withoutDelete = (T) dropStatement.withoutDeleteClause();
    final String withoutDeleteText = SqlFormatter.formatSql(withoutDelete) + ";";
    return statement.withStatement(withoutDeleteText, withoutDelete);
}
|
@Test
public void shouldThrowOnDeleteTopicIfSourceIsReadOnly() {
    // Given: the source is marked read-only.
    when(source.isSource()).thenReturn(true);
    // When / Then: injecting a DROP ... DELETE TOPIC statement must fail.
    final Exception e = assertThrows(
        RuntimeException.class,
        () -> deleteInjector.inject(DROP_WITH_DELETE_TOPIC));
    assertThat(e.getMessage(),
        containsString("Cannot delete topic for read-only source: SOMETHING"));
}
|
/**
 * Converts a JavaScript (Rhino) value to a {@link Date}, dispatching on the
 * value's reported class name.
 *
 * @param value     the JavaScript value to convert
 * @param classType the fully-qualified class name Rhino reports for the value
 * @return the converted Date, or null when the value is JS "undefined"
 * @throws KettleValueException if the value cannot be converted to a Date
 */
public static Date jsToDate( Object value, String classType ) throws KettleValueException {
    double dbl;
    if ( !classType.equalsIgnoreCase( JS_UNDEFINED ) ) {
        if ( classType.equalsIgnoreCase( "org.mozilla.javascript.NativeDate" ) ) {
            // A native JS Date converts to its epoch-millis numeric value.
            dbl = Context.toNumber( value );
        } else if ( classType.equalsIgnoreCase( JS_NATIVE_JAVA_OBJ )
            || classType.equalsIgnoreCase( "java.util.Date" ) ) {
            // Is it a java Date() class ?
            try {
                Date dat = (Date) Context.jsToJava( value, java.util.Date.class );
                dbl = dat.getTime();
            } catch ( Exception e ) {
                // Is it a Value?
                //
                return convertValueToDate( value );
            }
        } else if ( classType.equalsIgnoreCase( "java.lang.Double" ) ) {
            dbl = (Double) value;
        } else {
            // Fall back: coerce to String and parse as a double (epoch millis).
            String string = (String) Context.jsToJava( value, String.class );
            dbl = Double.parseDouble( string );
        }
        // Round to the nearest millisecond before building the Date.
        long lng = Math.round( dbl );
        return new Date( lng );
    }
    return null;
}
|
@Test
public void jsToDate_String() throws Exception {
    // A numeric string is parsed as a double and rounded to epoch millis.
    Date expected = new Date( 1 );
    assertEquals( expected, JavaScriptUtils.jsToDate( "1.0", String.class.getName() ) );
}
|
/**
 * Evaluates the bound expression against the given row.
 *
 * @param data the row to test
 * @return true if the expression matches the row
 */
public boolean eval(StructLike data) {
    return new EvalVisitor().eval(data);
}
|
@Test
public void testAlwaysTrue() {
    // alwaysTrue() must match any row, including an empty one.
    final Evaluator evaluator = new Evaluator(STRUCT, alwaysTrue());
    assertThat(evaluator.eval(TestHelpers.Row.of())).as("always true").isTrue();
}
|
/**
 * Converts a lower_underscore name to lowerCamelCase: each underscore followed
 * by a lowercase letter is dropped and the letter is upper-cased; underscores
 * followed by anything else (digits, end of string) are simply dropped.
 *
 * @param lowerUnderscore the snake_case input; must not be null
 * @return the lowerCamelCase form of the input
 */
public static String lowerUnderscoreToLowerCamelCase(String lowerUnderscore) {
    StringBuilder builder = new StringBuilder();
    int length = lowerUnderscore.length();
    int index;
    int fromIndex = 0;
    while ((index = lowerUnderscore.indexOf('_', fromIndex)) != -1) {
        builder.append(lowerUnderscore, fromIndex, index);
        // Guard against a trailing underscore: the original check
        // (length >= index + 1) was always true because index < length,
        // so charAt(index + 1) threw StringIndexOutOfBoundsException on "abc_".
        if (index + 1 < length) {
            char symbol = lowerUnderscore.charAt(index + 1);
            if (symbol >= 'a' && symbol <= 'z') {
                builder.append(Character.toUpperCase(symbol));
                fromIndex = index + 2;
                continue;
            }
        }
        fromIndex = index + 1;
    }
    if (fromIndex < length) {
        builder.append(lowerUnderscore, fromIndex, length);
    }
    return builder.toString();
}
|
@Test
public void testLowerUnderscoreToLowerCamelCase() {
    // Covers plain, multi-word, and digit-adjacent underscore positions.
    // The original listed the "some_variable123" case twice; the duplicate is removed.
    assertEquals(StringUtils.lowerUnderscoreToLowerCamelCase("some_variable"), "someVariable");
    assertEquals(
        StringUtils.lowerUnderscoreToLowerCamelCase("some_long_variable"), "someLongVariable");
    assertEquals(
        StringUtils.lowerUnderscoreToLowerCamelCase("some_123variable"), "some123variable");
    assertEquals(
        StringUtils.lowerUnderscoreToLowerCamelCase("some_variable123"), "someVariable123");
    assertEquals(
        StringUtils.lowerUnderscoreToLowerCamelCase("some_123_variable"), "some123Variable");
    assertEquals(
        StringUtils.lowerUnderscoreToLowerCamelCase("some_variable_123"), "someVariable123");
}
|
/**
 * Coerces a JSON node to an int: numeric nodes are converted directly, text
 * nodes are parsed as base-10 integers, and anything else is rejected.
 */
static int toInteger(final JsonNode object) {
    if (object instanceof NumericNode) {
        return object.intValue();
    }
    if (!(object instanceof TextNode)) {
        throw invalidConversionException(object, SqlBaseType.INTEGER);
    }
    try {
        return Integer.parseInt(object.textValue());
    } catch (final NumberFormatException e) {
        throw failedStringCoercionException(SqlBaseType.INTEGER);
    }
}
|
// Coercing a boolean node to INTEGER is unsupported and must raise IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void shouldFailWhenConvertingIncompatibleLong() {
    JsonSerdeUtils.toInteger(JsonNodeFactory.instance.booleanNode(true));
}
|
/**
 * Merges the given parameters into this builder's parameter map.
 *
 * @param appendParameters the parameters to merge into the existing ones
 * @return this builder, for chaining
 */
public MetadataReportBuilder appendParameters(Map<String, String> appendParameters) {
    this.parameters = appendParameters(this.parameters, appendParameters);
    return getThis();
}
|
@Test
void appendParameters() {
    // Appended parameters must all land in the built report's parameter map.
    Map<String, String> toAppend = new HashMap<>();
    toAppend.put("default.num", "one");
    toAppend.put("num", "ONE");
    MetadataReportBuilder builder = new MetadataReportBuilder();
    builder.appendParameters(toAppend);
    Map<String, String> built = builder.build().getParameters();
    Assertions.assertTrue(built.containsKey("default.num"));
    Assertions.assertEquals("ONE", built.get("num"));
}
|
/**
 * Identifies query terms that reference fields unknown to the system: not a
 * default-field term, not a searchable or reserved-settable ES field, and not
 * one of the available fields. Results are grouped by real field name and then
 * reduced by the limiter to the most useful subset.
 *
 * @param availableFields field names known to exist for the query's streams
 * @param terms           all parsed terms from the query
 * @return the (possibly limited) list of terms referencing unknown fields
 */
List<ParsedTerm> identifyUnknownFields(final Set<String> availableFields, final List<ParsedTerm> terms) {
    final Map<String, List<ParsedTerm>> groupedByField = terms.stream()
            .filter(t -> !t.isDefaultField())
            .filter(term -> !SEARCHABLE_ES_FIELDS.contains(term.getRealFieldName()))
            .filter(term -> !RESERVED_SETTABLE_FIELDS.contains(term.getRealFieldName()))
            .filter(term -> !availableFields.contains(term.getRealFieldName()))
            // Deduplicate before grouping so repeated identical terms count once.
            .distinct()
            .collect(Collectors.groupingBy(ParsedTerm::getRealFieldName));
    return unknownFieldsListLimiter.filterElementsContainingUsefulInformation(groupedByField);
}
|
@Test
void testDoesNotIdentifySpecialIdFieldAsUnknown() {
    // "_id" is a searchable ES field and must never be reported as unknown.
    final List<ParsedTerm> unknown = toTest.identifyUnknownFields(
            Set.of("some_normal_field"),
            List.of(ParsedTerm.create("_id", "buba")));
    assertTrue(unknown.isEmpty());
}
|
/**
 * Returns the zero-based level (depth) of the node with the given breadth-first
 * order in a complete binary tree: floor(log2(nodeOrder + 1)).
 *
 * @param nodeOrder breadth-first index of the node; the root is 0
 * @return the level of the node; the root is at level 0
 */
public static int getLevelOfNode(int nodeOrder) {
    // floor(log2(n)) via the bit length of n — the standard-library equivalent
    // of the project's QuickMath.log2 helper.
    return 31 - Integer.numberOfLeadingZeros(nodeOrder + 1);
}
|
@Test
public void testGetLevelOfNode() {
    // {nodeOrder, expectedLevel} pairs covering both edges of each level.
    int[][] cases = {
            {0, 0},
            {1, 1}, {2, 1},
            {3, 2}, {6, 2},
            {7, 3}, {14, 3},
            {15, 4}, {30, 4},
    };
    for (int[] c : cases) {
        assertEquals(c[1], MerkleTreeUtil.getLevelOfNode(c[0]));
    }
}
|
/**
 * Extracts the HTTP-specific API info (produces, consumes, HTTP methods) from
 * the definition's {@code @RequestMapping}. Empty produces/consumes fall back
 * to the all-media-type wildcard; an empty method list means all HTTP methods.
 */
@Override
protected HttpApiSpecificInfo doParse(final ApiBean.ApiDefinition apiDefinition) {
    final RequestMapping requestMapping = apiDefinition.getAnnotation(RequestMapping.class);
    final String[] produces = requestMapping.produces();
    final String produce = produces.length == 0
            ? ShenyuClientConstants.MEDIA_TYPE_ALL_VALUE : String.join(",", produces);
    final String[] consumes = requestMapping.consumes();
    final String consume = consumes.length == 0
            ? ShenyuClientConstants.MEDIA_TYPE_ALL_VALUE : String.join(",", consumes);
    final RequestMethod[] requestMethods =
            requestMapping.method().length == 0 ? RequestMethod.values() : requestMapping.method();
    final List<ApiHttpMethodEnum> apiHttpMethodEnums = Stream.of(requestMethods)
            .map(RequestMethod::name)
            .map(ApiHttpMethodEnum::of)
            .collect(Collectors.toList());
    return new HttpApiSpecificInfo(produce, consume, apiHttpMethodEnums);
}
|
@Test
public void testDoParse() throws Exception {
    ApiBean apiBean = createSimpleApiBean();
    AbstractApiDocRegistrar.HttpApiSpecificInfo parsed =
            httpApiDocRegistrar.doParse(apiBean.getApiDefinitions().get(0));
    // When the mapping declares no methods, all HTTP methods except NOT_HTTP are expected.
    List<ApiHttpMethodEnum> expectedMethods = Lists.newArrayList(ApiHttpMethodEnum.values());
    expectedMethods.remove(ApiHttpMethodEnum.NOT_HTTP);
    assertThat(parsed.getApiHttpMethodEnums(),
            Matchers.contains(expectedMethods.toArray(new ApiHttpMethodEnum[0])));
    assertThat(parsed.getConsume(), Matchers.is("application/json"));
    assertThat(parsed.getProduce(), Matchers.is("application/json"));
}
|
/**
 * Adds the given action, replacing any existing action of the same type.
 * Delegates to {@link #addOrReplaceAction}; see that method for the exact
 * replacement semantics.
 *
 * @param a the action to add or replace; must not be null
 */
public void replaceAction(@NonNull Action a) {
    addOrReplaceAction(a);
}
|
// replaceAction's parameter is @NonNull, so passing null must fail fast.
@Test
public void replaceAction_null() {
    assertThrows(IllegalArgumentException.class, () -> thing.replaceAction(null));
}
|
/**
 * Returns this plugin's position in the plugin chain: the Motan plugin code.
 */
@Override
public int getOrder() {
    return PluginEnum.MOTAN.getCode();
}
|
@Test
public void testGetOrder() {
    // JUnit 5 assertEquals takes (expected, actual); the original reversed them,
    // which produces a misleading message on failure.
    Assertions.assertEquals(310, motanPlugin.getOrder());
}
|
/**
 * Plans splits for continuous enumeration: refreshes the table so planning sees
 * the latest metadata, then either discovers the initial splits (no previous
 * position) or the incremental splits since the last position.
 */
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
    table.refresh();
    return lastPosition == null
        ? discoverInitialSplits()
        : discoverIncrementalSplits(lastPosition);
}
|
@Test
public void testIncrementalFromSnapshotIdWithEmptyTable() {
    // A start snapshot id absent from the (empty) table history must be rejected.
    ScanContext invalidSnapshotContext = ScanContext.builder()
            .startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_SNAPSHOT_ID)
            .startSnapshotId(1L)
            .build();
    ContinuousSplitPlannerImpl planner = new ContinuousSplitPlannerImpl(
            TABLE_RESOURCE.tableLoader().clone(), invalidSnapshotContext, null);
    assertThatThrownBy(() -> planner.planSplits(null))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessage("Start snapshot id not found in history: 1");
}
|
/**
 * Configures the reporter from the given properties: parses the target URL,
 * reporting period, client id and host, then schedules the periodic HTTP push.
 *
 * @param configs raw configuration map
 * @throws ConfigException if the URL is malformed or the local hostname
 *                         cannot be resolved when no host is configured
 */
@Override
public void configure(Map<String, ?> configs) {
    PushHttpMetricsReporterConfig config = new PushHttpMetricsReporterConfig(CONFIG_DEF, configs);
    try {
        url = new URL(config.getString(METRICS_URL_CONFIG));
    } catch (MalformedURLException e) {
        throw new ConfigException("Malformed metrics.url", e);
    }
    int period = config.getInteger(METRICS_PERIOD_CONFIG);
    clientId = config.getString(CLIENT_ID_CONFIG);
    host = config.getString(METRICS_HOST_CONFIG);
    // Fall back to the local canonical hostname when no host is configured.
    if (host == null || host.isEmpty()) {
        try {
            host = InetAddress.getLocalHost().getCanonicalHostName();
        } catch (UnknownHostException e) {
            throw new ConfigException("Failed to get canonical hostname", e);
        }
    }
    // First report fires after one full period, then repeats every period.
    executor.scheduleAtFixedRate(new HttpReporter(), period, period, TimeUnit.SECONDS);
    log.info("Configured PushHttpMetricsReporter for {} to report every {} seconds", url, period);
}
|
@Test
public void testConfigureMissingPeriod() {
    // Omitting the required metrics period must fail configuration.
    Map<String, String> props = new HashMap<>();
    props.put(PushHttpMetricsReporter.METRICS_URL_CONFIG, URL.toString());
    assertThrows(ConfigException.class, () -> reporter.configure(props));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.