focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Runs a single change stream query over the partition's current restriction,
// dispatching every returned record to its type-specific action. Returns the
// action's continuation when one asks to pause/stop early; otherwise tries to
// claim the end timestamp and marks the partition finished.
@SuppressWarnings("nullness")
@VisibleForTesting
public ProcessContinuation run(
    PartitionMetadata partition,
    RestrictionTracker<TimestampRange, Timestamp> tracker,
    OutputReceiver<DataChangeRecord> receiver,
    ManualWatermarkEstimator<Instant> watermarkEstimator,
    BundleFinalizer bundleFinalizer) {
  final String token = partition.getPartitionToken();
  final Timestamp startTimestamp = tracker.currentRestriction().getFrom();
  final Timestamp endTimestamp = partition.getEndTimestamp();

  // TODO: Potentially we can avoid this fetch, by enriching the runningAt timestamp when the
  // ReadChangeStreamPartitionDoFn#processElement is called
  final PartitionMetadata updatedPartition =
      Optional.ofNullable(partitionMetadataDao.getPartition(token))
          .map(partitionMetadataMapper::from)
          .orElseThrow(
              () ->
                  new IllegalStateException(
                      "Partition " + token + " not found in metadata table"));

  try (ChangeStreamResultSet resultSet =
      changeStreamDao.changeStreamQuery(
          token, startTimestamp, endTimestamp, partition.getHeartbeatMillis())) {
    metrics.incQueryCounter();
    while (resultSet.next()) {
      final List<ChangeStreamRecord> records =
          changeStreamRecordMapper.toChangeStreamRecords(
              updatedPartition, resultSet, resultSet.getMetadata());
      Optional<ProcessContinuation> maybeContinuation;
      // Dispatch on the concrete record type; an unknown type is a hard failure.
      for (final ChangeStreamRecord record : records) {
        if (record instanceof DataChangeRecord) {
          maybeContinuation =
              dataChangeRecordAction.run(
                  updatedPartition,
                  (DataChangeRecord) record,
                  tracker,
                  receiver,
                  watermarkEstimator);
        } else if (record instanceof HeartbeatRecord) {
          maybeContinuation =
              heartbeatRecordAction.run(
                  updatedPartition, (HeartbeatRecord) record, tracker, watermarkEstimator);
        } else if (record instanceof ChildPartitionsRecord) {
          maybeContinuation =
              childPartitionsRecordAction.run(
                  updatedPartition, (ChildPartitionsRecord) record, tracker, watermarkEstimator);
        } else {
          LOG.error("[{}] Unknown record type {}", token, record.getClass());
          throw new IllegalArgumentException("Unknown record type " + record.getClass());
        }
        if (maybeContinuation.isPresent()) {
          LOG.debug("[{}] Continuation present, returning {}", token, maybeContinuation);
          // Persist the watermark after the bundle commits even on early return.
          bundleFinalizer.afterBundleCommit(
              Instant.now().plus(BUNDLE_FINALIZER_TIMEOUT),
              updateWatermarkCallback(token, watermarkEstimator));
          return maybeContinuation.get();
        }
      }
    }
    // Normal completion of the query: schedule the watermark update as well.
    bundleFinalizer.afterBundleCommit(
        Instant.now().plus(BUNDLE_FINALIZER_TIMEOUT),
        updateWatermarkCallback(token, watermarkEstimator));
  } catch (SpannerException e) {
    /*
     * If there is a split when a partition is supposed to be finished, the residual will try to
     * perform a change stream query for an out of range interval. We ignore this error here, and
     * the residual should be able to claim the end of the timestamp range, finishing the
     * partition.
     */
    if (isTimestampOutOfRange(e)) {
      LOG.info(
          "[{}] query change stream is out of range for {} to {}, finishing stream.",
          token,
          startTimestamp,
          endTimestamp,
          e);
    } else {
      throw e;
    }
  } catch (Exception e) {
    LOG.error(
        "[{}] query change stream had exception processing range {} to {}.",
        token,
        startTimestamp,
        endTimestamp,
        e);
    throw e;
  }
  LOG.debug("[{}] change stream completed successfully", token);
  if (tracker.tryClaim(endTimestamp)) {
    LOG.debug("[{}] Finishing partition", token);
    partitionMetadataDao.updateToFinished(token);
    metrics.decActivePartitionReadCounter();
    LOG.info("[{}] After attempting to finish the partition", token);
  }
  return ProcessContinuation.stop();
}
// Heartbeat records must be routed to the HeartbeatRecordAction; when an action
// returns a continuation the run stops, the watermark is persisted, and the
// other record actions (and tryClaim) are never invoked.
@Test
public void testQueryChangeStreamWithHeartbeatRecord() {
  final Struct rowAsStruct = mock(Struct.class);
  final ChangeStreamResultSetMetadata resultSetMetadata =
      mock(ChangeStreamResultSetMetadata.class);
  final ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class);
  final HeartbeatRecord record1 = mock(HeartbeatRecord.class);
  final HeartbeatRecord record2 = mock(HeartbeatRecord.class);
  when(record1.getRecordTimestamp()).thenReturn(PARTITION_START_TIMESTAMP);
  when(record2.getRecordTimestamp()).thenReturn(PARTITION_START_TIMESTAMP);
  when(changeStreamDao.changeStreamQuery(
          PARTITION_TOKEN,
          PARTITION_START_TIMESTAMP,
          PARTITION_END_TIMESTAMP,
          PARTITION_HEARTBEAT_MILLIS))
      .thenReturn(resultSet);
  when(resultSet.next()).thenReturn(true);
  when(resultSet.getCurrentRowAsStruct()).thenReturn(rowAsStruct);
  when(resultSet.getMetadata()).thenReturn(resultSetMetadata);
  when(changeStreamRecordMapper.toChangeStreamRecords(partition, resultSet, resultSetMetadata))
      .thenReturn(Arrays.asList(record1, record2));
  // record1 produces no continuation; record2 requests a stop.
  when(heartbeatRecordAction.run(partition, record1, restrictionTracker, watermarkEstimator))
      .thenReturn(Optional.empty());
  when(heartbeatRecordAction.run(partition, record2, restrictionTracker, watermarkEstimator))
      .thenReturn(Optional.of(ProcessContinuation.stop()));
  when(watermarkEstimator.currentWatermark()).thenReturn(WATERMARK);

  final ProcessContinuation result =
      action.run(
          partition, restrictionTracker, outputReceiver, watermarkEstimator, bundleFinalizer);

  assertEquals(ProcessContinuation.stop(), result);
  verify(heartbeatRecordAction).run(partition, record1, restrictionTracker, watermarkEstimator);
  verify(heartbeatRecordAction).run(partition, record2, restrictionTracker, watermarkEstimator);
  verify(partitionMetadataDao).updateWatermark(PARTITION_TOKEN, WATERMARK_TIMESTAMP);
  // Early stop: no other record actions fire and the range is never claimed.
  verify(dataChangeRecordAction, never()).run(any(), any(), any(), any(), any());
  verify(childPartitionsRecordAction, never()).run(any(), any(), any(), any());
  verify(restrictionTracker, never()).tryClaim(any());
}
// Deploys a Flink Application Cluster on YARN. Rejects the request when the
// configured deployment target is not yarn-application and, for non-PyFlink
// jobs, requires exactly one user jar in pipeline.jars.
@Override
public ClusterClientProvider<ApplicationId> deployApplicationCluster(
        final ClusterSpecification clusterSpecification,
        final ApplicationConfiguration applicationConfiguration)
        throws ClusterDeploymentException {
    checkNotNull(clusterSpecification);
    checkNotNull(applicationConfiguration);

    final YarnDeploymentTarget deploymentTarget =
            YarnDeploymentTarget.fromConfig(flinkConfiguration);
    if (YarnDeploymentTarget.APPLICATION != deploymentTarget) {
        throw new ClusterDeploymentException(
                "Couldn't deploy Yarn Application Cluster."
                        + " Expected deployment.target="
                        + YarnDeploymentTarget.APPLICATION.getName()
                        + " but actual one was \""
                        + deploymentTarget.getName()
                        + "\"");
    }

    // Merge the application-level configuration into the cluster configuration.
    applicationConfiguration.applyToConfiguration(flinkConfiguration);

    // No need to do pipelineJars validation if it is a PyFlink job.
    if (!(PackagedProgramUtils.isPython(applicationConfiguration.getApplicationClassName())
            || PackagedProgramUtils.isPython(applicationConfiguration.getProgramArguments()))) {
        final List<String> pipelineJars =
                flinkConfiguration
                        .getOptional(PipelineOptions.JARS)
                        .orElse(Collections.emptyList());
        Preconditions.checkArgument(pipelineJars.size() == 1, "Should only have one jar");
    }

    try {
        return deployInternal(
                clusterSpecification,
                "Flink Application Cluster",
                YarnApplicationClusterEntryPoint.class.getName(),
                null,
                false);
    } catch (Exception e) {
        throw new ClusterDeploymentException("Couldn't deploy Yarn Application Cluster", e);
    }
}
// Deployment must fail with ClusterDeploymentException when deployment.target
// is yarn-session instead of yarn-application.
@Test
void testDeployApplicationClusterWithDeploymentTargetNotCorrectlySet() {
    final Configuration flinkConfig = new Configuration();
    flinkConfig.set(
            PipelineOptions.JARS, Collections.singletonList("file:///path/of/user.jar"));
    flinkConfig.set(DeploymentOptions.TARGET, YarnDeploymentTarget.SESSION.getName());
    try (final YarnClusterDescriptor yarnClusterDescriptor =
            createYarnClusterDescriptor(flinkConfig)) {
        assertThrows(
                "Expected deployment.target=yarn-application",
                ClusterDeploymentException.class,
                () ->
                        yarnClusterDescriptor.deployApplicationCluster(
                                clusterSpecification, appConfig));
    }
}
// Records a new sample: tags this metric with the owning entity/service and
// folds the value into the running sum.
@Override
public void accept(MeterEntity entity, Long value) {
    // The two id setters are independent of each other.
    setServiceId(entity.serviceId());
    setEntityId(entity.id());
    // Accumulate the incoming sample into the total.
    setTotal(getTotal() + value);
}
// Two accepted samples must accumulate: after each calculate() the value is
// the running sum of all samples so far.
@Test
public void testAccept() {
    long time1 = 1597113318673L;
    function.accept(MeterEntity.newService("sum_sync_time", Layer.GENERAL), time1);
    function.calculate();
    assertThat(function.getValue()).isEqualTo(time1);
    long time2 = 1597113447737L;
    function.accept(MeterEntity.newService("sum_sync_time", Layer.GENERAL), time2);
    function.calculate();
    assertThat(function.getValue()).isEqualTo(time1 + time2);
}
// UDF: returns the fragment component (the part after '#') of the given URL,
// delegating parsing and null/error handling to UrlParser.
@Udf
public String extractFragment(
    @UdfParameter(value = "input", description = "a valid URL to extract a fragment from")
    final String input) {
  return UrlParser.extract(input, URI::getFragment);
}
// The text after '#' should be returned for a URL that carries a fragment.
@Test
public void shouldExtractFragmentIfPresent() {
  assertThat(
      extractUdf.extractFragment(
          "https://docs.confluent.io/current/ksql/docs/syntax-reference.html#scalar-functions"),
      equalTo("scalar-functions"));
}
public Optional<Violation> validate(IndexSetConfig newConfig) { // Don't validate prefix conflicts in case of an update if (Strings.isNullOrEmpty(newConfig.id())) { final Violation prefixViolation = validatePrefix(newConfig); if (prefixViolation != null) { return Optional.of(prefixViolation); } } final Violation fieldMappingViolation = validateMappingChangesAreLegal(newConfig); if (fieldMappingViolation != null) { return Optional.of(fieldMappingViolation); } Violation refreshIntervalViolation = validateSimpleIndexSetConfig(newConfig); if (refreshIntervalViolation != null){ return Optional.of(refreshIntervalViolation); } return Optional.empty(); }
// A prefix that collides with an index already managed by the registry must
// produce a violation for a new (id-less) index set config.
@Test
public void validateWhenAlreadyManaged() {
    final String prefix = "graylog_index";
    final IndexSetConfig newConfig = mock(IndexSetConfig.class);
    when(indexSetRegistry.isManagedIndex("graylog_index_0")).thenReturn(true);
    when(newConfig.indexPrefix()).thenReturn(prefix);
    final Optional<IndexSetValidator.Violation> violation = validator.validate(newConfig);
    assertThat(violation).isPresent();
}
@Override public <KEY> URIMappingResult<KEY> mapUris(List<URIKeyPair<KEY>> requestUriKeyPairs) throws ServiceUnavailableException { if (requestUriKeyPairs == null || requestUriKeyPairs.isEmpty()) { return new URIMappingResult<>(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap()); } // API assumes that all requests will be made to the same service, just use the first request to get the service name and act as sample uri URI sampleURI = requestUriKeyPairs.get(0).getRequestUri(); String serviceName = LoadBalancerUtil.getServiceNameFromUri(sampleURI); // To achieve scatter-gather, we require the following information PartitionAccessor accessor = _partitionInfoProvider.getPartitionAccessor(serviceName); Map<Integer, Ring<URI>> rings = _hashRingProvider.getRings(sampleURI); HashFunction<Request> hashFunction = _hashRingProvider.getRequestHashFunction(serviceName); Map<Integer, Set<KEY>> unmapped = new HashMap<>(); // Pass One Map<Integer, List<URIKeyPair<KEY>>> requestsByPartition = distributeToPartitions(requestUriKeyPairs, accessor, unmapped); // Pass Two Map<URI, Integer> hostToParitionId = new HashMap<>(); Map<URI, Set<KEY>> hostToKeySet = distributeToHosts(requestsByPartition, rings, hashFunction, hostToParitionId, unmapped); return new URIMappingResult<>(hostToKeySet, unmapped, hostToParitionId); }
// A single host that serves all partitions should receive every key:
// one mapping entry containing all partitionCount * requestPerPartition keys.
@Test
public void testSameHostSupportingMultiplePartitions() throws ServiceUnavailableException {
  int partitionCount = 10;
  int requestPerPartition = 100;
  // one host supporting 10 partitions
  URI host = createHostURI(0, 0);
  List<Ring<URI>> rings =
      IntStream.range(0, partitionCount)
          .boxed()
          .map(i -> new MPConsistentHashRing<>(Collections.singletonMap(host, 100)))
          .collect(Collectors.toList());
  StaticRingProvider ringProvider = new StaticRingProvider(rings);
  ringProvider.setHashFunction(new RandomHash());
  PartitionInfoProvider infoProvider = createRangeBasedPartitionInfoProvider(partitionCount);
  URIMapper mapper = new RingBasedUriMapper(ringProvider, infoProvider);

  List<URIKeyPair<Integer>> requests =
      testUtil.generateRequests(partitionCount, requestPerPartition);
  URIMappingResult<Integer> results = mapper.mapUris(requests);
  Map<URI, Set<Integer>> mapping = results.getMappedKeys();
  Map<Integer, Set<Integer>> unmappedKeys = results.getUnmappedKeys();

  Assert.assertTrue(unmappedKeys.isEmpty());
  Assert.assertEquals(1, mapping.size());
  Assert.assertEquals(1000, mapping.values().iterator().next().size());
}
// Compresses the given string with the named compressor (or the default when
// the name is null) and returns the result base64-encoded.
public String compress(String compressorName, String uncompressedString) throws IOException {
    Checks.notNull(uncompressedString, "uncompressedString cannot be null");
    // Fall back to the default codec when no name was supplied.
    final String effectiveName =
            (compressorName == null) ? DEFAULT_COMPRESSOR_NAME : compressorName;
    final Compressor compressor = getCompressor(effectiveName);
    final byte[] raw = uncompressedString.getBytes(DEFAULT_ENCODING);
    final byte[] compressed = compressor.compress(raw);
    return base64Encode(compressed);
}
// An unknown compressor name surfaces as a NullPointerException carrying the
// offending name in its message.
@Test
public void compressShouldThrowExceptionIfCompressorNotFound() {
    AssertHelper.assertThrows(
        "compress should throw exception if compressor not found",
        NullPointerException.class,
        "unknown compressorName: abcd",
        () -> stringCodec.compress("abcd", "testValue"));
}
// Returns the URL(s) known for the file: only the stored web link attribute,
// when one is present.
@Override
public DescriptiveUrlBag toUrl(final Path file) {
    final DescriptiveUrlBag urls = new DescriptiveUrlBag();
    // EMPTY is a shared sentinel instance, so identity comparison is intentional.
    if (file.attributes().getLink() != DescriptiveUrl.EMPTY) {
        urls.add(file.attributes().getLink());
    }
    return urls;
}
// A file with no stored web link yields an empty URL bag; the remotely
// created test file is deleted afterwards.
@Test
public void testToUrl() throws Exception {
    final DriveUrlProvider provider = new DriveUrlProvider();
    final Path test = new Path(
        DriveHomeFinderService.MYDRIVE_FOLDER,
        UUID.randomUUID().toString(),
        EnumSet.of(Path.Type.file));
    assertNotNull(provider.toUrl(test));
    assertTrue(provider.toUrl(test).isEmpty());
    new DriveTouchFeature(session, new DriveFileIdProvider(session)).touch(test, new TransferStatus());
    // assertFalse(provider.toDownloadUrl(test).isEmpty());
    new DriveDeleteFeature(session, new DriveFileIdProvider(session)).delete(
        Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
// Builds the source stream/table for the given data source, selecting the
// windowed variant when the source topic's key format is windowed.
public static SchemaKStream<?> buildSource(
    final PlanBuildContext buildContext,
    final DataSource dataSource,
    final QueryContext.Stacker contextStacker
) {
  final boolean windowed = dataSource.getKsqlTopic().getKeyFormat().isWindowed();
  switch (dataSource.getDataSourceType()) {
    case KSTREAM:
      return windowed
          ? buildWindowedStream(
              buildContext,
              dataSource,
              contextStacker
          ) : buildStream(
              buildContext,
              dataSource,
              contextStacker
          );

    case KTABLE:
      return windowed
          ? buildWindowedTable(
              buildContext,
              dataSource,
              contextStacker
          ) : buildTable(
              buildContext,
              dataSource,
              contextStacker
          );

    default:
      // Only KSTREAM/KTABLE are supported source types.
      throw new UnsupportedOperationException("Source type:" + dataSource.getDataSourceType());
  }
}
// When an existing query used an older pseudo-column version, rebuilding a
// windowed stream source must keep that legacy version (schema compatibility).
@Test
public void shouldReplaceWindowedStreamSourceWithMatchingPseudoColumnVersion() {
  // Given:
  givenWindowedStream();
  givenExistingQueryWithOldPseudoColumnVersion(windowedStreamSource);

  // When:
  final SchemaKStream<?> result = SchemaKSourceFactory.buildSource(
      buildContext,
      dataSource,
      contextStacker
  );

  // Then:
  assertThat(
      ((WindowedStreamSource) result.getSourceStep()).getPseudoColumnVersion(),
      equalTo(LEGACY_PSEUDOCOLUMN_VERSION_NUMBER));
  assertValidSchema(result);
}
// Copies the plugin JAR into its dedicated deploy directory and unzips its
// bundled libraries there, returning the exploded layout. Any failure is
// wrapped into an IllegalStateException with full context.
@Override
public ExplodedPlugin explode(PluginInfo plugin) {
  File toDir = new File(fs.getDeployedPluginsDir(), plugin.getKey());
  try {
    // Ensure a fresh, empty target directory for this plugin key.
    forceMkdir(toDir);
    org.sonar.core.util.FileUtils.cleanDirectory(toDir);

    File jarTarget = new File(toDir, plugin.getNonNullJarFile().getName());
    FileUtils.copyFile(plugin.getNonNullJarFile(), jarTarget);
    // Extract only the classloader-relevant entries (lib filter).
    ZipUtils.unzip(plugin.getNonNullJarFile(), toDir, newLibFilter());
    return explodeFromUnzippedDir(plugin, jarTarget, toDir);

  } catch (Exception e) {
    throw new IllegalStateException(String.format(
      "Fail to unzip plugin [%s] %s to %s",
      plugin.getKey(), plugin.getNonNullJarFile().getAbsolutePath(), toDir.getAbsolutePath()), e);
  }
}
@Test public void copy_all_classloader_files_to_dedicated_directory() throws Exception { File deployDir = temp.newFolder(); when(fs.getDeployedPluginsDir()).thenReturn(deployDir); File sourceJar = TestProjectUtils.jarOf("test-libs-plugin"); PluginInfo info = PluginInfo.create(sourceJar); ExplodedPlugin exploded = underTest.explode(info); // all the files loaded by classloaders (JAR + META-INF/libs/*.jar) are copied to the dedicated directory // web/deploy/{pluginKey} File pluginDeployDir = new File(deployDir, "testlibs"); assertThat(exploded.getKey()).isEqualTo("testlibs"); assertThat(exploded.getMain()).isFile().exists().hasParent(pluginDeployDir); assertThat(exploded.getLibs()).extracting("name").containsOnly("commons-daemon-1.0.15.jar", "commons-email-20030310.165926.jar"); for (File lib : exploded.getLibs()) { assertThat(lib).exists().isFile(); assertThat(lib.getCanonicalPath()).startsWith(pluginDeployDir.getCanonicalPath()); } File targetJar = new File(fs.getDeployedPluginsDir(), "testlibs/test-libs-plugin-0.1-SNAPSHOT.jar"); }
// Builds the Shenyu context for this request, stashes it on the exchange so
// downstream plugins can read it, then continues the plugin chain.
@Override
public Mono<Void> execute(final ServerWebExchange exchange, final ShenyuPluginChain chain) {
    final ShenyuContext context = builder.build(exchange);
    exchange.getAttributes().put(Constants.CONTEXT, context);
    return chain.execute(exchange);
}
// Executing the plugin must place a ShenyuContext attribute on the exchange,
// both for a plain HTTP request and for a websocket upgrade request.
@Test
public void testExecuted() {
    this.globalPlugin.execute(this.exchange, this.chain);
    assertNotNull(this.exchange.getAttributes().get(Constants.CONTEXT));
    this.exchange = MockServerWebExchange.from(MockServerHttpRequest.get("http://localhost:8080/http")
        .remoteAddress(new InetSocketAddress(8091))
        .header(UPGRADE, "websocket")
        .build());
    this.globalPlugin.execute(this.exchange, this.chain);
    assertNotNull(this.exchange.getAttributes().get(Constants.CONTEXT));
}
@Override public Object result(RpcException e) { // javax dependency judge if (violationDependency()) { // ConstraintViolationException judge if (ConstraintViolationExceptionConvert.needConvert(e)) { return ConstraintViolationExceptionConvert.handleConstraintViolationException(e); } } return "Internal server error: " + e.getMessage(); }
// A plain RpcException (no constraint violations) maps to a non-null String
// response.
@Test
void testNormalException() {
    RpcException rpcException = new RpcException();
    Object response = exceptionMapper.result(rpcException);
    assertThat(response, not(nullValue()));
    assertThat(response, instanceOf(String.class));
}
// Reads the service's /proc/<pid>/smaps and returns its memory usage figures.
// A missing smaps file marks the service dead and yields zeroed values; read
// and close failures are logged but never propagated.
static long[] getMemoryUsage(VespaService service) {
    BufferedReader br;
    int pid = service.getPid();
    try {
        br = new BufferedReader(new FileReader("/proc/" + pid + "/smaps"));
    } catch (FileNotFoundException ex) {
        // Process is gone: mark the service as not alive.
        service.setAlive(false);
        return new long[2];
    }
    try {
        return getMemoryUsage(br);
    } catch (IOException ex) {
        log.log(Level.FINE, "Unable to read line from smaps file", ex);
        return new long[2];
    } finally {
        // Manual close (not try-with-resources) so a close failure is only
        // logged and never overrides the already-computed return value.
        try {
            br.close();
        } catch (IOException ex) {
            log.log(Level.FINE, "Closing of smaps file failed", ex);
        }
    }
}
// Micro-benchmark for smaps parsing; @Ignore'd so it never runs in CI.
// Also sanity-checks the parsed values on every iteration.
@Ignore
@Test
public void benchmarkSmapsParsing() throws IOException {
    for (int i=0; i < 100000; i++) {
        BufferedReader br = new BufferedReader(new StringReader(smaps));
        long[] memusage = SystemPoller.getMemoryUsage(br);
        assertEquals(913408L, memusage[0]);
        assertEquals(847872L, memusage[1]);
    }
}
// Returns the validation errors collected for this config element.
@Override
public ConfigErrors errors() {
    return configErrors;
}
// Param names must be alphanumeric (underscores/hyphens/periods allowed, but
// not leading periods); a leading '.' produces the expected error message.
@Test
public void validate_shouldMakeSureParamNameIsOfNameType() {
    assertThat(createAndValidate("name").errors().isEmpty(), is(true));
    ConfigErrors errors = createAndValidate(".name").errors();
    assertThat(errors.isEmpty(), is(false));
    assertThat(
        errors.on(ParamConfig.NAME),
        is("Invalid parameter name '.name'. This must be alphanumeric and can contain underscores, hyphens and periods (however, it cannot start with a period). The maximum allowed length is 255 characters."));
}
/**
 * Extracts the HTTP status code from the given servlet response.
 *
 * @param response the response to inspect
 * @return the status code; appears to return 0 for unsupported response
 *     types (see the servlet 2.5 test) — NOTE(review): confirm against the
 *     concrete implementations
 */
public abstract int status(HttpServletResponse response);
// Local (method-scoped) response classes must not be parsed for a status,
// to avoid unbounded growth of the type cache; expect 0 instead.
@Test
void servlet25_status_doesntParseLocalTypes() {
    // while looks nice, this will overflow our cache
    class LocalResponse extends HttpServletResponseImpl {
    }
    assertThat(servlet25.status(new LocalResponse())).isZero();
}
// Sends the request through the underlying Bot API client and returns the
// typed response.
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
    return api.send(request);
}
// End-to-end SendAudio coverage: by file id with caption entities, by file
// with thumbnail, and by raw bytes with HTML caption, parse-mode entity
// extraction, duration/performer/title metadata and thumbnail size.
@Test
public void sendAudio() {
    // 1) Send by cached file id with an explicit italic caption entity.
    Message message = bot.execute(new SendAudio(chatId, audioFileId)
        .caption("caption").captionEntities(new MessageEntity(MessageEntity.Type.italic, 0, 7))
    ).message();
    MessageTest.checkMessage(message);
    AudioTest.checkAudio(message.audio(), false);

    MessageEntity captionEntity = message.captionEntities()[0];
    assertEquals(MessageEntity.Type.italic, captionEntity.type());
    assertEquals((Integer) 0, captionEntity.offset());
    assertEquals((Integer) 7, captionEntity.length());

    // 2) Send by file with a thumbnail file.
    message = bot.execute(new SendAudio(chatId, audioFile).thumbnail(thumbFile)).message();
    MessageTest.checkMessage(message);
    AudioTest.checkAudio(message.audio());
    assertEquals(thumbSize, message.audio().thumbnail().fileSize());

    // 3) Send by raw bytes with an HTML caption; Telegram strips the tags and
    // reports url/bold/hashtag entities instead.
    String cap = "http://ya.ru <b>bold</b> #audio @pengrad_test_bot",
        title = "title", performer = "performer";
    ParseMode parseMode = ParseMode.HTML;
    int duration = 100;
    SendAudio sendAudio = new SendAudio(chatId, audioBytes).thumbnail(thumbBytes).duration(duration)
        .caption(cap).parseMode(parseMode).performer(performer).title(title);
    message = bot.execute(sendAudio).message();
    MessageTest.checkMessage(message);

    Audio audio = message.audio();
    AudioTest.checkAudio(audio);
    assertEquals(cap.replace("<b>", "").replace("</b>", ""), message.caption());
    assertEquals((Integer) 100, audio.duration());
    assertEquals(performer, audio.performer());
    assertEquals(title, audio.title());
    assertEquals(thumbSize, audio.thumbnail().fileSize());

    captionEntity = message.captionEntities()[0];
    assertEquals(MessageEntity.Type.url, captionEntity.type());
    assertEquals((Integer) 0, captionEntity.offset());
    assertEquals((Integer) 12, captionEntity.length());

    captionEntity = message.captionEntities()[1];
    assertEquals(MessageEntity.Type.bold, captionEntity.type());
    assertEquals((Integer) 14, captionEntity.offset());
    assertEquals((Integer) 4, captionEntity.length());

    assertEquals(MessageEntity.Type.hashtag, message.captionEntities()[2].type());
}
// Removes and returns the next callback from the ring buffer, or returns null
// and freezes the queue when it is empty. Synchronized so producers observe
// the frozen flag consistently with the emptiness check.
InvocationCallback queuePoll() {
    InvocationCallback element;
    synchronized (this) {
        if (tail != head) {
            // Non-empty: mask wraps the monotonically increasing head index
            // into the power-of-two ring buffer bounds.
            element = elements[head & mask];
            head++;
        } else {
            element = null;
            // Mark frozen so subsequent adds know polling has stopped.
            frozen = true;
        }
    }
    return element;
}
// Filling the queue past twice its initial capacity (with various add/poll
// split points) must trigger capacity expansion without losing callbacks.
@Test(dataProvider = "offsets")
public void testExpandCapacity(int splitOffset) throws Throwable {
    CompletableFuture<Object> future = new CompletableFuture<>();
    QueueAsyncInvocationStage stage =
        new QueueAsyncInvocationStage(null, null, future, makeCallback(0));
    assertCallback(0, stage.queuePoll());
    addAndPoll(stage, splitOffset);
    // Now trigger 2 expansions
    int count = 2 * QueueAsyncInvocationStage.QUEUE_INITIAL_CAPACITY;
    addAndPoll(stage, count);
}
// Walks the AST rooted at node (via the internal Visitor) and returns every
// aliased data source it references.
public Set<Analysis.AliasedDataSource> extractDataSources(final AstNode node) {
  new Visitor().process(node, null);
  return getAllSources();
}
// Both sides of an unaliased JOIN must be extracted, each under its implicit
// (source-name) alias.
@Test
public void shouldExtractUnaliasedJoinDataSources() {
  // Given:
  final AstNode stmt = givenQuery("SELECT * FROM TEST1 JOIN TEST2"
      + " ON test1.col1 = test2.col1;");

  // When:
  extractor.extractDataSources(stmt);

  // Then:
  assertContainsAlias(TEST1, TEST2);
}
// Exact-match stream rule: compares the (trimmed) field value to the rule
// value; a missing field matches only for inverted rules, and XOR applies the
// inversion to the comparison result.
@Override
public boolean match(Message msg, StreamRule rule) {
    final Object fieldValue = msg.getField(rule.getField());
    if (fieldValue == null) {
        // Absent field: an inverted rule matches, a normal one does not.
        return rule.getInverted();
    }
    final boolean equal = fieldValue.toString().trim().equals(rule.getValue());
    return rule.getInverted() ^ equal;
}
// A message that lacks the rule's field must not match a non-inverted rule,
// even when other fields are present.
@Test
public void testNonExistantField() {
    StreamRule rule = getSampleRule();
    Message msg = getSampleMessage();
    msg.addField("someother", "foo");
    StreamRuleMatcher matcher = getMatcher(rule);
    assertFalse(matcher.match(msg, rule));
}
@Override public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException { try { if (defaultTrustManager != null) { defaultTrustManager.checkServerTrusted(chain, authType); } } catch (CertificateException ce) { // If the certificate chain couldn't be verified using the default trust manager, // try verifying the same with the user-provided root CA if (userTrustManager != null) { userTrustManager.checkServerTrusted(chain, authType); } } }
// A self-signed certificate recognized by neither trust manager must be
// rejected with an exception.
@Test(expected = Exception.class)
public void testCustomX509TrustManagerWithUnrecognizedCertificate() throws CertificateException {
    customTrustManager.checkServerTrusted(
        new X509Certificate[] {unrecognizedSelfSignedCertificate}, "RSA");
}
// Rewrites the spec's startMs to be no earlier than "now" by round-tripping
// the spec through its JSON tree representation (TaskSpec is immutable).
TaskSpec rebaseTaskSpecTime(TaskSpec spec) throws Exception {
    ObjectNode node = JsonUtil.JSON_SERDE.valueToTree(spec);
    node.set("startMs", new LongNode(Math.max(time.milliseconds(), spec.startMs())));
    return JsonUtil.JSON_SERDE.treeToValue(node, TaskSpec.class);
}
// A task that completes normally should log its rebased spec and report the
// "halted" success status.
@Test
public void testAgentExecWithNormalExit() throws Exception {
    Agent agent = createAgent(Scheduler.SYSTEM);
    SampleTaskSpec spec = new SampleTaskSpec(0, 120000,
        Collections.singletonMap("node01", 1L), "");
    TaskSpec rebasedSpec = agent.rebaseTaskSpecTime(spec);
    testExec(agent,
        String.format("Waiting for completion of task:%s%n",
            JsonUtil.toPrettyJsonString(rebasedSpec)) +
        String.format("Task succeeded with status \"halted\"%n"),
        true, rebasedSpec);
}
// Convenience overload delegating to the two-argument analyze with the
// boolean flag disabled (flag semantics defined at the overload — NOTE(review):
// confirm what `false` toggles there).
public Analysis analyze(Statement statement) {
    return analyze(statement, false);
}
// A row constructor in the select list must analyze cleanly when its parts
// appear in GROUP BY.
@Test
public void testGroupByWithRowExpression() {
    // TODO: verify output
    analyze("SELECT (a, b) FROM t1 GROUP BY a, b");
}
// Loads the EfficientNetV2-S vision model from its default weights path.
public static VisionModel V2S() {
    return V2S("model/EfficientNet/efficientnet_v2_s.pt");
}
// Full EfficientNetV2-S ImageNet training run on GPU; relies on local
// dataset paths, so this is an integration/benchmark test, not a unit test.
@Test
public void train() throws IOException {
    Device device = Device.CUDA((byte) 1);
    device.setDefaultDevice();

    // half precision to lower memory usage.
    var dtype = ScalarType.BFloat16;
    var model = EfficientNet.V2S();
    model.to(device, dtype);

    var transform = Transform.classification(384, 384);
    var data = new ImageDataset(64, "../imagenet/train", transform, ImageNet.folder2Target);
    var test = new ImageDataset(16, "../imagenet/val", transform, ImageNet.folder2Target);

    // Warm-up then cosine decay after 50k steps.
    var schedule = TimeFunction.piecewise(
        new int[] { 50000 },
        TimeFunction.linear(0.0001, 50000, 0.01),
        TimeFunction.cosine(0.0001, 50000, 0.01));
    model.setLearningRateSchedule(schedule);

    // Use parameters from the paper, the rests are Keras default values.
    // Note that Keras has different default values from PyTorch (e.g. alpha and eps).
    Optimizer optimizer = Optimizer.RMSprop(model, 0.0001, 0.9, 1E-07, 1E-05, 0.9, false);
    model.train(5, optimizer, Loss.nll(), data, test, null, new Accuracy());
}
// Returns whether this consumer can handle the endpoint URL: non-null and
// matching the Google Pub/Sub endpoint pattern.
public static boolean acceptEndpoint(String endpointUrl) {
    if (endpointUrl == null) {
        return false;
    }
    return endpointUrl.matches(ENDPOINT_PATTERN_STRING);
}
// A well-formed googlepubsub://project/topic endpoint must be accepted.
@Test
void testAcceptEndpoint() {
    assertTrue(GooglePubSubMessageConsumptionTask.acceptEndpoint(
        "googlepubsub://my-own-project-id/my-topic"));
}
// Checks whether the stream starts with any configured "magic number"
// (defaults cover the ZIP/JAR signatures). Uses mark/reset so the stream is
// rewound after each probe and left at its original position for the caller.
public static boolean validMagicNumbers( final BufferedInputStream bin ) throws IOException
{
    final List<String> validMagicBytesCollection = JiveGlobals.getListProperty(
        "plugins.upload.magic-number.values.expected-value",
        Arrays.asList( "504B0304", "504B0506", "504B0708" ) );
    for ( final String entry : validMagicBytesCollection )
    {
        final byte[] validMagicBytes = StringUtils.decodeHex( entry );
        // Remember the current position so the probe can be undone.
        bin.mark( validMagicBytes.length );
        try
        {
            final byte[] magicBytes = new byte[validMagicBytes.length];
            // Loop until the prefix is fully read or EOF is hit; a single
            // read() may return fewer bytes than requested.
            int remaining = validMagicBytes.length;
            while (remaining > 0) {
                final int location = validMagicBytes.length - remaining;
                final int count = bin.read(magicBytes, location, remaining);
                if (count == -1) {
                    break;
                }
                remaining -= count;
            }
            if ( remaining <= 0 && Arrays.equals( validMagicBytes, magicBytes ) )
            {
                return true;
            }
        }
        finally
        {
            // Always rewind, whether or not this signature matched.
            bin.reset();
        }
    }
    return false;
}
// A real JAR resource must be recognized by its ZIP magic bytes.
@Test
public void testJARMagicBytes() throws Exception
{
    // Setup test fixture.
    try (final InputStream inputStream = getClass().getClassLoader().getResourceAsStream("hello.jar")) {
        assert inputStream != null;
        try (final BufferedInputStream in = new BufferedInputStream(inputStream)) {
            // Execute system under test
            final boolean result = PluginManager.validMagicNumbers( in );

            // Verify results.
            assertTrue( result );
        }
    }
}
public boolean setGap(DefaultIssue issue, @Nullable Double d, IssueChangeContext context) { if (!Objects.equals(d, issue.gap())) { issue.setGap(d); issue.setUpdateDate(context.date()); issue.setChanged(true); // Do not send notifications to prevent spam when installing the SQALE plugin, // and do not complete the changelog (for the moment) return true; } return false; }
// Setting the same gap value must be a no-op: no update, no changed flag,
// no notification.
@Test
void not_set_gap_to_fix_if_unchanged() {
    issue.setGap(3.14);
    boolean updated = underTest.setGap(issue, 3.14, context);
    assertThat(updated).isFalse();
    assertThat(issue.isChanged()).isFalse();
    assertThat(issue.gap()).isEqualTo(3.14);
    assertThat(issue.mustSendNotifications()).isFalse();
}
// Validates the record batch and assigns offsets, choosing among three paths:
// uncompressed with a magic mismatch (convert + assign), uncompressed in-place
// (assign, maybe set timestamps), or the compressed path.
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter,
                                                         MetricsRecorder metricsRecorder,
                                                         BufferSupplier bufferSupplier) {
    if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) {
        // check the magic value
        if (!records.hasMatchingMagic(toMagic))
            return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder);
        else
            // Do in-place validation, offset assignment and maybe set timestamp
            return assignOffsetsNonCompressed(offsetCounter, metricsRecorder);
    } else
        return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier);
}
// Up-converting uncompressed V1 records to V2 must preserve the assigned
// base offset.
@Test
public void testOffsetAssignmentAfterUpConversionV1ToV2NonCompressed() {
    MemoryRecords records = createRecords(RecordBatch.MAGIC_VALUE_V1, RecordBatch.NO_TIMESTAMP, Compression.NONE);
    checkOffsets(records, 0);
    long offset = 1234567;
    checkOffsets(new LogValidator(
            records,
            new TopicPartition("topic", 0),
            time,
            CompressionType.NONE,
            Compression.NONE,
            false,
            RecordBatch.MAGIC_VALUE_V2,
            TimestampType.LOG_APPEND_TIME,
            1000L,
            1000L,
            RecordBatch.NO_PARTITION_LEADER_EPOCH,
            AppendOrigin.CLIENT,
            MetadataVersion.latestTesting()
        ).validateMessagesAndAssignOffsets(
            PrimitiveRef.ofLong(offset),
            metricsRecorder,
            RequestLocal.withThreadConfinedCaching().bufferSupplier()
        ).validatedRecords, offset);
}
// Replicated maps do not support the set operation, so this statistic is
// intentionally unavailable.
@Override
public long getSetOperationCount() {
    throw new UnsupportedOperationException("Set operation on replicated maps is not supported.");
}
// Reading the set-operation count on replicated map stats must throw.
@Test(expected = UnsupportedOperationException.class)
public void testSetOperationCount() {
    localReplicatedMapStats.getSetOperationCount();
}
// Recursively emits capacity-scheduler settings for the given fair-scheduler
// queue and then for each of its children.
public void convertQueueHierarchy(FSQueue queue) {
    List<FSQueue> children = queue.getChildQueues();
    final String queueName = queue.getName();

    // Per-queue property emission, one concern per helper.
    emitChildQueues(queueName, children);
    emitMaxAMShare(queueName, queue);
    emitMaxParallelApps(queueName, queue);
    emitMaxAllocations(queueName, queue);
    emitPreemptionDisabled(queueName, queue);
    emitChildCapacity(queue);
    emitMaximumCapacity(queueName, queue);
    emitSizeBasedWeight(queueName);
    emitOrderingPolicy(queueName, queue);
    checkMaxChildCapacitySetting(queue);
    emitDefaultUserLimitFactor(queueName, children);

    // Depth-first recursion into child queues.
    for (FSQueue childQueue : children) {
        convertQueueHierarchy(childQueue);
    }
}
// Converting the fixture hierarchy must call the min-resources rule handler
// exactly twice (once per queue carrying minResources — TODO confirm fixture).
@Test
public void testQueueMinimumCapacity() {
    converter = builder.build();
    converter.convertQueueHierarchy(rootQueue);
    verify(ruleHandler, times(2)).handleMinResources();
}
// Shuts down all client resources in order (lookup, transaction coordinator,
// service url provider, event loop, connection pool, timer, executors, auth).
// Each step is attempted even if earlier ones fail; only the last failure is
// rethrown (all are logged).
@Override
public void shutdown() throws PulsarClientException {
    try {
        // We will throw the last thrown exception only, though logging all of them.
        Throwable throwable = null;
        if (lookup != null) {
            try {
                lookup.close();
            } catch (Throwable t) {
                log.warn("Failed to shutdown lookup", t);
                throwable = t;
            }
        }
        if (tcClient != null) {
            try {
                tcClient.close();
            } catch (Throwable t) {
                log.warn("Failed to close tcClient");
                throwable = t;
            }
        }

        // close the service url provider allocated resource.
        if (conf != null && conf.getServiceUrlProvider() != null) {
            conf.getServiceUrlProvider().close();
        }

        try {
            // Shutting down eventLoopGroup separately because in some cases, cnxPool might be using different
            // eventLoopGroup.
            shutdownEventLoopGroup(eventLoopGroup);
        } catch (PulsarClientException e) {
            log.warn("Failed to shutdown eventLoopGroup", e);
            throwable = e;
        }
        try {
            closeCnxPool(cnxPool);
        } catch (PulsarClientException e) {
            log.warn("Failed to shutdown cnxPool", e);
            throwable = e;
        }
        // Only stop the timer when this client owns it.
        if (timer != null && needStopTimer) {
            try {
                timer.stop();
            } catch (Throwable t) {
                log.warn("Failed to shutdown timer", t);
                throwable = t;
            }
        }
        try {
            shutdownExecutors();
        } catch (PulsarClientException e) {
            throwable = e;
        }
        if (conf != null && conf.getAuthentication() != null) {
            try {
                conf.getAuthentication().close();
            } catch (Throwable t) {
                log.warn("Failed to close authentication", t);
                throwable = t;
            }
        }
        if (throwable != null) {
            throw throwable;
        }
    } catch (Throwable t) {
        log.warn("Failed to shutdown Pulsar client", t);
        throw PulsarClientException.unwrap(t);
    }
}
// shutdown() must stop the (injected) timer when the client owns it.
@Test
public void testInitializeWithoutTimer() throws Exception {
    ClientConfigurationData conf = new ClientConfigurationData();
    conf.setServiceUrl("pulsar://localhost:6650");
    PulsarClientImpl client = new PulsarClientImpl(conf);

    // Replace the real timer with a mock via reflection to observe stop().
    HashedWheelTimer timer = mock(HashedWheelTimer.class);
    Field field = client.getClass().getDeclaredField("timer");
    field.setAccessible(true);
    field.set(client, timer);

    client.shutdown();
    verify(timer).stop();
}
/**
 * Builds a ULID for the given message from its timestamp and sequence number.
 * Falls back to a random-entropy ULID at the same timestamp if the
 * sequence-based creation fails for any reason (best-effort, logged).
 */
public String createULID(Message message) {
    // Rejects timestamps outside the ULID-representable range (checkTimestamp throws).
    checkTimestamp(message.getTimestamp().getMillis());
    try {
        return createULID(message.getTimestamp().getMillis(), message.getSequenceNr());
    } catch (Exception e) {
        LOG.error("Exception while creating ULID.", e);
        // Fallback: random entropy instead of the sequence number.
        return ulid.nextULID(message.getTimestamp().getMillis());
    }
}
/** A timestamp beyond the ULID-representable range must be rejected, not silently truncated. */
@Test
public void doesNotAcceptTooLargeTimestamp() {
    final MessageULIDGenerator generator = new MessageULIDGenerator(new ULID());
    // Just past the maximum millisecond value a ULID timestamp can encode.
    final DateTime largeDate = DateTime.parse("+10889-08-02T05:31:50.656Z");
    final Message message = messageFactory.createMessage("foo", "source", largeDate);
    assertThatThrownBy(() -> {
        generator.createULID(message);
    }).isInstanceOf(IllegalArgumentException.class);
}
/**
 * Creates a {@code WithTimestamps} transform that assigns each element the
 * timestamp computed by {@code fn}, allowing no backward skew.
 */
public static <T> WithTimestamps<T> of(SerializableFunction<T, Instant> fn) {
    final Duration allowedSkew = Duration.ZERO;
    return new WithTimestamps<>(fn, allowedSkew);
}
/**
 * Re-timestamping elements earlier than their current timestamp (without allowed skew)
 * must fail at pipeline execution time with an IllegalArgumentException.
 */
@Test
@Category(NeedsRunner.class)
public void withTimestampsBackwardsInTimeShouldThrow() {
    SerializableFunction<String, Instant> timestampFn = input -> new Instant(Long.valueOf(input));
    // Shifts every timestamp 1s into the past relative to the first assignment.
    SerializableFunction<String, Instant> backInTimeFn =
        input -> new Instant(Long.valueOf(input)).minus(Duration.millis(1000L));
    String yearTwoThousand = "946684800000";
    p.apply(Create.of("1234", "0", Integer.toString(Integer.MAX_VALUE), yearTwoThousand))
        .apply("WithTimestamps", WithTimestamps.of(timestampFn))
        .apply("AddSkew", WithTimestamps.of(backInTimeFn));
    thrown.expect(PipelineExecutionException.class);
    thrown.expectCause(isA(IllegalArgumentException.class));
    thrown.expectMessage("no earlier than the timestamp of the current input");
    p.run();
}
/**
 * Returns the counter-wise difference between this snapshot and {@code other},
 * clamping each result at zero so no counter ever reports a negative value
 * (e.g. when {@code other} is a later snapshot).
 */
public CacheStats minus(CacheStats other) {
    return CacheStats.of(
        Math.max(0L, hitCount - other.hitCount),
        Math.max(0L, missCount - other.missCount),
        Math.max(0L, loadSuccessCount - other.loadSuccessCount),
        Math.max(0L, loadFailureCount - other.loadFailureCount),
        Math.max(0L, totalLoadTime - other.totalLoadTime),
        Math.max(0L, evictionCount - other.evictionCount),
        Math.max(0L, evictionWeight - other.evictionWeight));
}
/**
 * Larger-minus-smaller yields the per-counter differences; smaller-minus-larger
 * clamps every counter to zero (i.e. equals the empty stats).
 */
@Test
public void minus() {
    var one = CacheStats.of(11, 13, 17, 19, 23, 27, 54);
    var two = CacheStats.of(53, 47, 43, 41, 37, 31, 62);
    var diff = two.minus(one);
    checkStats(diff, 76, 42, 42.0 / 76, 34, 34.0 / 76, 26, 22, 22.0 / 48,
        26 + 22, 14, 14.0 / (26 + 22), 4, 8);
    assertThat(one.minus(two)).isEqualTo(CacheStats.empty());
}
/**
 * Creates a DoFn that runs a Kafka Connect source connector and maps its records.
 *
 * @param connectorClass the connector to instantiate; the cast is unchecked — callers
 *        must pass a {@code SourceConnector} subclass (TODO confirm enforced upstream)
 * @param fn maps emitted SourceRecords to the output element type
 * @param maxRecords maximum number of records to consume
 * @param milisecondsToRun how long to run, in milliseconds (field name keeps the
 *        existing misspelling for consistency)
 */
KafkaSourceConsumerFn(
    Class<?> connectorClass,
    SourceRecordMapper<T> fn,
    Integer maxRecords,
    Long milisecondsToRun) {
    this.connectorClass = (Class<? extends SourceConnector>) connectorClass;
    this.fn = fn;
    this.maxRecords = maxRecords;
    this.milisecondsToRun = milisecondsToRun;
}
/** End-to-end: the counter connector emits 1..10 and the mapper extracts the int values. */
@Test
public void testKafkaSourceConsumerFn() {
    // Connector config: count from 1 to 10 with a 0.4s delay on topic "any".
    Map<String, String> config =
        ImmutableMap.of(
            "from", "1",
            "to", "10",
            "delay", "0.4",
            "topic", "any");
    Pipeline pipeline = Pipeline.create();
    PCollection<Integer> counts =
        pipeline
            .apply(
                Create.of(Lists.newArrayList(config))
                    .withCoder(MapCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of())))
            .apply(
                ParDo.of(
                    new KafkaSourceConsumerFn<>(
                        CounterSourceConnector.class,
                        sourceRecord -> ((Struct) sourceRecord.value()).getInt64("value").intValue(),
                        10)))
            .setCoder(VarIntCoder.of());
    PAssert.that(counts).containsInAnyOrder(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
    pipeline.run().waitUntilFinish();
}
/** Returns the number of blocks recorded in this header. */
public long getNum_blocks() {
    return this.num_blocks;
}
/** The parsed ITSP header must expose the block count from the test fixture. */
@Test
public void testGetNum_blocks() {
    assertEquals(TestParameters.VP_UNKNOWN_NUM_BLOCKS, chmItspHeader.getNum_blocks());
}
/**
 * Creates a {@code Write} transform with the default max batch size of 1000 documents.
 */
public static Write write() {
    // 1000 for batch size is good enough in many cases,
    // ex: if document size is large, around 10KB, the request's size will be around 10MB
    // if document size is small, around 1KB, the request's size will be around 1MB
    return new AutoValue_SolrIO_Write.Builder().setMaxBatchSize(1000).build();
}
/** Verifies that WriteFn flushes to Solr exactly at the configured batch-size boundaries. */
@Test
public void testWriteWithMaxBatchSize() throws Exception {
    SolrIO.Write write =
        SolrIO.write()
            .withConnectionConfiguration(connectionConfiguration)
            .to(SOLR_COLLECTION)
            .withMaxBatchSize(BATCH_SIZE);
    // write bundles size is the runner decision, we cannot force a bundle size,
    // so we test the Writer as a DoFn outside of a runner.
    try (DoFnTester<SolrInputDocument, Void> fnTester =
        DoFnTester.of(new SolrIO.Write.WriteFn(write))) {
        List<SolrInputDocument> input = SolrIOTestUtils.createDocuments(NUM_DOCS);
        long numDocsProcessed = 0;
        long numDocsInserted = 0;
        for (SolrInputDocument document : input) {
            fnTester.processElement(document);
            numDocsProcessed++;
            // test every 100 docs to avoid overloading Solr
            if ((numDocsProcessed % 100) == 0) {
                // force the index to upgrade after inserting for the inserted docs
                // to be searchable immediately
                long currentNumDocs =
                    SolrIOTestUtils.commitAndGetCurrentNumDocs(SOLR_COLLECTION, solrClient);
                if ((numDocsProcessed % BATCH_SIZE) == 0) {
                    /* bundle end */
                    assertEquals(
                        "we are at the end of a bundle, we should have inserted all processed documents",
                        numDocsProcessed,
                        currentNumDocs);
                    numDocsInserted = currentNumDocs;
                } else {
                    /* not bundle end */
                    assertEquals(
                        "we are not at the end of a bundle, we should have inserted no more documents",
                        numDocsInserted,
                        currentNumDocs);
                }
            }
        }
    }
}
/**
 * Parses a {@code TableIdentifier} from its JSON representation.
 *
 * @param json JSON string, e.g. {@code {"namespace":["a","b"],"name":"t"}}
 * @throws IllegalArgumentException if {@code json} is null or empty, or malformed
 */
public static TableIdentifier fromJson(String json) {
    Preconditions.checkArgument(
        json != null, "Cannot parse table identifier from invalid JSON: null");
    Preconditions.checkArgument(
        !json.isEmpty(), "Cannot parse table identifier from invalid JSON: ''");
    return JsonUtil.parse(json, TableIdentifierParser::fromJson);
}
/** Wrong JSON value types (string namespace, numeric name) must produce clear parse errors. */
@Test
public void testFailWhenFieldsHaveInvalidValues() {
    // "namespace" must be a JSON array, not a string.
    String invalidNamespace = "{\"namespace\":\"accounting.tax\",\"name\":\"paid\"}";
    assertThatThrownBy(() -> TableIdentifierParser.fromJson(invalidNamespace))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Cannot parse JSON array from non-array value: namespace: \"accounting.tax\"");
    // "name" must be a JSON string, not a number.
    String invalidName = "{\"namespace\":[\"accounting\",\"tax\"],\"name\":1234}";
    assertThatThrownBy(() -> TableIdentifierParser.fromJson(invalidName))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Cannot parse to a string value: name: 1234");
}
/**
 * Converts the given annotations into a {@code DataMap}, delegating to
 * {@code annotationsToData} with top-level processing enabled.
 */
public static DataMap getAnnotationsMap(Annotation[] as) {
    final boolean topLevel = true;
    return annotationsToData(as, topLevel);
}
/** An unsupported short-array annotation member is expected to surface as an NPE. */
@Test(description = "Unsafe call: RestSpecAnnotation annotation with short array member",
    expectedExceptions = NullPointerException.class)
public void failsOnRestSpecAnnotationShortArrayMember() {
    @UnsupportedShortArray
    class LocalClass {
    }
    final Annotation[] annotations = LocalClass.class.getAnnotations();
    ResourceModelAnnotation.getAnnotationsMap(annotations);
    // Unreachable if the expected NPE is thrown above.
    Assert.fail("Should fail throwing a NullPointerException");
}
/**
 * Returns the last path component of {@code path}.
 * Returns the input unchanged for the root path and for delimiter-free strings.
 */
public static String name(final String path) {
    // The root path ("/") has no name component.
    if(String.valueOf(Path.DELIMITER).equals(path)) {
        return path;
    }
    // No delimiter at all: the path is already a bare name.
    if(!StringUtils.contains(path, Path.DELIMITER)) {
        return path;
    }
    // Trailing delimiter (e.g. "p/n/"): normalize first to drop it, then take the tail.
    if(StringUtils.endsWith(path, String.valueOf(Path.DELIMITER))) {
        return StringUtils.substringAfterLast(normalize(path), String.valueOf(Path.DELIMITER));
    }
    // Common case: everything after the last delimiter.
    return StringUtils.substringAfterLast(path, String.valueOf(Path.DELIMITER));
}
/** Last-component extraction for absolute and relative paths. */
@Test
public void testName() {
    assertEquals("p", PathNormalizer.name("/p"));
    assertEquals("n", PathNormalizer.name("/p/n"));
    assertEquals("p", PathNormalizer.name("p"));
    assertEquals("n", PathNormalizer.name("p/n"));
}
public static Expression[] parseExpressions(String template, EvaluationContext context, String expressionPrefix, String expressionSuffix) throws ParseException { // Prepare an array for results. List<Expression> expressions = new ArrayList<>(); int startIdx = 0; while (startIdx < template.length()) { int prefixIndex = template.indexOf(expressionPrefix, startIdx); if (prefixIndex >= startIdx) { // an inner expression was found - this is a composite if (prefixIndex > startIdx) { log.debug("Found a literal expression starting at {}", startIdx); expressions.add(new LiteralExpression(template.substring(startIdx, prefixIndex))); } int afterPrefixIndex = prefixIndex + expressionPrefix.length(); int suffixIndex = skipToCorrectEndSuffix(expressionSuffix, template, afterPrefixIndex); if (suffixIndex == -1) { log.info("No ending suffix '{}' for expression starting at character {}: {}", expressionSuffix, prefixIndex, template.substring(prefixIndex)); throw new ParseException(template, prefixIndex, "No ending suffix '" + expressionSuffix + "' for expression starting at character " + prefixIndex + ": " + template.substring(prefixIndex)); } if (suffixIndex == afterPrefixIndex) { log.info("No expression defined within delimiter '{}' at character {}", expressionPrefix, prefixIndex); throw new ParseException(template, prefixIndex, "No expression defined within delimiter '" + expressionPrefix + expressionSuffix + "' at character " + prefixIndex); } String expr = template.substring(prefixIndex + expressionPrefix.length(), suffixIndex); expr = expr.trim(); if (expr.isEmpty()) { log.info("No expression defined within delimiter '{}' at character {}", expressionPrefix, prefixIndex); throw new ParseException(template, prefixIndex, "No expression defined within delimiter '" + expressionPrefix + expressionSuffix + "' at character " + prefixIndex); } expressions.add(doParseExpression(expr, context)); startIdx = suffixIndex + expressionSuffix.length(); log.debug("Expression accumulated. 
Pursuing with index {} on {}", startIdx, template.length()); } else { // no more expression. finalize with a literal. expressions.add(new LiteralExpression(template.substring(startIdx, template.length()))); break; } } return expressions.toArray(new Expression[0]); }
/**
 * A template mixing a redirect expression ({@code guid() > put(id)}) and a later
 * variable reference must parse into 4 alternating literal/expression parts, and the
 * value stored by the redirect must be readable through the variable reference.
 */
@Test
void testRedirectParseExpressions() {
    String template = "Hello {{ guid() > put(id) }} world! This is my {{ id }}";
    // Build a suitable context.
    EvaluationContext context = new EvaluationContext();
    context.registerFunction("guid", UUIDELFunction.class);
    context.registerFunction("put", PutInContextELFunction.class);
    Expression[] expressions = ExpressionParser.parseExpressions(template, context, "{{", "}}");
    assertEquals(4, expressions.length);
    assertTrue(expressions[0] instanceof LiteralExpression);
    assertTrue(expressions[1] instanceof RedirectExpression);
    assertTrue(expressions[2] instanceof LiteralExpression);
    assertTrue(expressions[3] instanceof VariableReferenceExpression);
    // The guid produced by the redirect must equal the value later read from the context.
    String guidValue = expressions[1].getValue(context);
    String contextValue = expressions[3].getValue(context);
    assertEquals(guidValue, contextValue);
}
/**
 * Creates a {@code ShardedKey} from the given key and shard id bytes.
 *
 * @throws IllegalArgumentException if {@code key} or {@code shardId} is null
 */
public static <K> ShardedKey<K> of(K key, byte[] shardId) {
    checkArgument(key != null, "Key should not be null!");
    checkArgument(shardId != null, "Shard id should not be null!");
    return new ShardedKey<K>(key, shardId);
}
/**
 * The ShardedKey coder must round-trip values, be consistent with equals,
 * and be deterministic — including for empty shard ids.
 */
@Test
public void testDecodeEncodeEqual() throws Exception {
    Coder<ShardedKey<String>> coder = ShardedKey.Coder.of(StringUtf8Coder.of());
    coder, ShardedKey.of(KEY, SHARD));
    CoderProperties.coderDecodeEncodeEqual(coder, ShardedKey.of(KEY, SHARD));
    CoderProperties.coderDecodeEncodeEqual(coder, ShardedKey.of(KEY, EMPTY_SHARD));
    CoderProperties.coderConsistentWithEquals(
        coder,
        ShardedKey.of(KEY, "shard_id".getBytes(UTF_8)),
        ShardedKey.of(KEY, "shard_id".getBytes(UTF_8)));
    CoderProperties.coderConsistentWithEquals(
        coder, ShardedKey.of(KEY, new byte[0]), ShardedKey.of(KEY, new byte[0]));
    CoderProperties.coderDeterministic(
        coder,
        ShardedKey.of(KEY, "shard_id".getBytes(UTF_8)),
        ShardedKey.of(KEY, "shard_id".getBytes(UTF_8)));
    CoderProperties.coderDeterministic(
        coder, ShardedKey.of(KEY, new byte[0]), ShardedKey.of(KEY, new byte[0]));
}
/**
 * Factory for a table UDAF that sums lists of ints into a single running int total.
 * null input lists leave the aggregate unchanged (both on aggregate and undo).
 */
@UdafFactory(description = "sum int values in a list into a single int")
public static TableUdaf<List<Integer>, Integer, Integer> sumIntList() {
    return new TableUdaf<List<Integer>, Integer, Integer>() {
        @Override
        public Integer initialize() {
            // Additive identity.
            return 0;
        }

        @Override
        public Integer aggregate(final List<Integer> valueToAdd, final Integer aggregateValue) {
            if (valueToAdd == null) {
                return aggregateValue;
            }
            return aggregateValue + sumList(valueToAdd);
        }

        @Override
        public Integer merge(final Integer aggOne, final Integer aggTwo) {
            return aggOne + aggTwo;
        }

        @Override
        public Integer map(final Integer agg) {
            // Identity mapping: the aggregate is already the final result.
            return agg;
        }

        @Override
        public Integer undo(final List<Integer> valueToUndo, final Integer aggregateValue) {
            if (valueToUndo == null) {
                return aggregateValue;
            }
            // Subtract the list's contribution (inverse of aggregate).
            return aggregateValue - sumList(valueToUndo);
        }

        // Sums all values in the list starting from the additive identity.
        private int sumList(final List<Integer> list) {
            return sum(list, initialize(), Integer::sum);
        }
    };
}
/** Aggregating a list of zeroes onto a zero aggregate yields zero. */
@Test
public void shouldASumZeroes() {
    final TableUdaf<List<Integer>, Integer, Integer> udaf = ListSumUdaf.sumIntList();
    final Integer[] values = new Integer[] {0, 0, 0, 0, 0};
    final List<Integer> list = Arrays.asList(values);
    final Integer sum = udaf.aggregate(list, 0);
    assertThat(0, equalTo(sum));
}
/** Returns a new unique node id; thread-safe via the atomic counter's increment. */
public static int getNewNodeId() {
    return ID_COUNTER.incrementAndGet();
}
/**
 * Hammers getNewNodeId from many threads simultaneously and checks that no id is
 * handed out twice (deduplicated set size equals total requested ids).
 */
@Test
void testGetNewNodeIdIsThreadSafe() throws Exception {
    final int numThreads = 10;
    final int numIdsPerThread = 100;
    final List<CheckedThread> threads = new ArrayList<>();
    // Released once all threads are started so they race on the counter together.
    final OneShotLatch startLatch = new OneShotLatch();
    final List<List<Integer>> idLists = Collections.synchronizedList(new ArrayList<>());
    for (int x = 0; x < numThreads; x++) {
        threads.add(
            new CheckedThread() {
                @Override
                public void go() throws Exception {
                    startLatch.await();
                    final List<Integer> ids = new ArrayList<>();
                    for (int c = 0; c < numIdsPerThread; c++) {
                        ids.add(Transformation.getNewNodeId());
                    }
                    idLists.add(ids);
                }
            });
    }
    threads.forEach(Thread::start);
    startLatch.trigger();
    for (CheckedThread thread : threads) {
        thread.sync();
    }
    // Any duplicate id would shrink the set below numThreads * numIdsPerThread.
    final Set<Integer> deduplicatedIds =
        idLists.stream().flatMap(List::stream).collect(Collectors.toSet());
    assertThat(deduplicatedIds).hasSize(numThreads * numIdsPerThread);
}
/** Parses the given natural-language text relative to the current moment. */
public List<Date> parse(String language) {
    final Date now = new Date();
    return parse(language, now);
}
/** "yesterday" must parse to a date whose day/month/year are one day before now. */
@Test
public void testParseYesterday() {
    Calendar yesterday = Calendar.getInstance();
    yesterday.setTime(new Date());
    yesterday.add(Calendar.DAY_OF_MONTH, -1);
    List<Date> parse = new PrettyTimeParser().parse("yesterday");
    Assert.assertFalse(parse.isEmpty());
    Calendar parsedDate = Calendar.getInstance();
    parsedDate.setTime(parse.get(0));
    // Compare date fields only; time-of-day is unspecified for "yesterday".
    Assert.assertEquals(yesterday.get(Calendar.DAY_OF_MONTH), parsedDate.get(Calendar.DAY_OF_MONTH));
    Assert.assertEquals(yesterday.get(Calendar.MONTH), parsedDate.get(Calendar.MONTH));
    Assert.assertEquals(yesterday.get(Calendar.YEAR), parsedDate.get(Calendar.YEAR));
}
/**
 * Strips known deprecation messages from the response's "Warning" headers,
 * re-adding all other Warning headers untouched.
 */
@Override
public void process(HttpResponse response, HttpContext context) throws HttpException, IOException {
    // Keep only Warning headers that are NOT recognized deprecation messages.
    List<Header> warnings = Arrays.stream(response.getHeaders("Warning"))
            .filter(header -> !this.isDeprecationMessage(header.getValue()))
            .collect(Collectors.toList());
    // Remove all Warning headers, then restore the ones we kept.
    response.removeHeaders("Warning");
    // Idiom: Iterable.forEach with a method reference instead of stream().forEach.
    warnings.forEach(response::addHeader);
}
/**
 * With three headers (one non-Warning, one benign Warning, one deprecation Warning),
 * the interceptor must remove exactly the deprecation Warning, leaving two headers.
 */
@Test
public void testInterceptorMultipleHeaderFilteredWarning2() throws IOException, HttpException {
    ElasticsearchFilterDeprecationWarningsInterceptor interceptor =
        new ElasticsearchFilterDeprecationWarningsInterceptor();
    HttpResponse response =
        new BasicHttpResponse(new BasicStatusLine(new ProtocolVersion("HTTP", 0, 0), 0, null));
    response.addHeader("Test", "This header should not trigger the interceptor.");
    response.addHeader("Warning", "This warning should not trigger the interceptor.");
    response.addHeader("Warning", "This text contains the trigger: but in a future major version, direct access to system indices and their aliases will not be allowed - and should be filtered out");
    assertThat(response.getAllHeaders())
        .as("Number of Headers should be 3 before start.")
        .hasSize(3);
    interceptor.process(response, null);
    assertThat(response.getAllHeaders())
        .as("Number of Headers should be 1 less after running the interceptor.")
        .hasSize(2);
}
/**
 * Registers a release-message listener, skipping duplicates.
 * NOTE(review): contains+add is not atomic — assumes registration happens on a
 * single thread or that {@code listeners} is a concurrency-safe collection; verify.
 */
public void addMessageListener(ReleaseMessageListener listener) {
    if (!listeners.contains(listener)) {
        listeners.add(listener);
    }
}
/**
 * Simulates a scan over release messages with id gaps (missing ids 2 and 4, rolled-back id 5)
 * and verifies that the scanner both delivers new messages in order and later back-fills
 * the missing ids it finds via findAllById, notifying the listener for each.
 */
@Test
public void testScanMessageWithGapAndNotifyMessageListener() throws Exception {
    String someMessage = "someMessage";
    long someId = 1;
    ReleaseMessage someReleaseMessage = assembleReleaseMessage(someId, someMessage);
    String someMissingMessage = "someMissingMessage";
    long someMissingId = 2;
    ReleaseMessage someMissingReleaseMessage = assembleReleaseMessage(someMissingId, someMissingMessage);
    String anotherMessage = "anotherMessage";
    long anotherId = 3;
    ReleaseMessage anotherReleaseMessage = assembleReleaseMessage(anotherId, anotherMessage);
    String anotherMissingMessage = "anotherMissingMessage";
    long anotherMissingId = 4;
    ReleaseMessage anotherMissingReleaseMessage = assembleReleaseMessage(anotherMissingId, anotherMissingMessage);
    // Id 5 is rolled back: it must never be delivered even when queried as missing.
    long someRolledBackId = 5;
    String yetAnotherMessage = "yetAnotherMessage";
    long yetAnotherId = 6;
    ReleaseMessage yetAnotherReleaseMessage = assembleReleaseMessage(yetAnotherId, yetAnotherMessage);
    ArrayList<ReleaseMessage> receivedMessage = Lists.newArrayList();
    SettableFuture<ReleaseMessage> someListenerFuture = SettableFuture.create();
    ReleaseMessageListener someListener = (message, channel) -> receivedMessage.add(message);
    releaseMessageScanner.addMessageListener(someListener);
    // First scan from id 0 returns message 1.
    when(releaseMessageRepository.findFirst500ByIdGreaterThanOrderByIdAsc(0L)).thenReturn(
        Lists.newArrayList(someReleaseMessage));
    await().untilAsserted(() -> {
        assertEquals(1, receivedMessage.size());
        assertSame(someReleaseMessage, receivedMessage.get(0));
    });
    // Next scan jumps to id 3, leaving a gap at id 2.
    when(releaseMessageRepository.findFirst500ByIdGreaterThanOrderByIdAsc(someId)).thenReturn(
        Lists.newArrayList(anotherReleaseMessage));
    await().untilAsserted(() -> {
        assertEquals(2, receivedMessage.size());
        assertSame(someReleaseMessage, receivedMessage.get(0));
        assertSame(anotherReleaseMessage, receivedMessage.get(1));
    });
    // The scanner back-fills the gap at id 2.
    when(releaseMessageRepository.findAllById(Sets.newHashSet(someMissingId)))
        .thenReturn(Lists.newArrayList(someMissingReleaseMessage));
    await().untilAsserted(() -> {
        assertEquals(3, receivedMessage.size());
        assertSame(someReleaseMessage, receivedMessage.get(0));
        assertSame(anotherReleaseMessage, receivedMessage.get(1));
        assertSame(someMissingReleaseMessage, receivedMessage.get(2));
    });
    // Scan continues to id 6, leaving gaps at ids 4 and 5.
    when(releaseMessageRepository.findFirst500ByIdGreaterThanOrderByIdAsc(anotherId)).thenReturn(
        Lists.newArrayList(yetAnotherReleaseMessage));
    await().untilAsserted(() -> {
        assertEquals(4, receivedMessage.size());
        assertSame(someReleaseMessage, receivedMessage.get(0));
        assertSame(anotherReleaseMessage, receivedMessage.get(1));
        assertSame(someMissingReleaseMessage, receivedMessage.get(2));
        assertSame(yetAnotherReleaseMessage, receivedMessage.get(3));
    });
    // Back-fill query covers both gaps, but only id 4 exists (id 5 was rolled back).
    when(releaseMessageRepository.findAllById(Sets.newHashSet(anotherMissingId, someRolledBackId)))
        .thenReturn(Lists.newArrayList(anotherMissingReleaseMessage));
    await().untilAsserted(() -> {
        assertEquals(5, receivedMessage.size());
        assertSame(someReleaseMessage, receivedMessage.get(0));
        assertSame(anotherReleaseMessage, receivedMessage.get(1));
        assertSame(someMissingReleaseMessage, receivedMessage.get(2));
        assertSame(yetAnotherReleaseMessage, receivedMessage.get(3));
        assertSame(anotherMissingReleaseMessage, receivedMessage.get(4));
    });
}
/**
 * Builds a StarRocks sink from factory configuration: validates options (allowing the
 * table-create and raw-sink property prefixes to pass through), then resolves the
 * pipeline time zone (system default when left at the config default).
 */
@Override
public DataSink createDataSink(Context context) {
    FactoryHelper.createFactoryHelper(this, context)
        .validateExcept(TABLE_CREATE_PROPERTIES_PREFIX, SINK_PROPERTIES_PREFIX);
    StarRocksSinkOptions sinkOptions = buildSinkConnectorOptions(context.getFactoryConfiguration());
    TableCreateConfig tableCreateConfig = TableCreateConfig.from(context.getFactoryConfiguration());
    SchemaChangeConfig schemaChangeConfig = SchemaChangeConfig.from(context.getFactoryConfiguration());
    String zoneStr = context.getFactoryConfiguration().get(PIPELINE_LOCAL_TIME_ZONE);
    // The config default is a sentinel meaning "use the JVM's zone".
    ZoneId zoneId =
        PIPELINE_LOCAL_TIME_ZONE.defaultValue().equals(zoneStr)
            ? ZoneId.systemDefault()
            : ZoneId.of(zoneStr);
    return new StarRocksDataSink(sinkOptions, tableCreateConfig, schemaChangeConfig, zoneId);
}
/** An unknown configuration key must fail factory validation with a descriptive message. */
@Test
void testUnsupportedOption() {
    DataSinkFactory sinkFactory =
        FactoryDiscoveryUtils.getFactoryByIdentifier("starrocks", DataSinkFactory.class);
    Assertions.assertThat(sinkFactory).isInstanceOf(StarRocksDataSinkFactory.class);
    Configuration conf =
        Configuration.fromMap(
            ImmutableMap.<String, String>builder()
                .put("jdbc-url", "jdbc:mysql://127.0.0.1:9030")
                .put("load-url", "127.0.0.1:8030")
                .put("username", "root")
                .put("password", "")
                .put("unsupported_key", "unsupported_value")
                .build());
    Assertions.assertThatThrownBy(
            () ->
                sinkFactory.createDataSink(
                    new FactoryHelper.DefaultContext(
                        conf, conf, Thread.currentThread().getContextClassLoader())))
        .isInstanceOf(ValidationException.class)
        .hasMessageContaining(
            "Unsupported options found for 'starrocks'.\n\n"
                + "Unsupported options:\n\n"
                + "unsupported_key");
}
/**
 * Creates all ACL bindings a Kafka Streams application needs (derived from the request)
 * and applies them to the given cluster; completes when creation finishes.
 */
public Mono<Void> createStreamAppAcl(KafkaCluster cluster, CreateStreamAppAclDTO request) {
    return adminClientService.get(cluster)
        .flatMap(ac -> createAclsWithLogging(ac, createStreamAppBindings(request)))
        .then();
}
/**
 * A streams-app ACL request must expand to: READ on each input topic, WRITE on each
 * output topic, and ALL on the applicationId-prefixed consumer groups and topics.
 */
@Test
void createsStreamAppDependantAcls() {
    ArgumentCaptor<Collection<AclBinding>> createdCaptor = ArgumentCaptor.forClass(Collection.class);
    when(adminClientMock.createAcls(createdCaptor.capture()))
        .thenReturn(Mono.empty());
    var principal = UUID.randomUUID().toString();
    var host = UUID.randomUUID().toString();
    aclsService.createStreamAppAcl(
        CLUSTER,
        new CreateStreamAppAclDTO()
            .principal(principal)
            .host(host)
            .inputTopics(List.of("t1"))
            .outputTopics(List.of("t2", "t3"))
            .applicationId("appId1")
    ).block();
    // Read on input topics, Write on output topics
    // ALL on applicationId-prefixed Groups and Topics
    Collection<AclBinding> createdBindings = createdCaptor.getValue();
    assertThat(createdBindings)
        .hasSize(5)
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "t1", PatternType.LITERAL),
            new AccessControlEntry(principal, host, AclOperation.READ, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "t2", PatternType.LITERAL),
            new AccessControlEntry(principal, host, AclOperation.WRITE, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "t3", PatternType.LITERAL),
            new AccessControlEntry(principal, host, AclOperation.WRITE, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.GROUP, "appId1", PatternType.PREFIXED),
            new AccessControlEntry(principal, host, AclOperation.ALL, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "appId1", PatternType.PREFIXED),
            new AccessControlEntry(principal, host, AclOperation.ALL, AclPermissionType.ALLOW)));
}
/** No-op: this implementation holds no resources that need releasing. */
@Override
public void close() {
}
/**
 * When the remote node's stream errors out, the router must retry the remote request;
 * the second attempt delivers the remote rows, so all local and remote rows arrive.
 */
@Test
public void shouldSucceed_remoteNodeExceptionWithRetry() throws ExecutionException, InterruptedException {
    // Given:
    final AtomicReference<Set<KsqlNode>> nodes = new AtomicReference<>(
        ImmutableSet.of(ksqlNodeLocal, ksqlNodeRemote));
    final PushRouting routing = new PushRouting(sqr -> nodes.get(), 50, true);
    AtomicReference<TestRemotePublisher> remotePublisher = new AtomicReference<>();
    AtomicInteger remoteCount = new AtomicInteger(0);
    // Only the retry (second call) produces the remote rows.
    when(simpleKsqlClient.makeQueryRequestStreamed(any(), any(), any(), any()))
        .thenAnswer(a -> {
            remotePublisher.set(new TestRemotePublisher(context));
            if (remoteCount.incrementAndGet() == 2) {
                remotePublisher.get().accept(REMOTE_ROW1);
                remotePublisher.get().accept(REMOTE_ROW2);
            }
            return createFuture(RestResponse.successful(200, remotePublisher.get()));
        });
    // When:
    final PushConnectionsHandle handle = handlePushRouting(routing);
    final AtomicReference<Throwable> exception = new AtomicReference<>(null);
    handle.onException(exception::set);
    context.runOnContext(v -> {
        localPublisher.accept(LOCAL_ROW1);
        localPublisher.accept(LOCAL_ROW2);
        // Fail the first remote stream to force the retry path.
        remotePublisher.get().error(new RuntimeException("Random error"));
    });
    Set<List<?>> rows = waitOnRows(4);
    handle.close();
    // Then:
    assertThat(rows.contains(LOCAL_ROW1.value().values()), is(true));
    assertThat(rows.contains(LOCAL_ROW2.value().values()), is(true));
    assertThat(rows.contains(REMOTE_ROW1.getRow().get().getColumns()), is(true));
    assertThat(rows.contains(REMOTE_ROW2.getRow().get().getColumns()), is(true));
}
/**
 * Returns true when the filter value is unset (null, i.e. "match anything")
 * or equals the repository value.
 *
 * Fix: the original called {@code repoObj.equals(...)} directly and threw an NPE
 * when the repository value was null but the filter value was not; a null repo
 * value now simply fails to match a non-null filter.
 */
static boolean fieldMatch(Object repoObj, Object filterObj) {
    return filterObj == null || (repoObj != null && repoObj.equals(filterObj));
}
/** An Integer never equals a String with the same digits, so the match must fail. */
@Test
public void testFieldMatchWithNonStringObjectsShouldReturnFalse() {
    assertFalse(Utilities.fieldMatch(42, "42"));
}
/** Creates an IPv4 prefix from a packed integer address and a prefix length. */
public static Ip4Prefix valueOf(int address, int prefixLength) {
    final Ip4Address ip = Ip4Address.valueOf(address);
    return new Ip4Prefix(ip, prefixLength);
}
/** An Ip4Prefix must always report the INET (IPv4) address version. */
@Test
public void testVersion() {
    Ip4Prefix ipPrefix;
    // IPv4
    ipPrefix = Ip4Prefix.valueOf("0.0.0.0/0");
    assertThat(ipPrefix.version(), is(IpAddress.Version.INET));
}
/**
 * Parses a MAC-48/EUI-48 (17 chars) or EUI-64 (23 chars) address string into bytes.
 * Accepts whichever single separator character appears at index 2 (validated by
 * {@code validateMacSeparator}) and requires it between every byte pair.
 *
 * @throws IllegalArgumentException on unsupported length, bad separator, or bad hex
 */
public static byte[] parseMAC(String value) {
    final byte[] machineId;
    final char separator;
    switch (value.length()) {
        case 17:
            // "xx?xx?..." — 6 bytes with 5 separators.
            separator = value.charAt(2);
            validateMacSeparator(separator);
            machineId = new byte[EUI48_MAC_ADDRESS_LENGTH];
            break;
        case 23:
            // 8 bytes with 7 separators.
            separator = value.charAt(2);
            validateMacSeparator(separator);
            machineId = new byte[EUI64_MAC_ADDRESS_LENGTH];
            break;
        default:
            throw new IllegalArgumentException("value is not supported [MAC-48, EUI-48, EUI-64]");
    }
    final int end = machineId.length - 1;
    int j = 0;
    for (int i = 0; i < end; ++i, j += 3) {
        final int sIndex = j + 2;
        machineId[i] = StringUtil.decodeHexByte(value, j);
        if (value.charAt(sIndex) != separator) {
            // Fix: the message previously read "expected separator 'X but got 'Y'" —
            // the closing quote after the expected separator was missing.
            throw new IllegalArgumentException("expected separator '" + separator
                    + "' but got '" + value.charAt(sIndex) + "' at index: " + sIndex);
        }
    }
    // Last byte has no trailing separator.
    machineId[end] = StringUtil.decodeHexByte(value, j);
    return machineId;
}
/** EUI-48 parsing must accept both '-' and ':' separators and yield the same 6 bytes. */
@Test
public void testParseMacEUI48() {
    assertArrayEquals(new byte[]{0, (byte) 0xaa, 0x11, (byte) 0xbb, 0x22, (byte) 0xcc},
        parseMAC("00-AA-11-BB-22-CC"));
    assertArrayEquals(new byte[]{0, (byte) 0xaa, 0x11, (byte) 0xbb, 0x22, (byte) 0xcc},
        parseMAC("00:AA:11:BB:22:CC"));
}
/**
 * Encrypts config content when the dataId names a cipher algorithm
 * (e.g. "cipher-&lt;algo&gt;-&lt;app&gt;").
 *
 * @return a pair of (encrypted secret key, encrypted content); when the dataId is not
 *         a cipher id or no matching plugin is registered, returns ("", original content)
 */
public static Pair<String, String> encryptHandler(String dataId, String content) {
    // Not a cipher dataId: pass content through unencrypted.
    if (!checkCipher(dataId)) {
        return Pair.with("", content);
    }
    Optional<String> algorithmName = parseAlgorithmName(dataId);
    Optional<EncryptionPluginService> optional = algorithmName.flatMap(
        EncryptionPluginManager.instance()::findEncryptionService);
    if (!optional.isPresent()) {
        // No plugin for the named algorithm: log and pass content through.
        LOGGER.warn("[EncryptionHandler] [encryptHandler] No encryption program with the corresponding name found");
        return Pair.with("", content);
    }
    EncryptionPluginService encryptionPluginService = optional.get();
    // A fresh secret key per call; the key itself is also stored encrypted.
    String secretKey = encryptionPluginService.generateSecretKey();
    String encryptContent = encryptionPluginService.encrypt(secretKey, content);
    return Pair.with(encryptionPluginService.encryptSecretKey(secretKey), encryptContent);
}
/**
 * For a cipher dataId, the handler must return the plugin-encrypted content and
 * the encrypted secret key (the mock plugin is deterministic, so values compare equal).
 */
@Test
void testEncrypt() {
    String dataId = "cipher-mockAlgo-application";
    String content = "content";
    String sec = mockEncryptionPluginService.generateSecretKey();
    Pair<String, String> pair = EncryptionHandler.encryptHandler(dataId, content);
    assertNotNull(pair);
    assertEquals(mockEncryptionPluginService.encrypt(sec, content), pair.getSecond(),
        "should return encrypted content.");
    assertEquals(mockEncryptionPluginService.encryptSecretKey(sec), pair.getFirst(),
        "should return encrypted secret key.");
}
/** Looks up a consumer by its id, returning {@code null} when none exists. */
public Consumer getConsumerByConsumerId(long consumerId) {
    final Optional<Consumer> consumer = consumerRepository.findById(consumerId);
    return consumer.orElse(null);
}
/** The service must return the repository's consumer and query it exactly once. */
@Test
public void testGetConsumerByConsumerId() throws Exception {
    long someConsumerId = 1;
    Consumer someConsumer = mock(Consumer.class);
    when(consumerRepository.findById(someConsumerId)).thenReturn(Optional.of(someConsumer));
    assertEquals(someConsumer, consumerService.getConsumerByConsumerId(someConsumerId));
    verify(consumerRepository, times(1)).findById(someConsumerId);
}
protected boolean isMatch(String invokerId) { if (allEffective) { return true; } else { //如果没有排除,那么只生效指定id,其余不生效。 if (excludeId.size() == 0) { return effectiveId.contains(invokerId); //如果有排除,那么除排除id外,其余都生效。 } else { return !excludeId.contains(invokerId); } } }
/**
 * Without an id rule every id matches; with an "AAA,BBB" rule a provider whose
 * id is "AAA" must be loaded by the filter.
 */
@Test
public void testIsMatch() {
    TestCustomizeFilter testCustomizeFilter = new TestCustomizeFilter();
    // No rule configured: everything (even the empty id) matches.
    Assert.assertTrue(testCustomizeFilter.isMatch(""));
    testCustomizeFilter = new TestCustomizeFilter();
    testCustomizeFilter.setIdRule("AAA,BBB");
    AbstractInterfaceConfig configA = new ProviderConfig();
    configA.setInterfaceId(Serializer.class.getName());
    configA.setId("AAA");
    FilterInvoker filterInvokerA = new FilterInvoker(null, null, configA);
    Assert.assertEquals(true, testCustomizeFilter.needToLoad(filterInvokerA));
}
/**
 * Resolves the partition columns, rewriting a column's type to the partition
 * expression's result type when they differ (e.g. a function expression changing
 * the type). Columns whose expression type is INVALID are left untouched.
 */
@Override
public List<Column> getPartitionColumns(Map<ColumnId, Column> idToColumn) {
    List<Column> columns = MetaUtils.getColumnsByColumnIds(idToColumn, partitionColumnIds);
    // partitionExprs is assumed to be index-aligned with partitionColumnIds — TODO confirm.
    for (int i = 0; i < columns.size(); i++) {
        Expr expr = partitionExprs.get(i).convertToColumnNameExpr(idToColumn);
        Column column = columns.get(i);
        if (expr.getType().getPrimitiveType() != PrimitiveType.INVALID_TYPE
                && expr.getType().getPrimitiveType() != column.getType().getPrimitiveType()) {
            // Copy before mutating so the shared metadata column is not modified.
            Column newColumn = new Column(column);
            newColumn.setType(expr.getType());
            columns.set(i, newColumn);
        }
    }
    return columns;
}
/** A plain SlotRef partition expression resolves to the schema column unchanged. */
@Test
public void testInitUseSlotRef() {
    Column k1 = new Column("k1", new ScalarType(PrimitiveType.DATETIME), true, null, "", "");
    SlotRef slotRef = new SlotRef(tableName, "k1");
    partitionExprs.add(ColumnIdExpr.create(slotRef));
    List<Column> schema = Collections.singletonList(k1);
    ExpressionRangePartitionInfo expressionRangePartitionInfo =
        new ExpressionRangePartitionInfo(partitionExprs, schema, PartitionType.RANGE);
    List<Column> partitionColumns = expressionRangePartitionInfo.getPartitionColumns(
        MetaUtils.buildIdToColumn(schema));
    Assert.assertEquals(partitionColumns.size(), 1);
    Assert.assertEquals(partitionColumns.get(0), k1);
}
/**
 * Creates a UTF-8 header template for the given header name and values.
 *
 * @throws IllegalArgumentException if {@code name} is null/empty or {@code values} is null
 */
public static HeaderTemplate create(String name, Iterable<String> values) {
    if (name == null || name.isEmpty()) {
        throw new IllegalArgumentException("name is required.");
    }
    if (values == null) {
        throw new IllegalArgumentException("values are required");
    }
    return new HeaderTemplate(name, values, Util.UTF_8);
}
/** An empty header name must be rejected with the exact "name is required." message. */
@Test
void it_should_throw_exception_when_name_is_empty() {
    IllegalArgumentException exception = assertThrows(IllegalArgumentException.class,
        () -> HeaderTemplate.create("", Collections.singletonList("test")));
    assertThat(exception.getMessage()).isEqualTo("name is required.");
}
/**
 * Sets the Jersey root path from the "rootPath" YAML property;
 * a null value leaves the path absent (Optional.empty).
 */
@JsonProperty("rootPath")
public void setJerseyRootPath(String jerseyRootPath) {
    this.jerseyRootPath = Optional.ofNullable(jerseyRootPath);
}
/** The YAML-configured root path must win over a pattern set programmatically at run time. */
@Test
void usesYamlDefinedPattern() {
    serverFactory.setJerseyRootPath(YAML_SET_PATTERN);
    jerseyEnvironment.setUrlPattern(RUN_SET_PATTERN);
    serverFactory.build(environment);
    assertThat(jerseyEnvironment.getUrlPattern()).isEqualTo(YAML_SET_PATTERN);
}
/** This scanner handles methods only (null-safe: {@code instanceof} is false for null). */
@Override
public boolean support(AnnotatedElement annotatedEle) {
    return annotatedEle instanceof Method;
}
/** Methods are supported; null, classes, and fields are not. */
@Test
public void supportTest() {
    AnnotationScanner scanner = new MethodAnnotationScanner();
    assertTrue(scanner.support(ReflectUtil.getMethod(Example.class, "test")));
    assertFalse(scanner.support(null));
    assertFalse(scanner.support(Example.class));
    assertFalse(scanner.support(ReflectUtil.getField(Example.class, "id")));
}
/** Returns this analyzer's display name (the class-level constant). */
@Override
public String getName() {
    return ANALYZER_NAME;
}
/**
 * Analyzing a real gemspec must populate vendor/product/version evidence and set the
 * dependency's name, version, ecosystem, and display file name from the spec.
 */
@Test
public void testAnalyzeGemspec() throws AnalysisException {
    final Dependency result = new Dependency(BaseTest.getResourceAsFile(this,
        "ruby/vulnerable/gems/rails-4.1.15/vendor/bundle/ruby/2.2.0/specifications/dalli-2.7.5.gemspec"));
    analyzer.analyze(result, null);
    final String vendorString = result.getEvidence(EvidenceType.VENDOR).toString();
    // Authors, email, homepage and license all count as vendor evidence.
    assertThat(vendorString, containsString("Peter M. Goldstein"));
    assertThat(vendorString, containsString("Mike Perham"));
    assertThat(vendorString, containsString("peter.m.goldstein@gmail.com"));
    assertThat(vendorString, containsString("https://github.com/petergoldstein/dalli"));
    assertThat(vendorString, containsString("MIT"));
    assertThat(result.getEvidence(EvidenceType.PRODUCT).toString(), containsString("dalli"));
    assertThat(result.getEvidence(EvidenceType.PRODUCT).toString(),
        containsString("High performance memcached client for Ruby"));
    assertThat(result.getEvidence(EvidenceType.VERSION).toString(), containsString("2.7.5"));
    assertEquals("dalli", result.getName());
    assertEquals("2.7.5", result.getVersion());
    assertEquals(RubyBundlerAnalyzer.DEPENDENCY_ECOSYSTEM, result.getEcosystem());
    assertEquals("dalli:2.7.5", result.getDisplayFileName());
}
/**
 * Reassembles this URL as a string: protocol, host, non-default port,
 * encoded path, query string, and encoded fragment — each part emitted
 * only when present.
 */
@Override
public String toString() {
    StringBuilder b = new StringBuilder();
    if (StringUtils.isNotBlank(protocol)) {
        b.append(protocol);
        b.append("://");
    }
    if (StringUtils.isNotBlank(host)) {
        b.append(host);
    }
    // Only render the port when it is explicit and not the protocol default.
    if (!isPortDefault() && port != -1) {
        b.append(':');
        b.append(port);
    }
    if (StringUtils.isNotBlank(path)) {
        // If no scheme/host/port, leave the path as is
        if (b.length() > 0 && !path.startsWith("/")) {
            b.append('/');
        }
        b.append(encodePath(path));
    }
    if (queryString != null && !queryString.isEmpty()) {
        b.append(queryString.toString());
    }
    if (fragment != null) {
        b.append("#");
        b.append(encodePath(fragment));
    }
    return b.toString();
}
// null, empty, and whitespace-only URL inputs must all stringify to "".
@Test
public void testNullOrBlankURLs() {
    s = null;
    t = "";
    assertEquals(t, new HttpURL(s).toString());
    s = "";
    t = "";
    assertEquals(t, new HttpURL(s).toString());
    s = " ";
    t = "";
    assertEquals(t, new HttpURL(s).toString());
}
/**
 * Creates a row batch builder sized for the given partition count.
 *
 * <p>The total target size (partitions x average row size) is clamped into
 * [MIN_TARGET_SIZE_IN_BYTES, MAX_TARGET_SIZE_IN_BYTES], and the per-row
 * target is re-derived so it never exceeds what the clamped total allows
 * per partition.
 *
 * @param partitionCount              number of output partitions; must be positive
 * @param targetAverageRowSizeInBytes desired average serialized row size in bytes
 */
public static PrestoSparkRowBatchBuilder builder(int partitionCount, int targetAverageRowSizeInBytes)
{
    checkArgument(partitionCount > 0, "partitionCount must be greater then zero: %s", partitionCount);
    // Clamp the total target size into the supported range in one expression.
    int clampedTargetSize = min(max(partitionCount * targetAverageRowSizeInBytes, MIN_TARGET_SIZE_IN_BYTES), MAX_TARGET_SIZE_IN_BYTES);
    // Keep the per-row target consistent with the clamped total.
    int effectiveRowSize = min(clampedTargetSize / partitionCount, targetAverageRowSizeInBytes);
    return builder(
            partitionCount,
            clampedTargetSize,
            DEFAULT_EXPECTED_ROWS_COUNT,
            effectiveRowSize,
            MULTI_ROW_ENTRY_MAX_SIZE_IN_BYTES,
            MULTI_ROW_ENTRY_MAX_ROW_COUNT);
}
// A builder whose capacity fits a single small row must report full after
// one insertion (and non-empty), having started empty and not full.
@Test
public void testBuilderFull() {
    PrestoSparkRowBatchBuilder builder = PrestoSparkRowBatch.builder(
            10,
            5,
            10,
            NO_TARGET_ENTRY_SIZE_REQUIREMENT,
            UNLIMITED_MAX_ENTRY_SIZE,
            UNLIMITED_MAX_ENTRY_ROW_COUNT);
    assertFalse(builder.isFull());
    assertTrue(builder.isEmpty());
    addRow(builder, createRow(1, "12345"));
    assertTrue(builder.isFull());
    assertFalse(builder.isEmpty());
}
/**
 * Type converter exposing a Mina {@link IoBuffer} as an {@link InputStream}
 * over the buffer's remaining bytes.
 */
@Converter
public static InputStream toInputStream(IoBuffer buffer) {
    return buffer.asInputStream();
}
// The converted stream must yield exactly the wrapped bytes, in order.
@Test
public void testToInputStream() throws Exception {
    byte[] in = "Hello World".getBytes();
    IoBuffer bb = IoBuffer.wrap(in);
    try (InputStream is = MinaConverter.toInputStream(bb)) {
        for (byte b : in) {
            int out = is.read();
            assertEquals(b, out);
        }
    }
}
@VisibleForTesting protected Map<String, Object> read(String json) { final Object result = jsonPath.read(json); final Map<String, Object> fields = Maps.newHashMap(); if (result instanceof Integer || result instanceof Double || result instanceof Long) { fields.put("result", result); } else if (result instanceof List) { final List list = (List) result; if (!list.isEmpty()) { fields.put("result", list.get(0).toString()); } } else { // Now it's most likely a string or something we do not map. fields.put("result", result.toString()); } return fields; }
// A JSON path selecting a double field must surface it as 0.5 under "result".
@Test
public void testReadResultingInDouble() throws Exception {
    String json = "{\"url\":\"https://api.github.com/repos/Graylog2/graylog2-server/releases/assets/22660\",\"some_double\":0.50,\"id\":22660,\"name\":\"graylog2-server-0.20.0-preview.1.tgz\",\"label\":\"graylog2-server-0.20.0-preview.1.tgz\",\"content_type\":\"application/octet-stream\",\"state\":\"uploaded\",\"size\":38179285,\"updated_at\":\"2013-09-30T20:05:46Z\"}";
    String path = "$.some_double";
    Map<String, Object> result =
            new JsonPathCodec(configOf(CK_PATH, path), objectMapperProvider.get(), messageFactory).read(json);
    assertThat(result.size()).isEqualTo(1);
    assertThat(result.get("result")).isEqualTo(0.5);
}
/**
 * Parses the textual form of a segment back into a {@link Segment};
 * delegates entirely to {@code parseFromString}.
 */
static Segment fromString(String segmentString) {
    return parseFromString(segmentString);
}
// A segment string whose tokens are all empty ("...") must parse to the
// sentinel NULL segment rather than throwing or producing a partial value.
@Test
public void fromString_allEmptyTokens_returnsNullSegment() {
    assertThat(Segment.fromString("...")).isEqualTo(Segment.NULL);
}
/**
 * Obtains the wrapped lock and, when acquired, decorates it with
 * keep-alive extension behavior.
 *
 * @param lockConfiguration the lock to acquire
 * @return the decorated lock, or empty when the underlying provider did not lock
 * @throws IllegalArgumentException when lockAtMostFor is shorter than the
 *         minimum this provider can keep alive
 */
@Override
public Optional<SimpleLock> lock(LockConfiguration lockConfiguration) {
    if (lockConfiguration.getLockAtMostFor().compareTo(minimalLockAtMostFor) < 0) {
        throw new IllegalArgumentException(
                "Can not use KeepAliveLockProvider with lockAtMostFor shorter than " + minimalLockAtMostFor);
    }
    // Decorate the delegate lock so it is periodically extended on the executor.
    return wrapped
            .lock(lockConfiguration)
            .map(delegate -> new KeepAliveLock(lockConfiguration, delegate, executorService));
}
// When the underlying lock refuses to extend, the keep-alive task must stop:
// exactly one extend attempt happens at the first tick, then no further
// interaction even after a long additional delay.
@Test
void shouldCancelIfCanNotExtend() {
    mockExtension(originalLock, Optional.empty());
    Optional<SimpleLock> lock = provider.lock(lockConfiguration);
    assertThat(lock).isNotNull();
    tickMs(1_500);
    verify(originalLock).extend(lockConfiguration.getLockAtMostFor(), ofMillis(500));
    tickMs(10_000);
    verifyNoMoreInteractions(originalLock);
}
/**
 * Removes a configuration entry (and its tags) inside a single transaction
 * and records a delete ("D") entry in the config history.
 *
 * <p>If the entry does not exist this is a no-op; the transaction still
 * commits and returns normally.
 *
 * @param dataId  config data id
 * @param group   config group
 * @param tenant  tenant namespace
 * @param srcIp   caller ip, recorded in the history entry
 * @param srcUser user performing the delete, recorded in the history entry
 */
@Override
public void removeConfigInfo(final String dataId, final String group, final String tenant, final String srcIp,
        final String srcUser) {
    tjt.execute(new TransactionCallback<Boolean>() {
        // Capture the deletion timestamp once so the history record is consistent.
        final Timestamp time = new Timestamp(System.currentTimeMillis());

        @Override
        public Boolean doInTransaction(TransactionStatus status) {
            try {
                ConfigInfo configInfo = findConfigInfo(dataId, group, tenant);
                if (configInfo != null) {
                    // Delete the config row, its tags, and append a history record atomically.
                    removeConfigInfoAtomic(dataId, group, tenant, srcIp, srcUser);
                    removeTagByIdAtomic(configInfo.getId());
                    historyConfigInfoPersistService.insertConfigHistoryAtomic(configInfo.getId(), configInfo,
                            srcIp, srcUser, time, "D");
                }
            } catch (CannotGetJdbcConnectionException e) {
                // Connection failures are fatal: log and rethrow to roll back the transaction.
                LogUtil.FATAL_LOG.error("[db-error] " + e, e);
                throw e;
            }
            return Boolean.TRUE;
        }
    });
}
// Deleting an existing config must trigger: the row delete, the tag delete
// (by config id), and an insert into the delete ("D") history.
@Test
void testRemoveConfigInfo() {
    String dataId = "dataId4567";
    String group = "group3456789";
    String tenant = "tenant4567890";
    //mock exist config info
    ConfigInfoWrapper configInfoWrapperOld = new ConfigInfoWrapper();
    configInfoWrapperOld.setDataId(dataId);
    configInfoWrapperOld.setGroup(group);
    configInfoWrapperOld.setTenant(tenant);
    configInfoWrapperOld.setAppName("old_app");
    configInfoWrapperOld.setContent("old content");
    configInfoWrapperOld.setMd5("old_md5");
    configInfoWrapperOld.setId(12345678765L);
    configInfoWrapperOld.setEncryptedDataKey("key3456");
    Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}),
            eq(CONFIG_INFO_WRAPPER_ROW_MAPPER))).thenReturn(configInfoWrapperOld);
    String srcIp = "srcIp1234";
    String srcUser = "srcUser";
    externalConfigInfoPersistService.removeConfigInfo(dataId, group, tenant, srcIp, srcUser);
    //expect delete to be invoked
    Mockito.verify(jdbcTemplate, times(1)).update(anyString(), eq(dataId), eq(group), eq(tenant));
    //expect delete tags to be invoked
    Mockito.verify(jdbcTemplate, times(1)).update(anyString(), eq(configInfoWrapperOld.getId()));
    //expect insert delete history
    Mockito.verify(historyConfigInfoPersistService, times(1))
            .insertConfigHistoryAtomic(eq(configInfoWrapperOld.getId()), eq(configInfoWrapperOld), eq(srcIp),
                    eq(srcUser), any(), eq("D"));
}
/**
 * Creates a fresh {@link PipelineOptions} proxy implementing the given
 * interface, delegating to a default {@code Builder}.
 *
 * @param klass the options interface to instantiate
 * @return a new instance of {@code klass}
 */
public static <T extends PipelineOptions> T as(Class<T> klass) {
    return new Builder().as(klass);
}
// An options interface whose getter and setter come from separate parent
// interfaces must still be accepted by the factory (no exception expected).
@Test
public void testHavingSettersGettersFromSeparateInterfacesIsValid() {
    PipelineOptionsFactory.as(CombinedObject.class);
}
/**
 * Returns the maximum non-null element of the given list, or {@code null}
 * when the list is {@code null} or contains no non-null elements.
 *
 * @param input the list to scan; null elements are ignored
 * @return the largest non-null element, or {@code null}
 */
@Udf
public <T extends Comparable<? super T>> T arrayMax(@UdfParameter(
    description = "Array of values from which to find the maximum") final List<T> input) {
  if (input == null) {
    return null;
  }
  T max = null;
  for (final T value : input) {
    // A value replaces the current max when it is the first non-null seen
    // or strictly greater than the current max.
    if (value != null && (max == null || value.compareTo(max) > 0)) {
      max = value;
    }
  }
  return max;
}
// A null input list must map to a null result rather than throwing.
@Test
public void shouldReturnNullForNullInput() {
    assertThat(udf.arrayMax((List<String>) null), is(nullValue()));
}
/**
 * JDBC metadata flag: always reports {@code false}, i.e. concatenating
 * {@code NULL} with a non-NULL value is not treated as {@code NULL}.
 */
@Override
public boolean nullPlusNonNullIsNull() {
    return false;
}
// The metadata adapter must report false for nullPlusNonNullIsNull.
@Test
void assertNullPlusNonNullIsNull() {
    assertFalse(metaData.nullPlusNonNullIsNull());
}
/**
 * Transitions the scheduler into the terminal {@code Finished} state,
 * carrying the final archived execution graph.
 */
@Override
public void goToFinished(ArchivedExecutionGraph archivedExecutionGraph) {
    transitionToState(new Finished.Factory(this, archivedExecutionGraph, LOG));
}
// goToFinished must leave the scheduler in the Finished state regardless of
// the terminal job status carried by the archived graph (FAILED here).
@Test
void testGoToFinished() throws Exception {
    final AdaptiveScheduler scheduler =
            new AdaptiveSchedulerBuilder(
                            createJobGraph(), mainThreadExecutor, EXECUTOR_RESOURCE.getExecutor())
                    .build();
    final ArchivedExecutionGraph archivedExecutionGraph =
            new ArchivedExecutionGraphBuilder().setState(JobStatus.FAILED).build();
    scheduler.goToFinished(archivedExecutionGraph);
    assertThat(scheduler.getState()).isInstanceOf(Finished.class);
}
/**
 * Lexicographically compares two lists, padding the shorter one with
 * {@code fillValue} so both are compared over the longer length.
 *
 * @param left      first list
 * @param right     second list
 * @param fillValue element substituted for missing positions
 * @return negative/zero/positive per the first differing comparison
 */
static <T extends Comparable<? super T>> int compareListWithFillValue(
    List<T> left, List<T> right, T fillValue) {
  final int maxSize = Math.max(left.size(), right.size());
  for (int index = 0; index < maxSize; index++) {
    // Substitute the fill value for positions beyond a list's end.
    final T a = index < left.size() ? left.get(index) : fillValue;
    final T b = index < right.size() ? right.get(index) : fillValue;
    final int cmp = a.compareTo(b);
    if (cmp != 0) {
      return cmp;
    }
  }
  return 0;
}
// Equal lists of equal size must compare as zero; the fill value is unused.
@Test
public void compareWithFillValue_nonEmptyListSameSizeEqualValue_returnsZero() {
    assertThat(
            ComparisonUtility.compareListWithFillValue(
                    Lists.newArrayList(1, 2, 3), Lists.newArrayList(1, 2, 3), 100))
        .isEqualTo(0);
}
/**
 * Builds the Drools model plus its generated sources for the given
 * compilation request: derives the Drools AST and field-type mapping,
 * generates the source map, creates the package descriptor with a
 * deterministic package UUID, and bundles everything into the result.
 *
 * @param compilationDTO the compilation request (fields, model, package)
 * @return the model wrapper carrying sources, descriptor and metadata
 * @throws KiePMMLException wrapping any failure during generation
 */
@Override
public KiePMMLDroolsModelWithSources getKiePMMLModelWithSources(final CompilationDTO<T> compilationDTO) {
    logger.trace("getKiePMMLModelWithSources {} {} {}", compilationDTO.getPackageName(),
            compilationDTO.getFields(), compilationDTO.getModel());
    try {
        // fieldTypeMap is populated as a side effect of building the AST.
        final Map<String, KiePMMLOriginalTypeGeneratedType> fieldTypeMap = new HashMap<>();
        KiePMMLDroolsAST kiePMMLDroolsAST = getKiePMMLDroolsASTCommon(compilationDTO.getFields(),
                compilationDTO.getModel(), fieldTypeMap);
        final DroolsCompilationDTO<T> droolsCompilationDTO =
                DroolsCompilationDTO.fromCompilationDTO(compilationDTO, fieldTypeMap);
        Map<String, String> sourcesMap = getKiePMMLDroolsModelSourcesMap(droolsCompilationDTO);
        PackageDescr packageDescr = getPackageDescr(kiePMMLDroolsAST, compilationDTO.getPackageName());
        // Pin a package UUID so repeated compilations of the same package match.
        String pkgUUID = getPkgUUID("gav", compilationDTO.getPackageName());
        packageDescr.setPreferredPkgUUID(pkgUUID);
        return new KiePMMLDroolsModelWithSources(compilationDTO.getFileName(),
                compilationDTO.getModelName(),
                compilationDTO.getPackageName(),
                compilationDTO.getKieMiningFields(),
                compilationDTO.getKieOutputFields(),
                compilationDTO.getKieTargetFields(),
                sourcesMap,
                pkgUUID,
                packageDescr);
    } catch (Exception e) {
        throw new KiePMMLException(e);
    }
}
// Compiling the scorecard must yield a non-null model whose sources, package
// name, model name and package descriptor all match the compilation input.
@Test
void getKiePMMLModelWithSources() {
    final CommonCompilationDTO<Scorecard> compilationDTO =
            CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME, pmml, scorecard,
                    new PMMLCompilationContextMock(), SOURCE_1);
    KiePMMLDroolsModelWithSources retrieved = droolsModelProvider.getKiePMMLModelWithSources(compilationDTO);
    assertThat(retrieved).isNotNull();
    assertThat(retrieved.getSourcesMap()).isEqualTo(SOURCE_MAP);
    String expectedPackageName = compilationDTO.getPackageName();
    assertThat(retrieved.getKModulePackageName()).isEqualTo(expectedPackageName);
    assertThat(retrieved.getName()).isEqualTo(scorecard.getModelName());
    PackageDescr packageDescr = retrieved.getPackageDescr();
    commonVerifyPackageDescr(packageDescr, expectedPackageName);
    // NOTE(review): this assertion duplicates the isNotNull check above and
    // could be removed.
    assertThat(retrieved).isNotNull();
}
/**
 * Resolves the publisher responsible for the given event topic: slow events
 * share a single publisher, all other topics are looked up by class name.
 *
 * @param topic the event class to resolve
 * @return the shared publisher for {@code SlowEvent} subtypes, otherwise the
 *         per-topic publisher (may be {@code null} if never registered)
 */
public static EventPublisher getPublisher(Class<? extends Event> topic) {
    boolean isSlowEvent = ClassUtils.isAssignableFrom(SlowEvent.class, topic);
    return isSlowEvent
            ? INSTANCE.sharePublisher
            : INSTANCE.publisherMap.get(topic.getCanonicalName());
}
// Slow events resolve to the shared publisher; ordinary events resolve to a
// dedicated DefaultPublisher instance.
@Test
void testGetPublisher() {
    assertEquals(NotifyCenter.getSharePublisher(), NotifyCenter.getPublisher(TestSlowEvent.class));
    assertTrue(NotifyCenter.getPublisher(TestEvent.class) instanceof DefaultPublisher);
}
/**
 * Registers a new Kubernetes node, assigning device ids to any bridges the
 * node does not yet have (integration, external, local and — in NORMAL
 * mode — tunnel), while rejecting duplicate bridge registrations.
 *
 * @param node the node to create; must not be {@code null}
 * @throws NullPointerException     when {@code node} is {@code null}
 * @throws IllegalArgumentException when any bridge is already registered
 */
@Override
public void createNode(K8sNode node) {
    checkNotNull(node, ERR_NULL_NODE);
    K8sNode intNode;
    K8sNode extNode;
    K8sNode localNode;
    K8sNode tunNode;
    // Integration bridge: generate a device id if absent, then reject duplicates.
    if (node.intgBridge() == null) {
        String deviceIdStr = genDpid(deviceIdCounter.incrementAndGet());
        checkNotNull(deviceIdStr, ERR_NULL_DEVICE_ID);
        intNode = node.updateIntgBridge(DeviceId.deviceId(deviceIdStr));
        checkArgument(!hasIntgBridge(intNode.intgBridge(), intNode.hostname()),
                NOT_DUPLICATED_MSG, intNode.intgBridge());
    } else {
        intNode = node;
        checkArgument(!hasIntgBridge(intNode.intgBridge(), intNode.hostname()),
                NOT_DUPLICATED_MSG, intNode.intgBridge());
    }
    // External bridge: same generate-or-validate handling.
    if (intNode.extBridge() == null) {
        String deviceIdStr = genDpid(deviceIdCounter.incrementAndGet());
        checkNotNull(deviceIdStr, ERR_NULL_DEVICE_ID);
        extNode = intNode.updateExtBridge(DeviceId.deviceId(deviceIdStr));
        checkArgument(!hasExtBridge(extNode.extBridge(), extNode.hostname()),
                NOT_DUPLICATED_MSG, extNode.extBridge());
    } else {
        extNode = intNode;
        checkArgument(!hasExtBridge(extNode.extBridge(), extNode.hostname()),
                NOT_DUPLICATED_MSG, extNode.extBridge());
    }
    // Local bridge: same generate-or-validate handling.
    if (node.localBridge() == null) {
        String deviceIdStr = genDpid(deviceIdCounter.incrementAndGet());
        checkNotNull(deviceIdStr, ERR_NULL_DEVICE_ID);
        localNode = extNode.updateLocalBridge(DeviceId.deviceId(deviceIdStr));
        checkArgument(!hasLocalBridge(localNode.localBridge(), localNode.hostname()),
                NOT_DUPLICATED_MSG, localNode.localBridge());
    } else {
        localNode = extNode;
        checkArgument(!hasLocalBridge(localNode.localBridge(), localNode.hostname()),
                NOT_DUPLICATED_MSG, localNode.localBridge());
    }
    // Tunnel bridge only exists for NORMAL-mode nodes.
    if (node.mode() == NORMAL) {
        if (node.tunBridge() == null) {
            String deviceIdStr = genDpid(deviceIdCounter.incrementAndGet());
            checkNotNull(deviceIdStr, ERR_NULL_DEVICE_ID);
            tunNode = localNode.updateTunBridge(DeviceId.deviceId(deviceIdStr));
            checkArgument(!hasTunBridge(tunNode.tunBridge(), tunNode.hostname()),
                    NOT_DUPLICATED_MSG, tunNode.tunBridge());
        } else {
            tunNode = localNode;
            checkArgument(!hasTunBridge(tunNode.tunBridge(), tunNode.hostname()),
                    NOT_DUPLICATED_MSG, tunNode.tunBridge());
        }
        nodeStore.createNode(tunNode);
    } else {
        nodeStore.createNode(localNode);
    }
    log.info(String.format(MSG_NODE, extNode.hostname(), MSG_CREATED));
}
// Creating a null node must be rejected up front with a NullPointerException.
@Test(expected = NullPointerException.class)
public void testCreateNullNode() {
    target.createNode(null);
}
/**
 * Finds the master node owning the given hash slot.
 *
 * @param slot the cluster hash slot
 * @return the owning master node, or {@code null} when no master covers the slot
 */
@Override
public RedisClusterNode clusterGetNodeForSlot(int slot) {
    for (RedisClusterNode node : clusterGetNodes()) {
        // Only masters own slot ranges; replicas are skipped.
        if (node.isMaster() && node.getSlotRange().contains(slot)) {
            return node;
        }
    }
    return null;
}
// Slots far apart (1 and 16000) should resolve to two distinct master nodes.
@Test
public void testClusterGetNodeForSlot() {
    RedisClusterNode node1 = connection.clusterGetNodeForSlot(1);
    RedisClusterNode node2 = connection.clusterGetNodeForSlot(16000);
    assertThat(node1.getId()).isNotEqualTo(node2.getId());
}
/**
 * Enqueues a task for asynchronous execution, spawning a worker if needed.
 *
 * @param command the task to run
 * @throws RejectedExecutionException when the executor is shut down or the
 *         task queue refuses the task (overload)
 */
@Override
public void execute(Runnable command) {
    if (shutdown.get()) {
        throw new RejectedExecutionException("Executor[" + name + "] was shut down.");
    }
    // Offer is non-blocking: a full queue means we are overloaded, not stuck.
    boolean accepted = taskQ.offer(command);
    if (!accepted) {
        throw new RejectedExecutionException("Executor[" + name + "] is overloaded!");
    }
    addNewWorkerIfRequired();
}
// All submitted tasks must eventually run: a latch counted down by each of
// the 10 tasks must reach zero.
@Test
public void execute() {
    final int taskCount = 10;
    ManagedExecutorService executorService = newManagedExecutorService(1, taskCount);
    final CountDownLatch latch = new CountDownLatch(taskCount);
    for (int i = 0; i < taskCount; i++) {
        executorService.execute(latch::countDown);
    }
    assertOpenEventually(latch);
}
/**
 * Encodes a username into a SASL "saslname": '=' becomes "=3D" and ','
 * becomes "=2C", so the result contains neither bare '=' nor ','.
 *
 * <p>'=' is escaped first so the '=' characters introduced by the "=2C"
 * replacement are never re-escaped.
 *
 * @param username the raw username
 * @return the escaped saslname
 */
public static String saslName(String username) {
    return username.replace("=", "=3D").replace(",", "=2C");
}
// Round-trip property: for a range of tricky usernames, the encoded saslname
// must contain no commas, no bare '=' outside the "=2C"/"=3D" escapes, and
// must decode back to the original username.
@Test
public void saslName() {
    String[] usernames = {"user1", "123", "1,2", "user=A", "user==B", "user,1", "user 1", ",", "=", ",=", "=="};
    for (String username : usernames) {
        String saslName = ScramFormatter.saslName(username);
        // There should be no commas in saslName (comma is used as field separator in SASL messages)
        assertEquals(-1, saslName.indexOf(','));
        // There should be no "=" in the saslName apart from those used in encoding (comma is =2C and equals is =3D)
        assertEquals(-1, saslName.replace("=2C", "").replace("=3D", "").indexOf('='));
        assertEquals(username, ScramFormatter.username(saslName));
    }
}
/**
 * Resolves the shuffle tier factories for the given configuration: when an
 * external remote tier factory class is configured, only that factory is
 * used; otherwise the default ephemeral tiers are created.
 *
 * @param configuration the shuffle configuration
 * @return the list of tier factories to use
 */
public static List<TierFactory> initializeTierFactories(Configuration configuration) {
    String externalTierFactoryClass =
            configuration.get(
                    NettyShuffleEnvironmentOptions
                            .NETWORK_HYBRID_SHUFFLE_EXTERNAL_REMOTE_TIER_FACTORY_CLASS_NAME);
    // An external factory, when configured, replaces the built-in tiers entirely.
    return externalTierFactoryClass == null
            ? getEphemeralTierFactories(configuration)
            : Collections.singletonList(
                    createExternalTierFactory(configuration, externalTierFactoryClass));
}
// With a default configuration (no external tier class), exactly the two
// built-in tiers are created: memory first, disk second.
@Test
void testInitEphemeralTiers() {
    Configuration configuration = new Configuration();
    List<TierFactory> tierFactories = TierFactoryInitializer.initializeTierFactories(configuration);
    assertThat(tierFactories).hasSize(2);
    assertThat(tierFactories.get(0)).isInstanceOf(MemoryTierFactory.class);
    assertThat(tierFactories.get(1)).isInstanceOf(DiskTierFactory.class);
}
/**
 * Downloads a remote artifact and imports it as service definition(s).
 *
 * <p>Responses: 201 with the first imported service's name:version on
 * success, 400 on import errors, 500 on download failures, 204 when the URL
 * is empty or no service was created.
 *
 * @param url          the artifact URL to download (empty yields 204)
 * @param mainArtifact whether this is the service's main artifact
 * @param secretName   optional name of a stored secret used for the download
 */
@PostMapping(value = "/artifact/download")
public ResponseEntity<String> importArtifact(@RequestParam(value = "url", required = true) String url,
        @RequestParam(value = "mainArtifact", defaultValue = "true") boolean mainArtifact,
        @RequestParam(value = "secretName", required = false) String secretName) {
    if (!url.isEmpty()) {
        List<Service> services = null;
        Secret secret = null;
        if (secretName != null) {
            // A missing secret is tolerated: the download proceeds unauthenticated.
            secret = secretRepository.findByName(secretName).stream().findFirst().orElse(null);
            log.debug("Secret {} was requested. Have we found it? {}", secretName, (secret != null));
        }
        try {
            // Download remote to local file before import.
            HTTPDownloader.FileAndHeaders fileAndHeaders =
                    HTTPDownloader.handleHTTPDownloadToFileAndHeaders(url, secret, true);
            File localFile = fileAndHeaders.getLocalFile();
            // Now try importing services.
            services = serviceService.importServiceDefinition(localFile,
                    new ReferenceResolver(url, secret, true,
                            RelativeReferenceURLBuilderFactory
                                    .getRelativeReferenceURLBuilder(fileAndHeaders.getResponseHeaders())),
                    new ArtifactInfo(url, mainArtifact));
        } catch (IOException ioe) {
            log.error("Exception while retrieving remote item " + url, ioe);
            return new ResponseEntity<>("Exception while retrieving remote item", HttpStatus.INTERNAL_SERVER_ERROR);
        } catch (MockRepositoryImportException mrie) {
            return new ResponseEntity<>(mrie.getMessage(), HttpStatus.BAD_REQUEST);
        }
        if (services != null && !services.isEmpty()) {
            // Report the first imported service as "name:version" JSON.
            return new ResponseEntity<>(
                    "{\"name\": \"" + services.get(0).getName() + ":" + services.get(0).getVersion() + "\"}",
                    HttpStatus.CREATED);
        }
    }
    return new ResponseEntity<>(HttpStatus.NO_CONTENT);
}
// When the import yields no services, the controller must answer 204 NO_CONTENT.
@Test
void shouldReturnNoContentWhenTheServiceHasNotBeenCreated() throws MockRepositoryImportException {
    // arrange
    Mockito.when(serviceService.importServiceDefinition(Mockito.any(File.class),
            Mockito.any(ReferenceResolver.class), Mockito.any(ArtifactInfo.class)))
        .thenReturn(Collections.emptyList());
    String wrongUrl =
            "https://raw.githubusercontent.com/microcks/microcks/master/samples/APIPastry-openapi.yaml";
    // act
    ResponseEntity<String> responseEntity = sut.importArtifact(wrongUrl, false, null);
    // assert
    Assertions.assertThat(responseEntity.getStatusCode()).isEqualTo(HttpStatus.NO_CONTENT);
}
public static String[] split(String splittee, String splitChar, boolean truncate) { //NOSONAR if (splittee == null || splitChar == null) { return new String[0]; } final String EMPTY_ELEMENT = ""; int spot; final int splitLength = splitChar.length(); final String adjacentSplit = splitChar + splitChar; final int adjacentSplitLength = adjacentSplit.length(); if (truncate) { while ((spot = splittee.indexOf(adjacentSplit)) != -1) { splittee = splittee.substring(0, spot + splitLength) + splittee.substring(spot + adjacentSplitLength, splittee.length()); } if (splittee.startsWith(splitChar)) { splittee = splittee.substring(splitLength); } if (splittee.endsWith(splitChar)) { // Remove trailing splitter splittee = splittee.substring(0, splittee.length() - splitLength); } } List<String> returns = new ArrayList<>(); final int length = splittee.length(); // This is the new length int start = 0; spot = 0; while (start < length && (spot = splittee.indexOf(splitChar, start)) > -1) { if (spot > 0) { returns.add(splittee.substring(start, spot)); } else { returns.add(EMPTY_ELEMENT); } start = spot + splitLength; } if (start < length) { returns.add(splittee.substring(start)); } else if (spot == length - splitLength) {// Found splitChar at end of line returns.add(EMPTY_ELEMENT); } return returns.toArray(new String[returns.size()]); }
// Truncating split with a multi-character separator must strip leading
// separator runs so only real tokens remain.
@Test
public void testSplitStringStringTrueWithLeadingComplexSplitCharacters() {
    // Test leading split characters
    assertThat(JOrphanUtils.split(" , ,a ,bc", " ,", true), CoreMatchers.equalTo(new String[]{"a", "bc"}));
}
/**
 * Checks whether any of the given role ids corresponds to a super-admin role.
 *
 * @param ids role ids to inspect; empty or {@code null} yields {@code false}
 * @return {@code true} if at least one id resolves to a super-admin role
 */
@Override
public boolean hasAnySuperAdmin(Collection<Long> ids) {
    if (CollectionUtil.isEmpty(ids)) {
        return false;
    }
    // Call through the proxy so the cache annotations on getRoleFromCache apply.
    RoleServiceImpl self = getSelf();
    for (Long id : ids) {
        RoleDO role = self.getRoleFromCache(id);
        if (role != null && RoleCodeEnum.isSuperAdmin(role.getCode())) {
            return true;
        }
    }
    return false;
}
// A stored role with the super_admin code must make hasAnySuperAdmin true.
// SpringUtil is mocked so getSelf() resolves to the service under test.
@Test
public void testHasAnySuperAdmin_true() {
    try (MockedStatic<SpringUtil> springUtilMockedStatic = mockStatic(SpringUtil.class)) {
        springUtilMockedStatic.when(() -> SpringUtil.getBean(eq(RoleServiceImpl.class)))
                .thenReturn(roleService);
        // mock 数据
        RoleDO dbRole = randomPojo(RoleDO.class).setCode("super_admin");
        roleMapper.insert(dbRole);
        // 准备参数
        Long id = dbRole.getId();
        // 调用,并调用
        assertTrue(roleService.hasAnySuperAdmin(singletonList(id)));
    }
}
/**
 * Computes the boundary of the n-th next rollover period relative to
 * {@code now}: rounds the time down to the period boundary implied by the
 * date pattern, then advances by {@code numPeriods} units of the configured
 * periodicity ({@code numPeriods} may be negative to go backwards).
 *
 * <p>Note: this calendar instance itself is mutated ({@code cal == this}).
 *
 * @throws IllegalStateException when the periodicity type is unrecognized
 */
public Date getEndOfNextNthPeriod(Date now, int numPeriods) {
    Calendar cal = this;
    cal.setTime(now);
    // Zero out sub-period fields that are absent from the date pattern.
    roundDownTime(cal, this.datePattern);
    switch (this.periodicityType) {
    case TOP_OF_MILLISECOND:
        cal.add(Calendar.MILLISECOND, numPeriods);
        break;
    case TOP_OF_SECOND:
        cal.add(Calendar.SECOND, numPeriods);
        break;
    case TOP_OF_MINUTE:
        cal.add(Calendar.MINUTE, numPeriods);
        break;
    case TOP_OF_HOUR:
        cal.add(Calendar.HOUR_OF_DAY, numPeriods);
        break;
    case TOP_OF_DAY:
        cal.add(Calendar.DATE, numPeriods);
        break;
    case TOP_OF_WEEK:
        // Snap to the locale's first day of week before stepping whole weeks.
        cal.set(Calendar.DAY_OF_WEEK, cal.getFirstDayOfWeek());
        cal.add(Calendar.WEEK_OF_YEAR, numPeriods);
        break;
    case TOP_OF_MONTH:
        cal.add(Calendar.MONTH, numPeriods);
        break;
    default:
        throw new IllegalStateException("Unknown periodicity type.");
    }
    return cal.getTime();
}
// A pattern lacking hour/minute ("yyyy-MM-dd-ss") must round those fields to
// zero while stepping the seconds field by the (negative) period count.
@Test
public void roundsDateWithMissingTimeUnits() throws ParseException {
    final Date REF_DATE = parseDate("yyyy-MM-dd HH:mm:ss.SSS", "2000-12-25 09:30:49.876");
    Calendar cal = getEndOfNextNthPeriod("yyyy-MM-dd-ss", REF_DATE, -1);
    assertEquals(2000, cal.get(Calendar.YEAR));
    assertEquals(Calendar.DECEMBER, cal.get(Calendar.MONTH));
    assertEquals(25, cal.get(Calendar.DAY_OF_MONTH));
    assertEquals(0, cal.get(Calendar.HOUR_OF_DAY));
    assertEquals(0, cal.get(Calendar.MINUTE));
    assertEquals(48, cal.get(Calendar.SECOND));
    assertEquals(0, cal.get(Calendar.MILLISECOND));
}
/**
 * Issues the REST request with a fresh, empty {@link RequestContext};
 * delegates to the two-argument overload.
 */
@Override
public Future<RestResponse> restRequest(RestRequest request) {
    return restRequest(request, new RequestContext());
}
// Baseline request path (no backup-request strategy configured): the client
// must complete the request with a 200 response, in both sync and async modes.
@Test(dataProvider = "isD2Async")
public void testRequest(boolean isD2Async) throws Exception {
    AtomicReference<ServiceProperties> serviceProperties = new AtomicReference<>();
    serviceProperties.set(createServiceProperties(null));
    BackupRequestsClient client = createClient(serviceProperties::get, isD2Async);
    URI uri = URI.create("d2://testService");
    RestRequest restRequest = new RestRequestBuilder(uri).setEntity(CONTENT).build();
    Future<RestResponse> response = client.restRequest(restRequest);
    assertEquals(response.get().getStatus(), 200);
}
/**
 * Liveness probe: true only when the DB connection, web server and CE
 * checks are all GREEN and, when an Elasticsearch check is present, it is
 * not RED.
 *
 * <p>Checks short-circuit in order: db, web, ce, then es.
 */
public boolean liveness() {
    boolean coreChecksGreen =
            Health.Status.GREEN.equals(dbConnectionNodeCheck.check().getStatus())
                    && Health.Status.GREEN.equals(webServerStatusNodeCheck.check().getStatus())
                    && Health.Status.GREEN.equals(ceStatusNodeCheck.check().getStatus());
    if (!coreChecksGreen) {
        return false;
    }
    // Elasticsearch is optional: absent check passes; only RED fails (YELLOW is tolerated).
    return esStatusNodeCheck == null || !Health.Status.RED.equals(esStatusNodeCheck.check().getStatus());
}
// All four node checks GREEN must yield a positive liveness result.
@Test
public void success_when_db_web_ce_es_succeed() {
    when(dbConnectionNodeCheck.check()).thenReturn(Health.GREEN);
    when(webServerStatusNodeCheck.check()).thenReturn(Health.GREEN);
    when(ceStatusNodeCheck.check()).thenReturn(Health.GREEN);
    when(esStatusNodeCheck.check()).thenReturn(Health.GREEN);
    Assertions.assertThat(underTest.liveness()).isTrue();
}
/**
 * Evaluates this condition against one row of data.
 *
 * <p>Atomic conditions resolve left/right field indexes from the row meta,
 * fetch the operand values (right side prefers the exact constant over a
 * field reference), apply the comparison function, and optionally negate.
 * Composite conditions evaluate the first sub-condition and then fold in
 * each following one with its boolean operator (with short-circuiting for
 * AND/OR variants), again optionally negating the result.
 *
 * @param rowMeta metadata describing the row fields
 * @param r       the row values, indexed per {@code rowMeta}
 * @return the boolean outcome of the condition
 * @throws RuntimeException wrapping any evaluation failure
 */
public boolean evaluate( RowMetaInterface rowMeta, Object[] r ) {
    // Start of evaluate
    boolean retval = false;

    // If we have 0 items in the list, evaluate the current condition
    // Otherwise, evaluate all sub-conditions
    //
    try {
      if ( isAtomic() ) {

        if ( function == FUNC_TRUE ) {
          return !negate;
        }

        // Get fieldnrs left value
        //
        // Check out the fieldnrs if we don't have them...
        if ( leftValuename != null && leftValuename.length() > 0 ) {
          leftFieldnr = rowMeta.indexOfValue( leftValuename );
        }

        // Get fieldnrs right value
        //
        if ( rightValuename != null && rightValuename.length() > 0 ) {
          rightFieldnr = rowMeta.indexOfValue( rightValuename );
        }

        // Get fieldnrs left field
        ValueMetaInterface fieldMeta = null;
        Object field = null;
        if ( leftFieldnr >= 0 ) {
          fieldMeta = rowMeta.getValueMeta( leftFieldnr );
          field = r[ leftFieldnr ];
        } else {
          return false; // no fields to evaluate
        }

        // Get fieldnrs right exact
        // The exact (constant) right-hand value takes precedence; fall back to
        // the right-hand field only when the constant value is null.
        ValueMetaInterface fieldMeta2 = rightExact != null ? rightExact.getValueMeta() : null;
        Object field2 = rightExact != null ? rightExact.getValueData() : null;
        if ( field2 == null && rightFieldnr >= 0 ) {
          fieldMeta2 = rowMeta.getValueMeta( rightFieldnr );
          field2 = r[ rightFieldnr ];
        }

        // Evaluate
        switch ( function ) {
          case FUNC_EQUAL:
            retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) == 0 );
            break;
          case FUNC_NOT_EQUAL:
            retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) != 0 );
            break;
          case FUNC_SMALLER:
            // Added this if/else to accommodate for CUST-270
            if ( "Y".equalsIgnoreCase( System.getProperty( Const.KETTLE_FILTER_TREAT_NULLS_AS_NOT_ZERO, "N" ) )
              && fieldMeta.isNull( field ) ) {
              retval = false;
            } else {
              retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) < 0 );
            }
            break;
          case FUNC_SMALLER_EQUAL:
            // Added this if/else to accommodate for CUST-270
            if ( "Y".equalsIgnoreCase( System.getProperty( Const.KETTLE_FILTER_TREAT_NULLS_AS_NOT_ZERO, "N" ) )
              && fieldMeta.isNull( field ) ) {
              retval = false;
            } else {
              retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) <= 0 );
            }
            break;
          case FUNC_LARGER:
            retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) > 0 );
            break;
          case FUNC_LARGER_EQUAL:
            retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) >= 0 );
            break;
          case FUNC_REGEXP:
            if ( fieldMeta.isNull( field ) || field2 == null ) {
              retval = false;
            } else {
              retval = Pattern
                .matches( fieldMeta2.getCompatibleString( field2 ), fieldMeta.getCompatibleString( field ) );
            }
            break;
          case FUNC_NULL:
            retval = ( fieldMeta.isNull( field ) );
            break;
          case FUNC_NOT_NULL:
            retval = ( !fieldMeta.isNull( field ) );
            break;
          case FUNC_IN_LIST:
            // performance reason: create the array first or again when it is against a field and not a constant
            //
            if ( inList == null || rightFieldnr >= 0 ) {
              inList = Const.splitString( fieldMeta2.getString( field2 ), ';', true );
              for ( int i = 0; i < inList.length; i++ ) {
                inList[i] = inList[i] == null ? null : inList[i].replace( "\\", "" );
              }
              // Sorted so membership can use binary search below.
              Arrays.sort( inList );
            }
            String searchString = fieldMeta.getCompatibleString( field );
            int inIndex = -1;
            if ( searchString != null ) {
              inIndex = Arrays.binarySearch( inList, searchString );
            }
            retval = inIndex >= 0;
            break;
          case FUNC_CONTAINS:
            String fm2CompatibleContains = fieldMeta2.getCompatibleString( field2 );
            retval = Optional.ofNullable( fieldMeta.getCompatibleString( field ) )
              .filter( s -> s.contains( fm2CompatibleContains ) ).isPresent();
            break;
          case FUNC_STARTS_WITH:
            String fm2CompatibleStarts = fieldMeta2.getCompatibleString( field2 );
            retval = Optional.ofNullable( fieldMeta.getCompatibleString( field ) )
              .filter( s -> s.startsWith( fm2CompatibleStarts ) ).isPresent();
            break;
          case FUNC_ENDS_WITH:
            String string = fieldMeta.getCompatibleString( field );
            if ( !Utils.isEmpty( string ) ) {
              if ( rightString == null && field2 != null ) {
                rightString = fieldMeta2.getCompatibleString( field2 );
              }
              if ( rightString != null ) {
                retval = string.endsWith( fieldMeta2.getCompatibleString( field2 ) );
              } else {
                retval = false;
              }
            } else {
              retval = false;
            }
            break;
          case FUNC_LIKE:
            // Converts to a regular expression
            // TODO: optimize the patterns and String replacements
            //
            if ( fieldMeta.isNull( field ) || field2 == null ) {
              retval = false;
            } else {
              String regex = fieldMeta2.getCompatibleString( field2 );
              regex = regex.replace( "%", ".*" );
              regex = regex.replace( "?", "." );
              retval = Pattern.matches( regex, fieldMeta.getCompatibleString( field ) );
            }
            break;
          default:
            break;
        }

        // Only NOT makes sense, the rest doesn't, so ignore!!!!
        // Optionally negate
        //
        if ( isNegated() ) {
          retval = !retval;
        }
      } else {
        // Composite : get first
        Condition cb0 = list.get( 0 );
        retval = cb0.evaluate( rowMeta, r );

        // Loop over the conditions listed below.
        //
        for ( int i = 1; i < list.size(); i++ ) {
          // Composite : #i
          // Get right hand condition
          Condition cb = list.get( i );

          // Evaluate the right hand side of the condition cb.evaluate() within
          // the switch statement
          // because the condition may be short-circuited due to the left hand
          // side (retval)
          switch ( cb.getOperator() ) {
            case Condition.OPERATOR_OR:
              retval = retval || cb.evaluate( rowMeta, r );
              break;
            case Condition.OPERATOR_AND:
              retval = retval && cb.evaluate( rowMeta, r );
              break;
            case Condition.OPERATOR_OR_NOT:
              retval = retval || ( !cb.evaluate( rowMeta, r ) );
              break;
            case Condition.OPERATOR_AND_NOT:
              retval = retval && ( !cb.evaluate( rowMeta, r ) );
              break;
            case Condition.OPERATOR_XOR:
              retval = retval ^ cb.evaluate( rowMeta, r );
              break;
            default:
              break;
          }
        }

        // Composite: optionally negate
        if ( isNegated() ) {
          retval = !retval;
        }
      }
    } catch ( Exception e ) {
      throw new RuntimeException( "Unexpected error evaluation condition [" + toString() + "]", e );
    }

    return retval;
}
// Comparing 0 <= NULL must evaluate to false (NULL is not treated as zero
// for the smaller-or-equal comparison in this scenario).
@Test
public void testZeroSmallerOrEqualsThanNull() {
    String left = "left";
    String right = "right";
    Long leftValue = 0L;
    Long rightValue = null;
    RowMetaInterface rowMeta = new RowMeta();
    rowMeta.addValueMeta( new ValueMetaInteger( left ) );
    rowMeta.addValueMeta( new ValueMetaInteger( right ) );
    Condition condition = new Condition( left, Condition.FUNC_SMALLER_EQUAL, right, null );
    assertFalse( condition.evaluate( rowMeta, new Object[] { leftValue, rightValue } ) );
}
/**
 * Applies a TopicRecord to the in-memory topic state: registers the
 * name-to-id mapping, tracks names containing collision characters under
 * their normalized form, and creates the topic control info.
 *
 * @param record the topic record to replay
 * @throws RuntimeException when a TopicRecord for the same name already
 *         exists (whether with the same or a different topic ID)
 */
public void replay(TopicRecord record) {
    // NOTE(review): the put happens before the duplicate check, so topicsByName
    // is already mutated when the exception is thrown — presumably acceptable
    // because replay errors are fatal; confirm.
    Uuid existingUuid = topicsByName.put(record.name(), record.topicId());
    if (existingUuid != null) {
        // We don't currently support sending a second TopicRecord for the same topic name...
        // unless, of course, there is a RemoveTopicRecord in between.
        if (existingUuid.equals(record.topicId())) {
            throw new RuntimeException("Found duplicate TopicRecord for " + record.name() +
                    " with topic ID " + record.topicId());
        } else {
            throw new RuntimeException("Found duplicate TopicRecord for " + record.name() +
                    " with a different ID than before. Previous ID was " + existingUuid +
                    " and new ID is " + record.topicId());
        }
    }
    if (Topic.hasCollisionChars(record.name())) {
        // Index names with collision characters under their normalized form so
        // colliding names can be detected later.
        String normalizedName = Topic.unifyCollisionChars(record.name());
        TimelineHashSet<String> topicNames = topicsWithCollisionChars.get(normalizedName);
        if (topicNames == null) {
            topicNames = new TimelineHashSet<>(snapshotRegistry, 1);
            topicsWithCollisionChars.put(normalizedName, topicNames);
        }
        topicNames.add(record.name());
    }
    topics.put(record.topicId(), new TopicControlInfo(record.name(), snapshotRegistry, record.topicId()));
    log.info("Replayed TopicRecord for topic {} with topic ID {}.", record.name(), record.topicId());
}
// Replaying a TopicRecord for an existing topic name must fail with a
// distinct message depending on whether the topic ID matches or differs.
@Test
public void testDuplicateTopicIdReplay() {
    ReplicationControlTestContext ctx = new ReplicationControlTestContext.Builder().build();
    ReplicationControlManager replicationControl = ctx.replicationControl;
    replicationControl.replay(new TopicRecord().
        setName("foo").
        setTopicId(Uuid.fromString("Ktv3YkMQRe-MId4VkkrMyw")));
    // Same name + same ID => "duplicate" message.
    assertEquals("Found duplicate TopicRecord for foo with topic ID Ktv3YkMQRe-MId4VkkrMyw",
        assertThrows(RuntimeException.class,
            () -> replicationControl.replay(new TopicRecord().
                setName("foo").
                setTopicId(Uuid.fromString("Ktv3YkMQRe-MId4VkkrMyw")))).
            getMessage());
    // Same name + different ID => "different ID" message.
    assertEquals("Found duplicate TopicRecord for foo with a different ID than before. " +
        "Previous ID was Ktv3YkMQRe-MId4VkkrMyw and new ID is 8auUWq8zQqe_99H_m2LAmw",
        assertThrows(RuntimeException.class,
            () -> replicationControl.replay(new TopicRecord().
                setName("foo").
                setTopicId(Uuid.fromString("8auUWq8zQqe_99H_m2LAmw")))).
            getMessage());
}
/**
 * Returns the map's entries with keys exposed as plain CharSequences.
 * NOTE(review): this builds a snapshot copy — mutating the returned set does
 * not write through to the underlying map.
 */
@Override
public Set<Entry<CharSequence, V>> entrySet() {
    Set<Entry<CharSequence, V>> result = Sets.newHashSet();
    // Unwrap each internal wrapper entry into a caller-facing entry.
    wrapperMap.entrySet().forEach(wrapped -> result.add(new CharSequenceEntry<>(wrapped)));
    return result;
}
@Test
public void testEntrySet() {
    CharSequenceMap<String> map = CharSequenceMap.create();
    // Keys of different CharSequence implementations (String vs StringBuilder)
    // must both be stored and surfaced through entrySet().
    map.put("key1", "value1");
    map.put(new StringBuilder("key2"), "value2");
    assertThat(map.entrySet()).hasSize(2);
}
@Override protected void processRecord(RowData row) { synchronized (resultLock) { boolean isInsertOp = row.getRowKind() == RowKind.INSERT || row.getRowKind() == RowKind.UPDATE_AFTER; // Always set the RowKind to INSERT, so that we can compare rows correctly (RowKind will // be ignored), row.setRowKind(RowKind.INSERT); // insert if (isInsertOp) { processInsert(row); } // delete else { processDelete(row); } } }
@Test
void testLimitedSnapshot() {
    final ResolvedSchema schema =
        ResolvedSchema.physical(
            new String[] {"f0", "f1"},
            new DataType[] {DataTypes.STRING(), DataTypes.INT()});
    @SuppressWarnings({"unchecked", "rawtypes"})
    final DataStructureConverter<RowData, Row> rowConverter =
        (DataStructureConverter)
            DataStructureConverters.getConverter(schema.toPhysicalRowDataType());
    // limit the materialized table to 2 rows
    // with 3 rows overcommitment
    try (TestMaterializedCollectStreamResult result =
        new TestMaterializedCollectStreamResult(
            CliClientTestUtils.createTestClient(schema),
            2,
            3,
            createInternalBinaryRowDataConverter(schema.toPhysicalRowDataType()))) {
        result.isRetrieving = true;
        result.processRecord(Row.ofKind(RowKind.INSERT, "D", 1));
        result.processRecord(Row.ofKind(RowKind.INSERT, "A", 1));
        result.processRecord(Row.ofKind(RowKind.INSERT, "B", 1));
        result.processRecord(Row.ofKind(RowKind.INSERT, "A", 1));
        // The first two rows exceed the 2-row limit and surface as nulls until
        // the overcommitment threshold triggers cleanup.
        assertRowEquals(
            Arrays.asList(
                null,
                null,
                Row.ofKind(RowKind.INSERT, "B", 1),
                Row.ofKind(RowKind.INSERT, "A", 1)), // two over-committed rows
            result.getMaterializedTable(),
            rowConverter);
        // Snapshot with page size 1 yields 2 pages (the limit).
        assertThat(result.snapshot(1)).isEqualTo(TypedResult.payload(2));
        assertRowEquals(
            Collections.singletonList(Row.ofKind(RowKind.INSERT, "B", 1)),
            result.retrievePage(1),
            rowConverter);
        assertRowEquals(
            Collections.singletonList(Row.ofKind(RowKind.INSERT, "A", 1)),
            result.retrievePage(2),
            rowConverter);
        result.processRecord(Row.ofKind(RowKind.INSERT, "C", 1));
        assertRowEquals(
            Arrays.asList(
                Row.ofKind(RowKind.INSERT, "A", 1),
                Row.ofKind(RowKind.INSERT, "C", 1)), // limit clean up has taken place
            result.getMaterializedTable(),
            rowConverter);
        result.processRecord(Row.ofKind(RowKind.DELETE, "A", 1));
        assertRowEquals(
            Collections.singletonList(
                Row.ofKind(RowKind.INSERT, "C", 1)), // regular clean up has taken place
            result.getMaterializedTable(),
            rowConverter);
    }
}
/**
 * Resolves the originating client IP for the given request.
 * Walks the known proxy headers (IP_HEADER_NAMES) in priority order and
 * returns the first usable address from a possibly comma/semicolon-delimited
 * chain; falls back to the socket's remote address, or UNKNOWN when that is
 * absent or unresolved.
 */
public static String getClientIp(ServerHttpRequest request) {
    for (String header : IP_HEADER_NAMES) {
        String ipList = request.getHeaders().getFirst(header);
        if (StringUtils.hasText(ipList) && !UNKNOWN.equalsIgnoreCase(ipList)) {
            String[] ips = ipList.trim().split("[,;]");
            for (String ip : ips) {
                // Fix: trim each candidate — "unknown, 1.2.3.4" splits into
                // "unknown" and " 1.2.3.4", and the untrimmed form was
                // previously returned with a leading space.
                String candidate = ip == null ? null : ip.trim();
                if (StringUtils.hasText(candidate) && !UNKNOWN.equalsIgnoreCase(candidate)) {
                    return candidate;
                }
            }
        }
    }
    // No proxy header matched: use the socket address if it is resolved.
    var remoteAddress = request.getRemoteAddress();
    return remoteAddress == null || remoteAddress.isUnresolved()
            ? UNKNOWN
            : remoteAddress.getAddress().getHostAddress();
}
@Test
void testGetUnknownIPAddressWhenRemoteAddressIsNull() {
    // A bare mock request carries no proxy headers and no remote address, so
    // the util must fall back to the UNKNOWN sentinel.
    var request = MockServerHttpRequest.get("/").build();
    var actual = IpAddressUtils.getClientIp(request);
    assertEquals(IpAddressUtils.UNKNOWN, actual);
}
/**
 * Returns the {@link DoFnSignature} for the given DoFn class, parsing it on
 * first use and memoizing the result so each class is reflected over at most once.
 */
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
    return signatureCache.computeIfAbsent(fn, clazz -> parseSignature(clazz));
}
@Test
public void testStateParameterDuplicate() throws Exception {
    // Binding the same @StateId twice in one @ProcessElement must be rejected;
    // the message should name the id, the method, the duplicate parameter
    // index, and must be about state — not timers.
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("duplicate");
    thrown.expectMessage("my-id");
    thrown.expectMessage("myProcessElement");
    thrown.expectMessage("index 2");
    thrown.expectMessage(not(mentionsTimers()));
    DoFnSignatures.getSignature(
        new DoFn<KV<String, Integer>, Long>() {
            @StateId("my-id")
            private final StateSpec<ValueState<Integer>> myfield =
                StateSpecs.value(VarIntCoder.of());

            @ProcessElement
            public void myProcessElement(
                ProcessContext context,
                @StateId("my-id") ValueState<Integer> one,
                @StateId("my-id") ValueState<Integer> two) {}
        }.getClass());
}
/**
 * Builds a Locale from an underscore-delimited code: "lang", "lang_COUNTRY",
 * or "lang_COUNTRY_VARIANT". Returns null for an empty code; any other token
 * count is treated as a bare language code.
 */
public static Locale createLocale( String localeCode ) {
  if ( Utils.isEmpty( localeCode ) ) {
    return null;
  }
  StringTokenizer parser = new StringTokenizer( localeCode, "_" );
  switch ( parser.countTokens() ) {
    case 2:
      return new Locale( parser.nextToken(), parser.nextToken() );
    case 3:
      return new Locale( parser.nextToken(), parser.nextToken(), parser.nextToken() );
    default:
      // 1 token, or 4 and more: fall back to the whole code as the language.
      return new Locale( localeCode );
  }
}
@Test
public void createLocale_DoubleCode_Variant() throws Exception {
    // A "language_COUNTRY_VARIANT" code must map to the three-argument Locale.
    assertEquals( new Locale( "no", "NO", "NY" ), EnvUtil.createLocale( "no_NO_NY" ) );
}
/**
 * Asynchronously resolves the CustomerId associated with the originator entity.
 * A CUSTOMER originator is returned as-is; USER/ASSET/DEVICE originators are
 * looked up and mapped to their owning customer; any other entity type fails
 * the future with a TbNodeException.
 */
public static ListenableFuture<CustomerId> findEntityIdAsync(TbContext ctx, EntityId originator) {
    switch (originator.getEntityType()) {
        case CUSTOMER:
            return Futures.immediateFuture((CustomerId) originator);
        case USER:
            return toCustomerIdAsync(ctx, ctx.getUserService().findUserByIdAsync(ctx.getTenantId(), (UserId) originator));
        case ASSET:
            return toCustomerIdAsync(ctx, ctx.getAssetService().findAssetByIdAsync(ctx.getTenantId(), (AssetId) originator));
        case DEVICE:
            // Device lookup is synchronous; wrap it so all branches share the async shape.
            return toCustomerIdAsync(ctx, Futures.immediateFuture(ctx.getDeviceService().findDeviceById(ctx.getTenantId(), (DeviceId) originator)));
        default:
            return Futures.immediateFailedFuture(new TbNodeException("Unexpected originator EntityType: " + originator.getEntityType()));
    }
}
@Test
public void givenUserEntityType_whenFindEntityIdAsync_thenOK() throws ExecutionException, InterruptedException {
    // GIVEN: a user owned by a known customer, served by a mocked user service
    var user = new User(new UserId(UUID.randomUUID()));
    var expectedCustomerId = new CustomerId(UUID.randomUUID());
    user.setCustomerId(expectedCustomerId);
    when(ctxMock.getUserService()).thenReturn(userServiceMock);
    doReturn(Futures.immediateFuture(user)).when(userServiceMock).findUserByIdAsync(any(), any());
    when(ctxMock.getDbCallbackExecutor()).thenReturn(DB_EXECUTOR);
    // WHEN: resolving the customer id for a USER originator
    var actualCustomerId = EntitiesCustomerIdAsyncLoader.findEntityIdAsync(ctxMock, user.getId()).get();
    // THEN: the user's owning customer is returned
    assertEquals(expectedCustomerId, actualCustomerId);
}
public void readRep( Repository rep, IMetaStore metaStore, ObjectId id_step, List<DatabaseMeta> databases ) throws KettleException { // Load the URL // setUrl( rep.getStepAttributeString( id_step, "wsUrl" ) ); // Load the operation // setOperationName( rep.getStepAttributeString( id_step, "wsOperation" ) ); setOperationRequestName( rep.getStepAttributeString( id_step, "wsOperationRequest" ) ); setOperationNamespace( rep.getStepAttributeString( id_step, "wsOperationNamespace" ) ); setInFieldContainerName( rep.getStepAttributeString( id_step, "wsInFieldContainer" ) ); setInFieldArgumentName( rep.getStepAttributeString( id_step, "wsInFieldArgument" ) ); setOutFieldContainerName( rep.getStepAttributeString( id_step, "wsOutFieldContainer" ) ); setOutFieldArgumentName( rep.getStepAttributeString( id_step, "wsOutFieldArgument" ) ); setProxyHost( rep.getStepAttributeString( id_step, "proxyHost" ) ); setProxyPort( rep.getStepAttributeString( id_step, "proxyPort" ) ); setHttpLogin( rep.getStepAttributeString( id_step, "httpLogin" ) ); setHttpPassword( rep.getStepAttributeString( id_step, "httpPassword" ) ); setCallStep( (int) rep.getStepAttributeInteger( id_step, "callStep" ) ); setPassingInputData( rep.getStepAttributeBoolean( id_step, "passingInputData" ) ); setCompatible( rep.getStepAttributeBoolean( id_step, 0, "compatible", true ) ); // Default to true for backward // compatibility setRepeatingElementName( rep.getStepAttributeString( id_step, "repeating_element" ) ); setReturningReplyAsString( rep.getStepAttributeBoolean( id_step, 0, "reply_as_string" ) ); // Load the input fields mapping // int nb = rep.countNrStepAttributes( id_step, "fieldIn_ws_name" ); getFieldsIn().clear(); for ( int i = 0; i < nb; ++i ) { WebServiceField field = new WebServiceField(); field.setName( rep.getStepAttributeString( id_step, i, "fieldIn_name" ) ); field.setWsName( rep.getStepAttributeString( id_step, i, "fieldIn_ws_name" ) ); field.setXsdType( rep.getStepAttributeString( id_step, i, 
"fieldIn_xsd_type" ) ); getFieldsIn().add( field ); } // Load the output fields mapping // nb = rep.countNrStepAttributes( id_step, "fieldOut_ws_name" ); getFieldsOut().clear(); for ( int i = 0; i < nb; ++i ) { WebServiceField field = new WebServiceField(); field.setName( rep.getStepAttributeString( id_step, i, "fieldOut_name" ) ); field.setWsName( rep.getStepAttributeString( id_step, i, "fieldOut_ws_name" ) ); field.setXsdType( rep.getStepAttributeString( id_step, i, "fieldOut_xsd_type" ) ); getFieldsOut().add( field ); } }
@Test
public void testReadRep() throws Exception {
    // Stub a repository that answers every attribute read, load the meta from
    // it, then verify the loaded state through its XML serialization.
    Repository rep = mock( Repository.class );
    IMetaStore metastore = mock( IMetaStore.class );
    DatabaseMeta dbMeta = mock( DatabaseMeta.class );
    StringObjectId id_step = new StringObjectId( "oid" );
    when( rep.getStepAttributeString( id_step, "wsOperation" ) ).thenReturn( "GetCurrentExchangeRate" );
    when( rep.getStepAttributeString( id_step, "wsOperationRequest" ) ).thenReturn( "opRequest" );
    when( rep.getStepAttributeString( id_step, "wsOperationNamespace" ) ).thenReturn( "opNamespace" );
    when( rep.getStepAttributeString( id_step, "wsInFieldContainer" ) ).thenReturn( "ifc" );
    when( rep.getStepAttributeString( id_step, "wsInFieldArgument" ) ).thenReturn( "ifa" );
    when( rep.getStepAttributeString( id_step, "wsOutFieldContainer" ) ).thenReturn( "ofc" );
    when( rep.getStepAttributeString( id_step, "wsOutFieldArgument" ) ).thenReturn( "ofa" );
    when( rep.getStepAttributeString( id_step, "proxyHost" ) ).thenReturn( "phost" );
    when( rep.getStepAttributeString( id_step, "proxyPort" ) ).thenReturn( "1234" );
    when( rep.getStepAttributeString( id_step, "httpLogin" ) ).thenReturn( "user" );
    when( rep.getStepAttributeString( id_step, "httpPassword" ) ).thenReturn( "password" );
    when( rep.getStepAttributeInteger( id_step, "callStep" ) ).thenReturn( 2L );
    when( rep.getStepAttributeBoolean( id_step, "passingInputData" ) ).thenReturn( true );
    when( rep.getStepAttributeBoolean( id_step, 0, "compatible", true ) ).thenReturn( false );
    when( rep.getStepAttributeString( id_step, "repeating_element" ) ).thenReturn( "repeat" );
    when( rep.getStepAttributeBoolean( id_step, 0, "reply_as_string" ) ).thenReturn( true );
    // Two input field mappings...
    when( rep.countNrStepAttributes( id_step, "fieldIn_ws_name" ) )
        .thenReturn( 2 );
    when( rep.getStepAttributeString( id_step, 0, "fieldIn_name" ) ).thenReturn( "bank" );
    when( rep.getStepAttributeString( id_step, 0, "fieldIn_ws_name" ) ).thenReturn( "inBank" );
    when( rep.getStepAttributeString( id_step, 0, "fieldIn_xsd_type" ) ).thenReturn( "string" );
    when( rep.getStepAttributeString( id_step, 1, "fieldIn_name" ) ).thenReturn( "branch" );
    when( rep.getStepAttributeString( id_step, 1, "fieldIn_ws_name" ) ).thenReturn( "inBranch" );
    when( rep.getStepAttributeString( id_step, 1, "fieldIn_xsd_type" ) ).thenReturn( "string" );
    // ...and two output field mappings.
    when( rep.countNrStepAttributes( id_step, "fieldOut_ws_name" ) )
        .thenReturn( 2 );
    when( rep.getStepAttributeString( id_step, 0, "fieldOut_name" ) ).thenReturn( "balance" );
    when( rep.getStepAttributeString( id_step, 0, "fieldOut_ws_name" ) ).thenReturn( "outBalance" );
    when( rep.getStepAttributeString( id_step, 0, "fieldOut_xsd_type" ) ).thenReturn( "int" );
    when( rep.getStepAttributeString( id_step, 1, "fieldOut_name" ) ).thenReturn( "transactions" );
    when( rep.getStepAttributeString( id_step, 1, "fieldOut_ws_name" ) ).thenReturn( "outTransactions" );
    when( rep.getStepAttributeString( id_step, 1, "fieldOut_xsd_type" ) ).thenReturn( "int" );
    WebServiceMeta webServiceMeta =
        new WebServiceMeta( rep, metastore, id_step, Collections.singletonList( dbMeta ) );
    // The expected XML mirrors every stubbed attribute above.
    String expectedXml = " <wsURL/>\n"
        + " <wsOperation>GetCurrentExchangeRate</wsOperation>\n"
        + " <wsOperationRequest>opRequest</wsOperationRequest>\n"
        + " <wsOperationNamespace>opNamespace</wsOperationNamespace>\n"
        + " <wsInFieldContainer>ifc</wsInFieldContainer>\n"
        + " <wsInFieldArgument>ifa</wsInFieldArgument>\n"
        + " <wsOutFieldContainer>ofc</wsOutFieldContainer>\n"
        + " <wsOutFieldArgument>ofa</wsOutFieldArgument>\n"
        + " <proxyHost>phost</proxyHost>\n"
        + " <proxyPort>1234</proxyPort>\n"
        + " <httpLogin>user</httpLogin>\n"
        + " <httpPassword>password</httpPassword>\n"
        + " <callStep>2</callStep>\n"
        + " <passingInputData>Y</passingInputData>\n"
        + " <compatible>N</compatible>\n"
        + " <repeating_element>repeat</repeating_element>\n"
        + " <reply_as_string>Y</reply_as_string>\n"
        + " <fieldsIn>\n"
        + " <field>\n"
        + " <name>bank</name>\n"
        + " <wsName>inBank</wsName>\n"
        + " <xsdType>string</xsdType>\n"
        + " </field>\n"
        + " <field>\n"
        + " <name>branch</name>\n"
        + " <wsName>inBranch</wsName>\n"
        + " <xsdType>string</xsdType>\n"
        + " </field>\n"
        + " </fieldsIn>\n"
        + " <fieldsOut>\n"
        + " <field>\n"
        + " <name>balance</name>\n"
        + " <wsName>outBalance</wsName>\n"
        + " <xsdType>int</xsdType>\n"
        + " </field>\n"
        + " <field>\n"
        + " <name>transactions</name>\n"
        + " <wsName>outTransactions</wsName>\n"
        + " <xsdType>int</xsdType>\n"
        + " </field>\n"
        + " </fieldsOut>\n";
    String actualXml = TestUtils.toUnixLineSeparators( webServiceMeta.getXML() );
    assertEquals( expectedXml, actualXml );
}
/**
 * Maps a domain AgentMetadata to its wire-format DTO, field by field.
 */
@Override
public AgentMetadataDTO toDTO(AgentMetadata agentMetadata) {
    // NOTE(review): assumes the four accessors return String — confirm against AgentMetadata.
    final String elasticAgentId = agentMetadata.elasticAgentId();
    final String agentState = agentMetadata.agentState();
    final String buildState = agentMetadata.buildState();
    final String configState = agentMetadata.configState();
    return new AgentMetadataDTO(elasticAgentId, agentState, buildState, configState);
}
@Test
public void fromDTO_shouldConvertToAgentMetadataDTOFromAgentMetadata() {
    // Every field of the domain object must be carried over into the DTO.
    final AgentMetadata agentMetadata = new AgentMetadata("agent-id", "Idle", "Building", "Enabled");
    final AgentMetadataDTO agentMetadataDTO = new AgentMetadataConverterV4().toDTO(agentMetadata);
    assertThat(agentMetadataDTO.elasticAgentId(), is("agent-id"));
    assertThat(agentMetadataDTO.agentState(), is("Idle"));
    assertThat(agentMetadataDTO.buildState(), is("Building"));
    assertThat(agentMetadataDTO.configState(), is("Enabled"));
}
/**
 * Parses the raw config text as java.util.Properties and forwards the result
 * to the subclass hook. Empty payloads are ignored; unparseable payloads are
 * logged and dropped without invoking the hook.
 */
@Override
public void receiveConfigInfo(String configInfo) {
    if (StringUtils.isEmpty(configInfo)) {
        return;
    }
    final Properties properties = new Properties();
    try {
        properties.load(new StringReader(configInfo));
    } catch (IOException e) {
        LOGGER.error("load properties error:" + configInfo, e);
        return;
    }
    innerReceive(properties);
}
@Test
void testReceiveConfigInfoIsNotProperties() {
    final Deque<Properties> q2 = new ArrayDeque<Properties>();
    // The hook records every delivery so we can observe whether it ran.
    PropertiesListener a = new PropertiesListener() {
        @Override
        public void innerReceive(Properties properties) {
            q2.offer(properties);
        }
    };
    // A null payload must be ignored: innerReceive is never invoked, so the
    // queue stays empty and poll() returns null.
    a.receiveConfigInfo(null);
    final Properties actual = q2.poll();
    assertNull(actual);
}
/**
 * Calls the RM's scheduler-conf format endpoint and reports the outcome.
 * Prints the response body on success; prints an error otherwise.
 *
 * @return 0 on HTTP 200, -1 on any failure (non-OK status or null response)
 */
@VisibleForTesting
int formatSchedulerConf(String webAppAddress, WebResource resource)
    throws Exception {
  ClientResponse response = null;
  resource = (resource != null) ? resource :
      initializeWebResource(webAppAddress);
  try {
    // Both security modes hit the same endpoint; without Kerberos the caller
    // identity is passed explicitly via the user.name query parameter.
    WebResource target = resource
        .path("ws").path("v1").path("cluster")
        .path("/scheduler-conf/format");
    if (!UserGroupInformation.isSecurityEnabled()) {
      target = target.queryParam("user.name",
          UserGroupInformation.getCurrentUser().getShortUserName());
    }
    Builder builder = target
        .accept(MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON);
    response = builder.get(ClientResponse.class);
    if (response == null) {
      System.err.println("Failed to format scheduler configuration: "
          + "null response");
      return -1;
    }
    if (response.getStatus() == Status.OK.getStatusCode()) {
      System.out.println(response.getEntity(String.class));
      return 0;
    }
    System.err.println("Failed to format scheduler configuration: "
        + response.getEntity(String.class));
    return -1;
  } finally {
    // Always release the HTTP response and the underlying client.
    if (response != null) {
      response.close();
    }
    destroyClient();
  }
}
@Test(timeout = 10000)
public void testFormatSchedulerConf() throws Exception {
    try {
        super.setUp();
        GuiceServletConfig.setInjector(
            Guice.createInjector(new WebServletModule()));
        ResourceScheduler scheduler = rm.getResourceScheduler();
        MutableConfigurationProvider provider =
            ((MutableConfScheduler) scheduler).getMutableConfProvider();
        // Apply a global scheduler-conf mutation so formatting has something to wipe.
        SchedConfUpdateInfo schedUpdateInfo = new SchedConfUpdateInfo();
        HashMap<String, String> globalUpdates = new HashMap<>();
        globalUpdates.put("schedKey1", "schedVal1");
        schedUpdateInfo.setGlobalParams(globalUpdates);
        LogMutation log = provider.logAndApplyMutation(
            UserGroupInformation.getCurrentUser(), schedUpdateInfo);
        rm.getRMContext().getRMAdminService().refreshQueues();
        provider.confirmPendingMutation(log, true);
        Configuration schedulerConf = provider.getConfiguration();
        assertEquals("schedVal1", schedulerConf.get("schedKey1"));
        // Formatting must succeed (exit code 0) and remove the mutated key.
        int exitCode = cli.formatSchedulerConf("", resource());
        assertEquals(0, exitCode);
        schedulerConf = provider.getConfiguration();
        assertNull(schedulerConf.get("schedKey1"));
    } finally {
        cleanUp();
    }
}
/**
 * Convenience overload for integer arguments; delegates to {@code convert(...)}.
 */
public String convertInt(int i) {
    return convert(i);
}
@Test
public void withBackslash() {
    // Windows-style backslashes in the pattern are normalized to forward
    // slashes in the converted file name.
    FileNamePattern pp = new FileNamePattern("c:\\foo\\bar.%i", context);
    assertEquals("c:/foo/bar.3", pp.convertInt(3));
}
/**
 * Creates a new {@link Builder} with default settings.
 */
public static Builder builder() {
    return new Builder();
}
@Test
void throwsFeignExceptionIncludingBody() {
    server.enqueue(new MockResponse().setBody("success!"));
    // The decoder always fails, so the call must surface a FeignException that
    // carries the original request body.
    TestInterface api = Feign.builder().decoder((response, type) -> {
        throw new IOException("timeout");
    })
        .target(TestInterface.class, "http://localhost:" + server.getPort());
    boolean feignExceptionThrown = false;
    try {
        api.body("Request body");
    } catch (FeignException e) {
        feignExceptionThrown = true;
        assertThat(e.getMessage())
            .isEqualTo("timeout reading POST http://localhost:" + server.getPort() + "/");
        assertThat(e.contentUTF8()).isEqualTo("Request body");
    }
    // Fix: previously the test passed silently when no exception was thrown,
    // skipping every assertion in the catch block.
    assertThat(feignExceptionThrown).isTrue();
}
/**
 * Builds the fully-qualified Pub/Sub topic resource path
 * ("projects/&lt;projectId&gt;/topics/&lt;topicName&gt;") for the given pair.
 */
public static TopicPath topicPathFromName(String projectId, String topicName) {
    String path = "projects/" + projectId + "/topics/" + topicName;
    return new TopicPath(path);
}
@Test
public void topicPathFromNameWellFormed() {
    TopicPath path = PubsubClient.topicPathFromName("test", "something");
    // Full resource name, short form, and Data Catalog segments must all
    // derive consistently from the same project/topic pair.
    assertEquals("projects/test/topics/something", path.getPath());
    assertEquals("/topics/test/something", path.getFullPath());
    assertEquals(ImmutableList.of("test", "something"), path.getDataCatalogSegments());
}