focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Returns a copy of the forked-JVM argument list in which every system-property
 * argument (one starting with "-D") has been passed through {@code interpolate};
 * all other arguments are copied through unchanged.
 */
protected static List<String> interpolateSysProps(List<String> forkedJvmArgs) {
    List<String> result = new ArrayList<>();
    for (String jvmArg : forkedJvmArgs) {
        // Only -D system properties are interpolated; -X and other flags pass through.
        result.add(jvmArg.startsWith("-D") ? interpolate(jvmArg) : jvmArg);
    }
    return result;
}
/**
 * Verifies that interpolateSysProps() expands {@code ${sys:...}} placeholders
 * in -D arguments only, leaves non -D arguments untouched, and carries
 * backslashes through literally.
 */
@Test
public void testInterpolation() throws Exception {
    List<String> input = new ArrayList<>();
    // System properties referenced by the ${sys:...} placeholders below.
    System.setProperty("logpath", "qwertyuiop");
    System.setProperty("logslash", "qwerty\\uiop");
    try {
        input.add("-Dlogpath=\"${sys:logpath}\"");
        input.add("-Dlogpath=no-interpolation");
        input.add("-Xlogpath=\"${sys:logpath}\"");
        input.add("-Dlogpath=\"${sys:logslash}\"");
        List<String> output = TikaServerConfig.interpolateSysProps(input);
        // -D argument with a placeholder is interpolated.
        assertEquals("-Dlogpath=\"qwertyuiop\"", output.get(0));
        // -D argument without a placeholder passes through unchanged.
        assertEquals("-Dlogpath=no-interpolation", output.get(1));
        // Non -D arguments are never interpolated.
        assertEquals("-Xlogpath=\"${sys:logpath}\"", output.get(2));
        // Backslashes in the property value survive interpolation.
        assertEquals("-Dlogpath=\"qwerty\\uiop\"", output.get(3));
    } finally {
        // Always clear the global system properties to avoid cross-test leakage.
        System.clearProperty("logpath");
        System.clearProperty("logslash");
    }
}
/**
 * Merges two initial index-iteration pointers into one pointer covering the
 * union of their ranges: the smaller "from" bound and the larger "to" bound,
 * with inclusiveness resolved by the min/max helpers.
 * Preconditions (assertions): both pointers iterate in the same direction and
 * neither has started iterating yet (no last entry key data).
 */
public static IndexIterationPointer union(IndexIterationPointer left, IndexIterationPointer right, Comparator comparator) {
    assert left.isDescending() == right.isDescending();
    // NOTE(review): left is checked via the getter, right via direct field
    // access — presumably equivalent; confirm.
    assert left.getLastEntryKeyData() == null && right.lastEntryKeyData == null : "Can merge only initial pointers";
    // Union of ranges: take the minimum lower bound ...
    Tuple2<Comparable<?>, Boolean> newFrom = min(left.getFrom(), left.isFromInclusive(), right.getFrom(), right.isFromInclusive(), comparator);
    // ... and the maximum upper bound.
    Tuple2<Comparable<?>, Boolean> newTo = max(left.getTo(), left.isToInclusive(), right.getTo(), right.isToInclusive(), comparator);
    return IndexIterationPointer.create(newFrom.f0(), newFrom.f1(), newTo.f0(), newTo.f1(), left.isDescending(), null);
}
/**
 * union() of two overlapping ranges keeps the wider bound on each side and
 * preserves that bound's own inclusiveness (atMost vs lessThan, atLeast vs
 * greaterThan).
 */
@Test
void unionRange() {
    // Upper bounds: the larger "to" wins, keeping its inclusiveness.
    assertThat(union(pointer(lessThan(5)), pointer(lessThan(6)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR)).isEqualTo(pointer(lessThan(6)));
    assertThat(union(pointer(lessThan(5)), pointer(atMost(6)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR)).isEqualTo(pointer(atMost(6)));
    assertThat(union(pointer(atMost(5)), pointer(atMost(6)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR)).isEqualTo(pointer(atMost(6)));
    assertThat(union(pointer(atMost(5)), pointer(lessThan(6)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR)).isEqualTo(pointer(lessThan(6)));
    // Lower bounds: the smaller "from" wins, keeping its inclusiveness.
    assertThat(union(pointer(greaterThan(5)), pointer(greaterThan(6)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR)).isEqualTo(pointer(greaterThan(5)));
    assertThat(union(pointer(greaterThan(5)), pointer(atLeast(6)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR)).isEqualTo(pointer(greaterThan(5)));
    assertThat(union(pointer(atLeast(5)), pointer(atLeast(6)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR)).isEqualTo(pointer(atLeast(5)));
    assertThat(union(pointer(atLeast(5)), pointer(greaterThan(6)), OrderedIndexStore.SPECIAL_AWARE_COMPARATOR)).isEqualTo(pointer(atLeast(5)));
}
/**
 * Decrements the reserved capacity tracked for {@code txnId}.
 * Delegates to decrement0 with a hard-coded {@code true} flag; the flag's
 * semantics are defined by decrement0 (not visible here) — confirm there.
 */
@Override
public void decrement(@Nonnull UUID txnId) {
    decrement0(txnId, true);
}
/**
 * Incrementing and then fully decrementing a txn's reserved capacity must
 * remove its tracking entry and leave the node-wide counter at zero.
 */
@Test
public void decrement() {
    UUID txnId = UuidUtil.newSecureUUID();
    for (int i = 0; i < 11; i++) {
        counter.increment(txnId, false);
    }
    for (int i = 0; i < 11; i++) {
        counter.decrement(txnId);
    }
    Map<UUID, Long> countPerTxnId = counter.getReservedCapacityCountPerTxnId();
    // Fully released: the txn id must no longer be tracked ...
    assertNull(countPerTxnId.get(txnId));
    // ... and no node-wide capacity may remain reserved.
    assertEquals(0L, nodeWideUsedCapacityCounter.currentValue());
}
/**
 * Creates one meter per log level (plus an aggregate "all" meter) in the
 * metric registry, then completes startup via super.start().
 */
@Override
public void start() {
    this.all = registry.meter(name(getName(), "all"));
    this.trace = registry.meter(name(getName(), "trace"));
    this.debug = registry.meter(name(getName(), "debug"));
    this.info = registry.meter(name(getName(), "info"));
    this.warn = registry.meter(name(getName(), "warn"));
    this.error = registry.meter(name(getName(), "error"));
    // Meters must exist before the appender is marked started.
    super.start();
}
/**
 * An appender created without an explicit registry must report into the
 * shared default registry.
 */
@Test
public void usesDefaultRegistry() {
    SharedMetricRegistries.add(InstrumentedAppender.DEFAULT_REGISTRY, registry);
    final InstrumentedAppender shared = new InstrumentedAppender();
    shared.start();
    when(event.getLevel()).thenReturn(Level.INFO);
    shared.doAppend(event);
    assertThat(SharedMetricRegistries.names()).contains(InstrumentedAppender.DEFAULT_REGISTRY);
    // The INFO meter in the shared registry must have recorded the event.
    assertThat(registry.meter(METRIC_NAME_PREFIX + ".info").getCount())
        .isEqualTo(1);
}
/**
 * Precise (single-value) sharding: delegates to the configured standard
 * sharding algorithm instance.
 */
@SuppressWarnings("unchecked")
@Override
public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Comparable<?>> shardingValue) {
    return standardShardingAlgorithm.doSharding(availableTargetNames, shardingValue);
}
/**
 * A CLASS_BASED algorithm with the "standard" strategy must delegate precise
 * sharding to the configured fixture class.
 */
@Test
void assertPreciseDoSharding() {
    ClassBasedShardingAlgorithm algorithm = (ClassBasedShardingAlgorithm) TypedSPILoader.getService(ShardingAlgorithm.class, "CLASS_BASED",
        PropertiesBuilder.build(new Property("strategy", "standard"), new Property("algorithmClassName", ClassBasedStandardShardingAlgorithmFixture.class.getName())));
    Collection<String> availableTargetNames = Arrays.asList("t_order_0", "t_order_1", "t_order_2", "t_order_3");
    // order_id 0 must be routed to t_order_0 by the fixture algorithm.
    assertThat(algorithm.doSharding(availableTargetNames, new PreciseShardingValue<>("t_order", "order_id", new DataNodeInfo("t_order_", 1, '0'), 0)), is("t_order_0"));
}
/**
 * Registers {@code range} as the significant code of its line.
 * Requires that on() was called first, that the range does not span multiple
 * lines, and that no range was previously reported for the same line.
 */
@Override
public DefaultSignificantCode addRange(TextRange range) {
    Preconditions.checkState(this.inputFile != null, "addRange() should be called after on()");
    int startLine = range.start().line();
    int endLine = range.end().line();
    // A significant-code range is meaningful only within a single line.
    Preconditions.checkArgument(startLine == endLine, "Ranges of significant code must be located in a single line");
    Preconditions.checkState(!significantCodePerLine.containsKey(startLine),
        "Significant code was already reported for line '%s'. Can only report once per line.", startLine);
    significantCodePerLine.put(startLine, range);
    return this;
}
/**
 * Calling addRange() without a prior on() must fail with a clear
 * IllegalStateException.
 */
@Test
public void fail_if_add_range_before_setting_file() {
    assertThatThrownBy(() -> underTest.addRange(inputFile.selectLine(1)))
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("addRange() should be called after on()");
}
/**
 * Sends a single SMS: validates template, channel and mobile number, writes a
 * send log, and — only when both template and channel are enabled — publishes
 * an MQ message so the actual send happens asynchronously.
 *
 * @return the id of the created SMS send-log record
 */
@Override
public Long sendSingleSms(String mobile, Long userId, Integer userType, String templateCode, Map<String, Object> templateParams) {
    // Validate that the SMS template exists and is legal.
    SmsTemplateDO template = validateSmsTemplate(templateCode);
    // Validate that the template's SMS channel is legal.
    SmsChannelDO smsChannel = validateSmsChannel(template.getChannelId());
    // Validate the mobile number.
    mobile = validateMobile(mobile);
    // Build the ordered template parameters here, up front, so parameter errors
    // are caught before the send log is inserted.
    List<KeyValue<String, Object>> newTemplateParams = buildTemplateParams(template, templateParams);
    // Create the send log. If the template or channel is disabled, the SMS is
    // not sent — only the log record is written.
    Boolean isSend = CommonStatusEnum.ENABLE.getStatus().equals(template.getStatus()) && CommonStatusEnum.ENABLE.getStatus().equals(smsChannel.getStatus());
    String content = smsTemplateService.formatSmsTemplateContent(template.getContent(), templateParams);
    Long sendLogId = smsLogService.createSmsLog(mobile, userId, userType, isSend, template, content, templateParams);
    // Publish an MQ message so the actual SMS send is executed asynchronously.
    if (isSend) {
        smsProducer.sendSmsSendMessage(sendLogId, mobile, template.getChannelId(), template.getApiTemplateId(), newTemplateParams);
    }
    return sendLogId;
}
/**
 * When the SMS template is disabled, the send log must still be created with
 * isSend = false, and no MQ send message may be published.
 */
@Test
public void testSendSingleSms_successWhenSmsTemplateDisable() {
    // Prepare arguments.
    String mobile = randomString();
    Long userId = randomLongId();
    Integer userType = randomEle(UserTypeEnum.values()).getValue();
    String templateCode = randomString();
    Map<String, Object> templateParams = MapUtil.<String, Object>builder().put("code", "1234")
        .put("op", "login").build();
    // Mock SmsTemplateService: the template itself is DISABLED.
    SmsTemplateDO template = randomPojo(SmsTemplateDO.class, o -> {
        o.setStatus(CommonStatusEnum.DISABLE.getStatus());
        o.setContent("验证码为{code}, 操作为{op}");
        o.setParams(Lists.newArrayList("code", "op"));
    });
    when(smsTemplateService.getSmsTemplateByCodeFromCache(eq(templateCode))).thenReturn(template);
    String content = randomString();
    when(smsTemplateService.formatSmsTemplateContent(eq(template.getContent()), eq(templateParams)))
        .thenReturn(content);
    // Mock SmsChannelService: the channel is enabled (template alone blocks the send).
    SmsChannelDO smsChannel = randomPojo(SmsChannelDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus()));
    when(smsChannelService.getSmsChannel(eq(template.getChannelId()))).thenReturn(smsChannel);
    // Mock SmsLogService: the log must be written with isSend == false.
    Long smsLogId = randomLongId();
    when(smsLogService.createSmsLog(eq(mobile), eq(userId), eq(userType), eq(Boolean.FALSE), eq(template),
        eq(content), eq(templateParams))).thenReturn(smsLogId);
    // Invoke.
    Long resultSmsLogId = smsSendService.sendSingleSms(mobile, userId, userType, templateCode, templateParams);
    // Assert the returned log id.
    assertEquals(smsLogId, resultSmsLogId);
    // Assert that no asynchronous send was triggered.
    verify(smsProducer, times(0)).sendSmsSendMessage(anyLong(), anyString(), anyLong(), any(), anyList());
}
/**
 * Returns the names of the top-level fields of a JSON object, or null when
 * the input is null, unparseable, or not a JSON object.
 */
@Udf
public List<String> keys(@UdfParameter final String jsonObj) {
    if (jsonObj == null) {
        return null;
    }
    final JsonNode parsed = UdfJsonMapper.parseJson(jsonObj);
    final boolean isJsonObject = !parsed.isMissingNode() && parsed.isObject();
    if (!isJsonObject) {
        return null;
    }
    final List<String> fieldNames = new ArrayList<>();
    parsed.fieldNames().forEachRemaining(name -> fieldNames.add(name));
    return fieldNames;
}
/**
 * The JSON literal "null" is not a JSON object, so keys() must return null.
 * (Note: this passes the string "null", not a null reference.)
 */
@Test
public void shouldReturnNullForNull() {
    assertNull(udf.keys("null"));
}
/** Thread affinity is enabled iff at least one allowed-CPU set was configured. */
public boolean isEnabled() {
    return !allowedCpusList.isEmpty();
}
/**
 * Parses a complex affinity spec mixing single cpus, a range, and grouped
 * sets with a thread count ([...]:n repeats the set n times).
 */
@Test
public void whenComplex() {
    ThreadAffinity threadAffinity = new ThreadAffinity("1,3-4,[5-8]:2,10,[20,21,32]:2");
    assertTrue(threadAffinity.isEnabled());
    // 1 + (3,4) + two [5-8] slots + 10 + two [20,21,32] slots = 8 entries.
    assertEquals(8, threadAffinity.allowedCpusList.size());
    assertEquals(threadAffinity.allowedCpusList.get(0), newBitset(1));
    assertEquals(threadAffinity.allowedCpusList.get(1), newBitset(3));
    assertEquals(threadAffinity.allowedCpusList.get(2), newBitset(4));
    // [5-8]:2 -> two threads sharing the 5-8 cpu set.
    assertEquals(threadAffinity.allowedCpusList.get(3), newBitset(5, 6, 7, 8));
    assertEquals(threadAffinity.allowedCpusList.get(4), newBitset(5, 6, 7, 8));
    assertEquals(threadAffinity.allowedCpusList.get(5), newBitset(10));
    // [20,21,32]:2 -> two threads sharing the listed cpu set.
    assertEquals(threadAffinity.allowedCpusList.get(6), newBitset(20, 21, 32));
    assertEquals(threadAffinity.allowedCpusList.get(7), newBitset(20, 21, 32));
}
/**
 * Groups cluster names by the service name extracted from them.
 * Null clusters and clusters whose name yields no service are skipped.
 */
public static Map<String, Set<String>> getService2ClusterMapping(List<Cluster> clusters) {
    Map<String, Set<String>> mapping = new HashMap<>();
    for (Cluster cluster : clusters) {
        if (cluster == null) {
            continue;
        }
        String clusterName = cluster.getName();
        // Only clusters whose name resolves to a service contribute an entry.
        getServiceNameFromCluster(clusterName).ifPresent(serviceName ->
                mapping.computeIfAbsent(serviceName, unused -> new HashSet<>()).add(clusterName));
    }
    return mapping;
}
/**
 * Null clusters, null names, and names that yield no service are skipped;
 * the remaining cluster names are grouped by their extracted service name.
 */
@Test
public void testGetService2ClusterMapping() {
    List<Cluster> clusters = Arrays.asList(
        null,
        createCluster("outbound|8080||serviceA.default.svc.cluster.local"),
        createCluster("outbound|8080|subset1|serviceB.default.svc.cluster.local"),
        createCluster("outbound|8080|subset2|serviceB.default.svc.cluster.local"),
        createCluster("outbound|8080|serviceC.default.svc.cluster.local"),
        createCluster(null)
    );
    Map<String, Set<String>> result = XdsProtocolTransformer.getService2ClusterMapping(clusters);
    // serviceC's name has only 3 segments — presumably rejected by the
    // service-name parser, so only A and B remain; confirm against the parser.
    Assert.assertEquals(2, result.size());
    Assert.assertTrue(result.containsKey("serviceA"));
    Assert.assertTrue(result.containsKey("serviceB"));
    Assert.assertEquals(1, result.get("serviceA").size());
    // Both subsets of serviceB map to distinct cluster names.
    Assert.assertEquals(2, result.get("serviceB").size());
}
/**
 * Returns the tangent of an INT value interpreted as radians.
 * Delegates to the Double overload; a null input yields a null result.
 */
@Udf(description = "Returns the tangent of an INT value")
public Double tan(
    @UdfParameter(
        value = "value",
        description = "The value in radians to get the tangent of."
    )
    final Integer value
) {
    return tan(value == null ? null : value.doubleValue());
}
/**
 * tan() for positive radian inputs across the Double, Integer and Long
 * overloads.
 */
@Test
public void shouldHandlePositive() {
    assertThat(udf.tan(0.43), closeTo(0.45862102348555517, 0.000000000000001));
    // tan(pi) and tan(2*pi) are ~0 up to floating-point error.
    assertThat(udf.tan(Math.PI), closeTo(0, 0.000000000000001));
    assertThat(udf.tan(Math.PI * 2), closeTo(0, 0.000000000000001));
    // tan(pi/2) is astronomically large; the expected value is the exact
    // double result, so the tiny tolerance still passes.
    assertThat(udf.tan(Math.PI / 2), closeTo(1.633123935319537E16, 0.000000000000001));
    // Integer and Long overloads agree with the Double overload.
    assertThat(udf.tan(6), closeTo(-0.29100619138474915, 0.000000000000001));
    assertThat(udf.tan(6L), closeTo(-0.29100619138474915, 0.000000000000001));
}
/**
 * Replaces the account's PNI identity key, signed prekeys, optional PQ
 * last-resort prekeys, and PNI registration ids in a retried transactional
 * update, after clearing cached state and single-use prekeys.
 *
 * @throws MismatchedDevicesException when the supplied key/id maps do not
 *         match the account's devices (validated up front, before any writes)
 */
public Account updatePniKeys(final Account account,
    final IdentityKey pniIdentityKey,
    final Map<Byte, ECSignedPreKey> pniSignedPreKeys,
    @Nullable final Map<Byte, KEMSignedPreKey> pniPqLastResortPreKeys,
    final Map<Byte, Integer> pniRegistrationIds) throws MismatchedDevicesException {
    // Fail fast before touching redis/dynamo.
    validateDevices(account, pniSignedPreKeys, pniPqLastResortPreKeys, pniRegistrationIds);
    final UUID aci = account.getIdentifier(IdentityType.ACI);
    final UUID pni = account.getIdentifier(IdentityType.PNI);
    // NOTE(review): both identifier arguments here are pni; if the first
    // parameter of buildPniKeyWriteItems is meant to be the ACI, this needs a
    // second look — confirm against that method's signature.
    final Collection<TransactWriteItem> keyWriteItems = buildPniKeyWriteItems(pni, pni, pniSignedPreKeys, pniPqLastResortPreKeys);
    // Drop the cached account and stale single-use prekeys, then retry the
    // transactional update against a fresh read of the account by ACI.
    return redisDeleteAsync(account)
        .thenCompose(ignored -> keysManager.deleteSingleUsePreKeys(pni))
        .thenCompose(ignored -> updateTransactionallyWithRetriesAsync(account,
            a -> setPniKeys(a, pniIdentityKey, pniRegistrationIds),
            accounts::updateTransactionallyAsync,
            () -> accounts.getByAccountIdentifierAsync(aci).thenApply(Optional::orElseThrow),
            a -> keyWriteItems,
            AccountChangeValidator.GENERAL_CHANGE_VALIDATOR,
            MAX_UPDATE_ATTEMPTS))
        .join();
}
/**
 * updatePniKeys must reject key sets that do not cover every device (device 2
 * has no PQ last-resort key here) before performing any storage writes.
 */
@Test
void testPniPqUpdate_incompleteKeys() {
    final String number = "+14152222222";
    final byte deviceId2 = 2;
    List<Device> devices = List.of(DevicesHelper.createDevice(Device.PRIMARY_ID, 0L, 101),
        DevicesHelper.createDevice(deviceId2, 0L, 102));
    Account account = AccountsHelper.generateTestAccount(number, UUID.randomUUID(), UUID.randomUUID(), devices,
        new byte[UnidentifiedAccessUtil.UNIDENTIFIED_ACCESS_KEY_LENGTH]);
    final ECKeyPair identityKeyPair = Curve.generateKeyPair();
    // EC keys and registration ids cover both devices ...
    final Map<Byte, ECSignedPreKey> newSignedKeys = Map.of(
        Device.PRIMARY_ID, KeysHelper.signedECPreKey(1, identityKeyPair),
        deviceId2, KeysHelper.signedECPreKey(2, identityKeyPair));
    // ... but PQ keys intentionally cover only the primary device.
    final Map<Byte, KEMSignedPreKey> newSignedPqKeys = Map.of(
        Device.PRIMARY_ID, KeysHelper.signedKEMPreKey(3, identityKeyPair));
    Map<Byte, Integer> newRegistrationIds = Map.of(Device.PRIMARY_ID, 201, deviceId2, 202);
    final IdentityKey pniIdentityKey = new IdentityKey(Curve.generateKeyPair().getPublicKey());
    assertThrows(MismatchedDevicesException.class,
        () -> accountsManager.updatePniKeys(account, pniIdentityKey, newSignedKeys, newSignedPqKeys, newRegistrationIds));
    // Validation must fail fast: no interactions with either store.
    verifyNoInteractions(accounts);
    verifyNoInteractions(keysManager);
}
/**
 * Returns whether the current configuration map differs from the desired one
 * (plain Map equality: same keys mapped to same values).
 */
static boolean isDifferent(Map<String, String> current, Map<String, String> desired) {
    boolean identical = current.equals(desired);
    return !identical;
}
/**
 * Maps with equal content are not "different", regardless of the insertion
 * order of their entries.
 */
@Test
public void testIsNotDifferent() {
    Map<String, String> current = new HashMap<>(3);
    current.put("server.1", "my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");
    current.put("server.2", "my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");
    current.put("server.3", "my-cluster-zookeeper-2.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");
    Map<String, String> desired = new HashMap<>(3);
    desired.put("server.1", "my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");
    desired.put("server.2", "my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");
    desired.put("server.3", "my-cluster-zookeeper-2.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");
    assertThat(ZookeeperScaler.isDifferent(current, desired), is(false));
    // Same entries inserted in a different order are still equal maps.
    Map<String, String> desired2 = new HashMap<>(3);
    desired2.put("server.1", "my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");
    desired2.put("server.3", "my-cluster-zookeeper-2.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");
    desired2.put("server.2", "my-cluster-zookeeper-1.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");
    assertThat(ZookeeperScaler.isDifferent(current, desired2), is(false));
}
/**
 * Builds the copy-quota manager configuration (max bytes/sec, sample count,
 * window size) from the remote log manager configuration.
 */
static RLMQuotaManagerConfig copyQuotaManagerConfig(RemoteLogManagerConfig rlmConfig) {
    return new RLMQuotaManagerConfig(rlmConfig.remoteLogManagerCopyMaxBytesPerSecond(),
        rlmConfig.remoteLogManagerCopyNumQuotaSamples(),
        rlmConfig.remoteLogManagerCopyQuotaWindowSizeSeconds());
}
/**
 * copyQuotaManagerConfig must reflect both the defaults and explicit
 * overrides of the three copy-quota properties.
 */
@Test
public void testCopyQuotaManagerConfig() {
    Properties defaultProps = new Properties();
    defaultProps.put("zookeeper.connect", kafka.utils.TestUtils.MockZkConnect());
    appendRLMConfig(defaultProps);
    KafkaConfig defaultRlmConfig = KafkaConfig.fromProps(defaultProps);
    RLMQuotaManagerConfig defaultConfig = RemoteLogManager.copyQuotaManagerConfig(defaultRlmConfig.remoteLogManagerConfig());
    // Defaults pass through untouched.
    assertEquals(DEFAULT_REMOTE_LOG_MANAGER_COPY_MAX_BYTES_PER_SECOND, defaultConfig.quotaBytesPerSecond());
    assertEquals(DEFAULT_REMOTE_LOG_MANAGER_COPY_QUOTA_WINDOW_NUM, defaultConfig.numQuotaSamples());
    assertEquals(DEFAULT_REMOTE_LOG_MANAGER_COPY_QUOTA_WINDOW_SIZE_SECONDS, defaultConfig.quotaWindowSizeSeconds());
    Properties customProps = new Properties();
    customProps.put("zookeeper.connect", kafka.utils.TestUtils.MockZkConnect());
    // Explicit overrides for all three copy-quota settings.
    customProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPY_MAX_BYTES_PER_SECOND_PROP, 100);
    customProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPY_QUOTA_WINDOW_NUM_PROP, 31);
    customProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPY_QUOTA_WINDOW_SIZE_SECONDS_PROP, 1);
    appendRLMConfig(customProps);
    KafkaConfig config = KafkaConfig.fromProps(customProps);
    RLMQuotaManagerConfig rlmCopyQuotaManagerConfig = RemoteLogManager.copyQuotaManagerConfig(config.remoteLogManagerConfig());
    assertEquals(100L, rlmCopyQuotaManagerConfig.quotaBytesPerSecond());
    assertEquals(31, rlmCopyQuotaManagerConfig.numQuotaSamples());
    assertEquals(1, rlmCopyQuotaManagerConfig.quotaWindowSizeSeconds());
}
/**
 * Split-brain protection check: returns true when at least
 * {@code minimumClusterSize} members are considered alive. A member counts as
 * alive when it passes the ICMP check and is either the local member or
 * reported alive by the failure detector.
 */
@Override
public boolean apply(Collection<Member> members) {
    if (members.size() < minimumClusterSize) {
        return false;
    }
    long now = Clock.currentTimeMillis();
    int aliveCount = 0;
    for (Member member : members) {
        // Short-circuit order matters: ICMP first, then local-member /
        // failure-detector heartbeat — same evaluation order as before.
        if (isAlivePerIcmp(member) && (member.localMember() || failureDetector.isAlive(member, now))) {
            aliveCount++;
        }
    }
    return aliveCount >= minimumClusterSize;
}
/**
 * Split-brain protection holds when all members are alive both by ICMP ping
 * and by heartbeat according to the probabilistic failure detector.
 */
@Test
public void testSplitBrainProtectionPresent_whenIcmpAndHeartbeatAlive() {
    splitBrainProtectionFunction = new ProbabilisticSplitBrainProtectionFunction(splitBrainProtectionSize, 10000, 10000, 200, 100, 10);
    prepareSplitBrainProtectionFunctionForIcmpFDTest(splitBrainProtectionFunction);
    // heartbeat each second for all members for 5 seconds
    heartbeat(5, 1000);
    pingSuccessfully();
    assertTrue(splitBrainProtectionFunction.apply(Arrays.asList(members)));
}
/**
 * Zips the file or directory at {@code srcPath} using the default charset.
 *
 * @return the created zip file
 * @throws UtilException on failure
 */
public static File zip(String srcPath) throws UtilException {
    return zip(srcPath, DEFAULT_CHARSET);
}
/**
 * Zips a directory straight to an OutputStream (see dromara/hutool#944).
 * Disabled: depends on local d:/ paths.
 */
@Test
@Disabled
public void zipStreamTest(){
    //https://github.com/dromara/hutool/issues/944
    final String dir = "d:/test";
    final String zip = "d:/test.zip";
    //noinspection IOStreamConstructor
    try (final OutputStream out = new FileOutputStream(zip)){
        // In real use, out would be HttpServletResponse.getOutputStream.
        ZipUtil.zip(out, Charset.defaultCharset(), false, null, new File(dir));
    } catch (final IOException e) {
        throw new IORuntimeException(e);
    }
}
/**
 * Logs every metric in the given sorted maps, one log line per metric, but
 * only when the underlying logger is enabled for the configured marker.
 * Iteration follows each map's sorted key order.
 */
@Override
public void report(SortedMap<MetricName, Gauge> gauges, SortedMap<MetricName, Counter> counters, SortedMap<MetricName, Histogram> histograms, SortedMap<MetricName, Meter> meters, SortedMap<MetricName, Timer> timers) {
    if (!loggerProxy.isEnabled(marker)) {
        return;
    }
    // Map.forEach walks the SortedMap in key order, matching the previous
    // entrySet() loops exactly.
    gauges.forEach(this::logGauge);
    counters.forEach(this::logCounter);
    histograms.forEach(this::logHistogram);
    meters.forEach(this::logMeter);
    timers.forEach(this::logTimer);
}
/**
 * An ERROR-level reporter must log meter values through the SLF4J error
 * channel with the expected parameterized message.
 */
@Test
public void reportsMeterValuesAtError() throws Exception {
    final Meter meter = mock(Meter.class);
    when(meter.getCount()).thenReturn(1L);
    when(meter.getMeanRate()).thenReturn(2.0);
    when(meter.getOneMinuteRate()).thenReturn(3.0);
    when(meter.getFiveMinuteRate()).thenReturn(4.0);
    when(meter.getFifteenMinuteRate()).thenReturn(5.0);
    // Reporting only happens when the level is enabled for the marker.
    when(logger.isErrorEnabled(marker)).thenReturn(true);
    errorReporter.report(this.map(), this.map(), this.map(), map("test.meter", meter), this.map());
    verify(logger).error(marker, "type={}, name={}, count={}, mean_rate={}, m1={}, m5={}, m15={}, rate_unit={}",
        "METER", "test.meter", 1L, 2.0, 3.0, 4.0, 5.0, "events/second");
}
/**
 * Downloads the agent binaries from the server, launches the agent process,
 * pumps its stdout/stderr into log appenders, and waits for it to exit.
 *
 * @return the agent process's exit code, or EXCEPTION_OCCURRED when
 *         downloading or launching fails
 */
@Override
public int run(String launcherVersion, String launcherMd5, ServerUrlGenerator urlGenerator, Map<String, String> env, Map<String, String> context) {
    int exitValue = 0;
    LOG.info("Agent launcher is version: {}", CurrentGoCDVersion.getInstance().fullVersion());
    String[] command = new String[]{};
    try {
        AgentBootstrapperArgs bootstrapperArgs = AgentBootstrapperArgs.fromProperties(context);
        // Fetch the agent jar, plugins zip and TFS impl jar if out of date.
        ServerBinaryDownloader agentDownloader = new ServerBinaryDownloader(urlGenerator, bootstrapperArgs);
        agentDownloader.downloadIfNecessary(DownloadableFile.AGENT);
        ServerBinaryDownloader pluginZipDownloader = new ServerBinaryDownloader(urlGenerator, bootstrapperArgs);
        pluginZipDownloader.downloadIfNecessary(DownloadableFile.AGENT_PLUGINS);
        ServerBinaryDownloader tfsImplDownloader = new ServerBinaryDownloader(urlGenerator, bootstrapperArgs);
        tfsImplDownloader.downloadIfNecessary(DownloadableFile.TFS_IMPL);
        command = agentInvocationCommand(agentDownloader.getMd5(), launcherMd5, pluginZipDownloader.getMd5(), tfsImplDownloader.getMd5(), env, context, agentDownloader.getExtraProperties());
        LOG.info("Launching Agent with command: {}", join(command, " "));
        Process agent = invoke(command);
        // The next lines prevent the child process from blocking on Windows
        AgentOutputAppender agentOutputAppenderForStdErr = new AgentOutputAppender(GO_AGENT_STDERR_LOG);
        AgentOutputAppender agentOutputAppenderForStdOut = new AgentOutputAppender(GO_AGENT_STDOUT_LOG);
        if (new SystemEnvironment().consoleOutToStdout()) {
            agentOutputAppenderForStdErr.writeTo(AgentOutputAppender.Outstream.STDERR);
            agentOutputAppenderForStdOut.writeTo(AgentOutputAppender.Outstream.STDOUT);
        }
        agent.getOutputStream().close();
        // Drain stderr/stdout on background threads so the child never blocks
        // on a full pipe.
        AgentConsoleLogThread stdErrThd = new AgentConsoleLogThread(agent.getErrorStream(), agentOutputAppenderForStdErr);
        stdErrThd.start();
        AgentConsoleLogThread stdOutThd = new AgentConsoleLogThread(agent.getInputStream(), agentOutputAppenderForStdOut);
        stdOutThd.start();
        Shutdown shutdownHook = new Shutdown(agent);
        Runtime.getRuntime().addShutdownHook(shutdownHook);
        try {
            exitValue = agent.waitFor();
        } catch (InterruptedException ie) {
            // NOTE(review): the interrupt status is not restored here
            // (Thread.currentThread().interrupt()); presumably intentional so
            // the caller can respawn the agent — confirm.
            LOG.error("Agent was interrupted. Terminating agent and respawning. {}", ie.toString());
            agent.destroy();
        } finally {
            removeShutdownHook(shutdownHook);
            stdErrThd.stopAndJoin();
            stdOutThd.stopAndJoin();
        }
    } catch (Exception e) {
        LOG.error("Exception while executing command: {} - {}", join(command, " "), e.toString());
        exitValue = EXCEPTION_OCCURRED;
    }
    return exitValue;
}
@Test @Timeout(10) //if it fails with timeout, that means stderr was not flushed public void shouldLogErrorStreamOfSubprocess() throws InterruptedException, IOException { final List<String> cmd = new ArrayList<>(); Process subProcess = mockProcess(); String stdErrMsg = "Mr. Agent writes to stderr!"; when(subProcess.getErrorStream()).thenReturn(new ByteArrayInputStream(stdErrMsg.getBytes())); String stdOutMsg = "Mr. Agent writes to stdout!"; when(subProcess.getInputStream()).thenReturn(new ByteArrayInputStream(stdOutMsg.getBytes())); when(subProcess.waitFor()).thenAnswer(invocation -> 42); AgentProcessParentImpl bootstrapper = createBootstrapper(cmd, subProcess); int returnCode = bootstrapper.run("bootstrapper_version", "bar", getURLGenerator(), new HashMap<>(), context()); assertThat(returnCode, is(42)); assertThat(FileUtils.readFileToString(stderrLog, UTF_8).contains(stdErrMsg), is(true)); assertThat(FileUtils.readFileToString(stdoutLog, UTF_8).contains(stdOutMsg), is(true)); }
/** Disabled resolver: never resolves a bookmark; always returns null. */
@Override
public Void resolve(final Local file, final boolean interactive) {
    return null;
}
/** The disabled resolver must return null for any local file. */
@Test
public void testResolve() {
    assertNull(new DisabledFilesystemBookmarkResolver().resolve(new NullLocal("/t"), false));
}
/** Returns the canonical function name of the NOT transform. */
@Override
public String getName() {
    return TransformFunctionType.NOT.getName();
}
/**
 * NOT(null) must resolve to a NOT transform named "not" whose every row is
 * null (all-rows null bitmap, zeroed expected values).
 */
@Test
public void testNotNullLiteral() {
    // The expression has no format placeholders; the original code passed
    // INT_SV_NULL_COLUMN through String.format, which silently ignored it —
    // build the expression string directly instead.
    ExpressionContext expression = RequestContextUtils.getExpression("Not(null)");
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertTrue(transformFunction instanceof NotOperatorTransformFunction);
    Assert.assertEquals(transformFunction.getName(), "not");
    int[] expectedValues = new int[NUM_ROWS];
    // Every row of NOT(null) is null.
    RoaringBitmap roaringBitmap = new RoaringBitmap();
    roaringBitmap.add(0L, NUM_ROWS);
    testTransformFunctionWithNull(transformFunction, expectedValues, roaringBitmap);
}
/** Removes the given master from sentinel monitoring via SENTINEL REMOVE. */
@Override
public void remove(NamedNode master) {
    connection.sync(RedisCommands.SENTINEL_REMOVE, master.getName());
}
/**
 * Removes the first monitored master.
 * NOTE(review): no assertion — this only verifies the call doesn't throw.
 */
@Test
public void testRemove() {
    Collection<RedisServer> masters = connection.masters();
    connection.remove(masters.iterator().next());
}
/**
 * Validates records and assigns offsets. When neither source nor target is
 * compressed, either converts to the target magic (on mismatch) or validates
 * in place; any compression on either side takes the compressed path.
 */
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter, MetricsRecorder metricsRecorder, BufferSupplier bufferSupplier) {
    if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) {
        // check the magic value
        if (!records.hasMatchingMagic(toMagic))
            // Magic mismatch forces a rewrite into the target format.
            return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder);
        else
            // Do in-place validation, offset assignment and maybe set timestamp
            return assignOffsetsNonCompressed(offsetCounter, metricsRecorder);
    } else
        return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier);
}
/**
 * Offsets must be assigned correctly when validating gzip-compressed V2
 * records while down-converting them to magic V0.
 */
@Test
public void testOffsetAssignmentAfterDownConversionV2ToV0Compressed() {
    long offset = 1234567;
    long now = System.currentTimeMillis();
    Compression compression = Compression.gzip().build();
    MemoryRecords records = createRecords(RecordBatch.MAGIC_VALUE_V2, now, compression);
    // Sanity: freshly created records start at offset 0.
    checkOffsets(records, 0);
    checkOffsets(new LogValidator(
        records,
        new TopicPartition("topic", 0),
        time,
        CompressionType.GZIP,
        compression,
        false,
        RecordBatch.MAGIC_VALUE_V0,
        TimestampType.CREATE_TIME,
        5000L,
        5000L,
        RecordBatch.NO_PARTITION_LEADER_EPOCH,
        AppendOrigin.CLIENT,
        MetadataVersion.latestTesting()
    ).validateMessagesAndAssignOffsets(
        PrimitiveRef.ofLong(offset),
        metricsRecorder,
        RequestLocal.withThreadConfinedCaching().bufferSupplier()
    ).validatedRecords, offset);
}
/**
 * Compares two non-null values by their natural ordering.
 */
@Override
protected int compareNonNull(T o1, T o2) {
    Comparator<T> naturalOrder = Comparator.naturalOrder();
    return naturalOrder.compare(o1, o2);
}
/** compareNonNull must follow natural (lexicographic) String ordering. */
@Test
public void should_compare_instances_according_to_their_natural_order() {
    // GIVEN
    String s1 = "aaa";
    String s2 = "bbb";
    // WHEN
    int less = NATURAL_ORDER_COMPARATOR.compareNonNull(s1, s2);
    int equal = NATURAL_ORDER_COMPARATOR.compareNonNull(s1, s1);
    int greater = NATURAL_ORDER_COMPARATOR.compareNonNull(s2, s1);
    // THEN
    then(less).isNegative();
    then(equal).isZero();
    then(greater).isPositive();
}
/**
 * Converts a PHI string into a print-friendly representation, replacing null
 * and empty inputs with their placeholder values and truncating the output to
 * logPhiMaxBytes characters when a positive limit is configured.
 */
public String convertToPrintFriendlyString(String phiString) {
    if (phiString == null) {
        return NULL_REPLACEMENT_VALUE;
    }
    if (phiString.isEmpty()) {
        return EMPTY_REPLACEMENT_VALUE;
    }
    // Truncate only when a positive limit is configured and the input exceeds it.
    int limit = phiString.length();
    if (logPhiMaxBytes > 0 && logPhiMaxBytes < limit) {
        limit = logPhiMaxBytes;
    }
    StringBuilder printFriendly = new StringBuilder(limit + STRING_BUFFER_PAD_SIZE);
    for (int i = 0; i < limit; ++i) {
        appendCharacterAsPrintFriendlyString(printFriendly, phiString.charAt(i));
    }
    return printFriendly.toString();
}
/**
 * The byte[] overload must mirror the String overload for null, empty, and a
 * representative HL7 message.
 */
@Test
public void testConvertBytesToPrintFriendlyString() {
    assertEquals(hl7util.NULL_REPLACEMENT_VALUE, hl7util.convertToPrintFriendlyString((byte[]) null));
    assertEquals(hl7util.EMPTY_REPLACEMENT_VALUE, hl7util.convertToPrintFriendlyString(new byte[0]));
    assertEquals(EXPECTED_MESSAGE, hl7util.convertToPrintFriendlyString(TEST_MESSAGE_BYTES));
}
/**
 * Creates a checkpoint mark at the current fetch offset and asks the
 * subscriber to refill its buffer.
 */
@Override
public CheckpointMarkImpl getCheckpointMark() {
    // By checkpointing, the runtime indicates it has finished processing all data it has already
    // pulled. This means we can ask Pub/Sub Lite to refill our in-memory buffer without causing
    // unbounded memory usage.
    subscriber.rebuffer();
    return new CheckpointMarkImpl(fetchOffset, committer);
}
/**
 * getCheckpointMark() must rebuffer the subscriber and expose the offset
 * just past the last processed message.
 */
@Test
public void getCheckpointMark() throws Exception {
    startSubscriber();
    advancePastMessage(2);
    CheckpointMarkImpl mark = reader.getCheckpointMark();
    verify(subscriber).rebuffer();
    // Offset 3 = the first offset not yet processed.
    assertEquals(3, mark.offset.value());
}
/**
 * Evaluates the script read from the given stream (fully consumed into a
 * string first).
 * NOTE(review): the stream is not closed here — presumably the caller's
 * responsibility; confirm against FileUtils.toString.
 */
public JsValue eval(InputStream is) {
    return eval(FileUtils.toString(is));
}
/** Boolean results of simple JS comparison expressions. */
@Test
void testBoolean() {
    assertFalse(je.eval("1 == 2").isTrue());
    assertTrue(je.eval("1 == 1").isTrue());
}
/**
 * Returns the worker id: a previously persisted id wins when present,
 * otherwise a new one is generated; the result is then passed to logWarning
 * together with the supplied properties before being returned.
 */
@Override
public int generate(final Properties props) {
    int workerId = loadExistedWorkerId().orElseGet(() -> generateNewWorkerId());
    logWarning(workerId, props);
    return workerId;
}
/**
 * A worker id already persisted in the repository must win over the id
 * configured in the properties.
 */
@Test
void assertGenerateWithExistedWorkerId() {
    ClusterPersistRepository repository = mock(ClusterPersistRepository.class);
    when(repository.query("/nodes/compute_nodes/worker_id/foo_id")).thenReturn("10");
    // Configured worker id "1" must be ignored in favor of the persisted "10".
    assertThat(new ClusterWorkerIdGenerator(repository, "foo_id").generate(PropertiesBuilder.build(new Property(WorkerIdGenerator.WORKER_ID_KEY, "1"))), is(10));
}
/**
 * Copies the file or directory at {@code srcPath} to {@code destPath}.
 *
 * @param isOverride whether an existing destination may be overwritten
 * @return the destination file
 * @throws IORuntimeException on I/O failure
 */
public static File copy(String srcPath, String destPath, boolean isOverride) throws IORuntimeException {
    return copy(file(srcPath), file(destPath), isOverride);
}
/**
 * Copying a file with override enabled must produce a destination of equal
 * length.
 */
@Test
public void copyTest() {
    final File srcFile = FileUtil.file("hutool.jpg");
    final File destFile = FileUtil.file("hutool.copy.jpg");
    FileUtil.copy(srcFile, destFile, true);
    assertTrue(destFile.exists());
    assertEquals(srcFile.length(), destFile.length());
}
/**
 * Partially updates an instance: only the supplied fields (metadata, weight,
 * enabled) are applied to the patch object; blank/null fields are left alone.
 *
 * @return "ok" on success
 */
@CanDistro
@PatchMapping
@Secured(action = ActionTypes.WRITE)
public String patch(@RequestParam(defaultValue = Constants.DEFAULT_NAMESPACE_ID) String namespaceId,
        @RequestParam String serviceName, @RequestParam String ip,
        @RequestParam(defaultValue = UtilsAndCommons.DEFAULT_CLUSTER_NAME) String cluster,
        @RequestParam Integer port, @RequestParam Double weight, @RequestParam Boolean enabled,
        @RequestParam String metadata) throws Exception {
    NamingUtils.checkServiceNameFormat(serviceName);
    InstancePatchObject patchObject = new InstancePatchObject(cluster, ip, port);
    // Only patch fields the caller actually supplied.
    if (StringUtils.isNotBlank(metadata)) {
        patchObject.setMetadata(UtilsAndCommons.parseMetadata(metadata));
    }
    if (weight != null) {
        // Weight must be inside the allowed range before being applied.
        checkWeight(weight);
        patchObject.setWeight(weight);
    }
    if (enabled != null) {
        patchObject.setEnabled(enabled);
    }
    instanceServiceV2.patchInstance(namespaceId, serviceName, patchObject);
    return "ok";
}
/** The PATCH endpoint must return "ok" for a well-formed instance patch. */
@Test
void patch() throws Exception {
    // NOTE(review): "healthy" and "ephemeral" are sent although the patch
    // endpoint does not declare them — presumably ignored; confirm.
    MockHttpServletRequestBuilder builder = MockMvcRequestBuilders.patch(
            UtilsAndCommons.DEFAULT_NACOS_NAMING_CONTEXT_V2 + UtilsAndCommons.NACOS_NAMING_INSTANCE_CONTEXT)
        .param("namespaceId", TEST_NAMESPACE).param("serviceName", TEST_SERVICE_NAME).param("ip", TEST_IP)
        .param("cluster", TEST_CLUSTER_NAME).param("port", "9999").param("healthy", "true").param("weight", "2.0")
        .param("enabled", "true").param("metadata", TEST_METADATA).param("ephemeral", "false");
    String actualValue = mockmvc.perform(builder).andReturn().getResponse().getContentAsString();
    assertEquals("ok", actualValue);
}
/**
 * File-cleanup job: for each CleanupParams entry in the job params (a JSON
 * array), recursively scans the configured directory and deletes files whose
 * name matches the configured pattern and whose last-modified time is older
 * than the retention window (in hours).
 *
 * @return success result with the elapsed time and number of deleted files
 */
@Override
public ProcessResult process(TaskContext taskContext) throws Exception {
    OmsLogger logger = taskContext.getOmsLogger();
    logger.info("using params: {}", taskContext.getJobParams());
    LongAdder cleanNum = new LongAdder();
    Stopwatch sw = Stopwatch.createStarted();
    List<CleanupParams> cleanupParamsList = JSONArray.parseArray(taskContext.getJobParams(), CleanupParams.class);
    cleanupParamsList.forEach(params -> {
        logger.info("start to process: {}", JSON.toJSON(params));
        // Both a directory path and a file pattern are mandatory per entry.
        if (StringUtils.isEmpty(params.filePattern) || StringUtils.isEmpty(params.dirPath)) {
            logger.warn("skip due to invalid params!");
            return;
        }
        File dir = new File(params.dirPath);
        if (!dir.exists()) {
            logger.warn("skip due to dirPath[{}] not exists", params.dirPath);
            return;
        }
        if (!dir.isDirectory()) {
            logger.warn("skip due to dirPath[{}] is not a directory", params.dirPath);
            return;
        }
        logger.info("start to search directory: {}", params.dirPath);
        // Recursive listing of all files under the directory.
        Collection<File> files = FileUtils.listFiles(dir, null, true);
        logger.info("total file num: {}", files.size());
        Pattern filePattern = Pattern.compile(params.filePattern);
        files.forEach(file -> {
            String fileName = file.getName();
            String filePath = file.getAbsolutePath();
            if (!filePattern.matcher(fileName).matches()) {
                logger.info("file[{}] won't be deleted due to filename not match the pattern: {}", fileName, params.filePattern);
                return;
            }
            // last modify time interval, xxx hours
            int interval = (int) Math.ceil((System.currentTimeMillis() - file.lastModified()) / 3600000.0);
            if (interval < params.retentionTime) {
                logger.info("file[{}] won't be deleted because it does not meet the time requirement", filePath);
                return;
            }
            try {
                FileUtils.forceDelete(file);
                cleanNum.increment();
                logger.info("delete file[{}] successfully!", filePath);
            } catch (Exception e) {
                // A single failed deletion must not abort the whole job.
                logger.error("delete file[{}] failed!", filePath, e);
            }
        });
    });
    return new ProcessResult(true, String.format("cost:%s,clean:%d", sw.toString(), cleanNum.longValue()));
}
// Smoke test: runs the cleanup processor over "/" with a worker-script filename pattern.
// NOTE(review): this touches the real filesystem root — presumably safe because of the
// strict pattern and 24h retention, but confirm before running on shared machines.
@Test
void testCleanWorkerScript() throws Exception {
    JSONObject params = new JSONObject();
    params.put("dirPath", "/");
    params.put("filePattern", "(shell|python)_[0-9]*\\.(sh|py)");
    params.put("retentionTime", 24);
    JSONArray array = new JSONArray();
    array.add(params);
    TaskContext taskContext = TestUtils.genTaskContext(array.toJSONString());
    System.out.println(new FileCleanupProcessor().process(taskContext));
}
/**
 * Sets reader and writer indices in a single call. Bounds are validated only when
 * {@code checkBounds} is enabled; the actual assignment is delegated to setIndex0.
 */
@Override
public ByteBuf setIndex(int readerIndex, int writerIndex) {
    if (checkBounds) {
        checkIndexBounds(readerIndex, writerIndex, capacity());
    }
    setIndex0(readerIndex, writerIndex);
    return this;
}
// setIndex must reject readerIndex > writerIndex (CAPACITY/2 > CAPACITY/4).
@Test
public void setIndexBoundaryCheck2() {
    assertThrows(IndexOutOfBoundsException.class, new Executable() {
        @Override
        public void execute() {
            buffer.setIndex(CAPACITY / 2, CAPACITY / 4);
        }
    });
}
/**
 * Scans the given packages recursively and returns every class annotated with {@code @Entity}.
 *
 * @param pckgs package names to scan
 * @return the annotated classes discovered by the listener
 * @throws RuntimeException if a scanned resource cannot be processed
 */
public static List<Class<?>> findEntityClassesFromDirectory(String[] pckgs) {
    @SuppressWarnings("unchecked")
    final AnnotationAcceptingListener asl = new AnnotationAcceptingListener(Entity.class);
    try (final PackageNamesScanner scanner = new PackageNamesScanner(pckgs, true)) {
        while (scanner.hasNext()) {
            final String next = scanner.next();
            if (asl.accept(next)) {
                try (final InputStream in = scanner.open()) {
                    asl.process(next, in);
                } catch (IOException e) {
                    // Fix: preserve the underlying IOException as the cause instead of
                    // discarding it, so the failure's root cause survives in stack traces.
                    throw new RuntimeException(
                        "AnnotationAcceptingListener failed to process scanned resource: " + next, e);
                }
            }
        }
    }
    return new ArrayList<>(asl.getAnnotatedClasses());
}
// The fake entities package contains exactly 4 @Entity classes; scanning must find all of them.
@Test
void testFindEntityClassesFromDirectory() {
    //given
    String packageWithEntities = "io.dropwizard.hibernate.fake.entities.pckg";
    //when
    List<Class<?>> findEntityClassesFromDirectory = ScanningHibernateBundle.findEntityClassesFromDirectory(new String[]{packageWithEntities});
    //then
    assertFalse(findEntityClassesFromDirectory.isEmpty());
    assertEquals(4, findEntityClassesFromDirectory.size());
}
// Convenience overload: resolves proxy configuration from the JVM-wide system properties.
public final T proxyWithSystemProperties() {
    return proxyWithSystemProperties(System.getProperties());
}
// Configuring the transport from system-property proxy settings must yield a usable instance.
@Test
void testCreateClientWithSystemProxyProvider() {
    TestClientTransport transport = createTestTransportForProxy()
        .proxyWithSystemProperties();
    assertThat(transport).isNotNull();
}
/**
 * Applies a partial config patch to an existing connector: null patch values remove keys,
 * non-null values add/overwrite. The patched config is validated first; on success the
 * full put is submitted asynchronously and the callback completes with the created info.
 */
@Override
public synchronized void patchConnectorConfig(String connName, Map<String, String> configPatch, Callback<Created<ConnectorInfo>> callback) {
    try {
        ConnectorInfo connectorInfo = connectorInfo(connName);
        if (connectorInfo == null) {
            // Unknown connector: report not-found through the callback, never throw.
            callback.onCompletion(new NotFoundException("Connector " + connName + " not found", null), null);
            return;
        }
        Map<String, String> patchedConfig = ConnectUtils.patchConfig(connectorInfo.config(), configPatch);
        validateConnectorConfig(patchedConfig, (error, configInfos) -> {
            if (error != null) {
                callback.onCompletion(error, null);
                return;
            }
            // Validation passed; perform the actual config replacement off-thread.
            requestExecutorService.submit(
                () -> putConnectorConfig(connName, patchedConfig, null, true, callback, configInfos)
            );
        });
    } catch (Throwable e) {
        // All failures are routed to the callback so the caller always completes.
        callback.onCompletion(e, null);
    }
}
// End-to-end patch semantics: unchanged keys stay, patched keys change, null-valued keys
// are removed, and new keys are added; both the patch result and a subsequent config read
// must reflect the patched config.
@Test
public void testPatchConnectorConfig() throws ExecutionException, InterruptedException, TimeoutException {
    initialize(true);
    // Create the connector.
    Map<String, String> originalConnConfig = connectorConfig(SourceSink.SOURCE);
    originalConnConfig.put("foo0", "unaffected");
    originalConnConfig.put("foo1", "will-be-changed");
    originalConnConfig.put("foo2", "will-be-removed");
    Map<String, String> connConfigPatch = new HashMap<>();
    connConfigPatch.put("foo1", "changed");
    connConfigPatch.put("foo2", null);
    connConfigPatch.put("foo3", "added");
    // Expected merge of original + patch.
    Map<String, String> patchedConnConfig = new HashMap<>(originalConnConfig);
    patchedConnConfig.put("foo0", "unaffected");
    patchedConnConfig.put("foo1", "changed");
    patchedConnConfig.remove("foo2");
    patchedConnConfig.put("foo3", "added");
    expectAdd(SourceSink.SOURCE, false, false, false);
    expectConfigValidation(SourceSink.SOURCE, originalConnConfig, patchedConnConfig);
    expectConnectorStartingWithoutTasks(originalConnConfig, true);
    herder.putConnectorConfig(CONNECTOR_NAME, originalConnConfig, false, createCallback);
    createCallback.get(1000L, TimeUnit.SECONDS);
    expectConnectorStartingWithoutTasks(patchedConnConfig, false);
    FutureCallback<Herder.Created<ConnectorInfo>> patchCallback = new FutureCallback<>();
    herder.patchConnectorConfig(CONNECTOR_NAME, connConfigPatch, patchCallback);
    Map<String, String> returnedConfig = patchCallback.get(1000L, TimeUnit.SECONDS).result().config();
    assertEquals(patchedConnConfig, returnedConfig);
    // Also check the returned config when requested.
    FutureCallback<Map<String, String>> configCallback = new FutureCallback<>();
    herder.connectorConfig(CONNECTOR_NAME, configCallback);
    Map<String, String> returnedConfig2 = configCallback.get(1000L, TimeUnit.SECONDS);
    assertEquals(patchedConnConfig, returnedConfig2);
}
// Parses the named property as a double, using 0.0 as the default when absent or unparsable.
public double toDouble(String name) {
    return toDouble(name, 0.0);
}
// toDouble(name, default) must parse valid numerics (including negative, float and int
// strings) and fall back to the default for empty, non-numeric, boolean, char and
// missing properties.
@Test
public void testToDouble_String_double() {
    System.out.println("toDouble");
    double expResult;
    double result;
    Properties props = new Properties();
    props.put("value1", "12345.6789");
    props.put("value2", "-9000.001");
    props.put("empty", "");
    props.put("str", "abc");
    props.put("boolean", "true");
    props.put("float", "24.98");
    props.put("int", "12");
    props.put("char", "a");
    PropertyParser instance = new PropertyParser(props);
    expResult = 12345.6789;
    result = instance.toDouble("value1", 0.123);
    assertEquals(expResult, result, 0);
    expResult = -9000.001;
    result = instance.toDouble("value2", 0.123);
    assertEquals(expResult, result, 0);
    expResult = 0.123;
    result = instance.toDouble("empty", 0.123);
    assertEquals(expResult, result, 0);
    expResult = 0.123;
    result = instance.toDouble("str", 0.123);
    assertEquals(expResult, result, 0);
    expResult = 0.123;
    result = instance.toDouble("boolean", 0.123);
    assertEquals(expResult, result, 0);
    expResult = 24.98;
    result = instance.toDouble("float", 0.123);
    assertEquals(expResult, result, 0);
    expResult = 12;
    result = instance.toDouble("int", 0.123);
    assertEquals(expResult, result, 0);
    expResult = 0.123;
    result = instance.toDouble("char", 0.123);
    assertEquals(expResult, result, 0);
    expResult = 0.123;
    result = instance.toDouble("nonexistent", 0.123);
    assertEquals(expResult, result, 0);
}
/**
 * Decompresses a Zstandard-compressed byte array.
 *
 * @param bytes the compressed payload; must not be null
 * @return the decompressed bytes
 * @throws NullPointerException     if {@code bytes} is null
 * @throws IllegalArgumentException if the frame header reports an invalid or
 *                                  un-representable decompressed size
 */
public static byte[] decompress(byte[] bytes) {
    if (bytes == null) {
        throw new NullPointerException("bytes is null");
    }
    long size = Zstd.decompressedSize(bytes);
    // Fix: the original blindly cast the long size to int. Guard against error/unknown
    // sizes and frames larger than a Java array can hold before allocating.
    if (size < 0 || size > Integer.MAX_VALUE) {
        throw new IllegalArgumentException("invalid zstd decompressed size: " + size);
    }
    byte[] decompressBytes = new byte[(int) size];
    Zstd.decompress(decompressBytes, bytes);
    return decompressBytes;
}
// decompress(null) must fail fast with NullPointerException.
@Test
public void test_decompress() {
    Assertions.assertThrows(NullPointerException.class, () -> {
        ZstdUtil.decompress(null);
    });
}
// Convenience overload that wires in a fresh default AggregateParamsFactory and
// delegates to the fully-parameterized build.
public static KTableHolder<GenericKey> build(
    final KGroupedStreamHolder groupedStream,
    final StreamAggregate aggregate,
    final RuntimeBuildContext buildContext,
    final MaterializedFactory materializedFactory) {
    return build(
        groupedStream,
        aggregate,
        buildContext,
        materializedFactory,
        new AggregateParamsFactory()
    );
}
// For every windowed-aggregate setup, building the plan must create the materialization
// with exactly the expected key and value serdes.
@Test
public void shouldBuildMaterializedWithCorrectSerdesForWindowedAggregate() {
    for (final Runnable given : given()) {
        // Given: reset mocks between scenarios so verifications don't leak across runs.
        reset(groupedStream, timeWindowedStream, sessionWindowedStream, aggregated, materializedFactory);
        given.run();
        // When:
        windowedAggregate.build(planBuilder, planInfo);
        // Then:
        verify(materializedFactory).create(same(keySerde), same(valueSerde), any(), (Optional<Duration>) any());
    }
}
/**
 * Two AlluxioURIs are equal iff their wrapped URIs are equal.
 */
@Override
public boolean equals(Object o) {
    // Cheap identity fast path.
    if (this == o) {
        return true;
    }
    if (!(o instanceof AlluxioURI)) {
        return false;
    }
    // Equality is delegated entirely to the underlying URI.
    return mUri.equals(((AlluxioURI) o).mUri);
}
// Multi-part schemes ("scheme:part1://...") are significant for equality: URIs differing
// only in scheme parts must not be equal, and EqualsTester checks the full contract.
@Test
public void multiPartSchemeEquals() {
    assertTrue(new AlluxioURI("scheme:part1://127.0.0.1:3306/a.txt")
        .equals(new AlluxioURI("scheme:part1://127.0.0.1:3306/a.txt")));
    assertFalse(new AlluxioURI("part1://127.0.0.1:3306/a.txt")
        .equals(new AlluxioURI("scheme:part1://127.0.0.1:3306/a.txt")));
    assertFalse(new AlluxioURI("scheme:part1://127.0.0.1:3306/a.txt")
        .equals(new AlluxioURI("part1://127.0.0.1:3306/a.txt")));
    assertTrue(new AlluxioURI("scheme:part1:part2://127.0.0.1:3306/a.txt")
        .equals(new AlluxioURI("scheme:part1:part2://127.0.0.1:3306/a.txt")));
    assertFalse(new AlluxioURI("part2://127.0.0.1:3306/a.txt")
        .equals(new AlluxioURI("scheme:part1:part2://127.0.0.1:3306/a.txt")));
    assertFalse(new AlluxioURI("scheme:part1:part2://127.0.0.1:3306/a.txt")
        .equals(new AlluxioURI("part2://127.0.0.1:3306/a.txt")));
    new EqualsTester()
        .addEqualityGroup(new AlluxioURI("sch:p1:p2://aaaabbbb:12345/"),
            new AlluxioURI("sch:p1:p2://aaaabbbb:12345/"))
        .addEqualityGroup(new AlluxioURI("standard://host:12345/"))
        .testEquals();
}
/**
 * Handles a PostgreSQL Describe message: 'S' describes a prepared statement,
 * 'P' describes a portal; any other type is rejected.
 */
@Override
public Collection<DatabasePacket> execute() throws SQLException {
    char type = packet.getType();
    if (type == 'S') {
        return describePreparedStatement();
    }
    if (type == 'P') {
        return Collections.singleton(portalContext.get(packet.getName()).describe());
    }
    throw new UnsupportedSQLOperationException("Unsupported describe type: " + type);
}
// Describing a prepared INSERT without an explicit column list must infer parameter types
// from the target table (two int4 and two text-ish columns) and, since INSERT returns no
// rows, finish with a NoData packet.
@Test
void assertDescribePreparedStatementInsertWithoutColumns() throws SQLException {
    when(packet.getType()).thenReturn('S');
    final String statementId = "S_1";
    when(packet.getName()).thenReturn(statementId);
    String sql = "insert into t_order values (?, 0, 'char', ?), (2, ?, ?, '')";
    SQLStatement sqlStatement = SQL_PARSER_ENGINE.parse(sql, false);
    // All parameters start UNSPECIFIED; the executor is expected to resolve them.
    List<PostgreSQLColumnType> parameterTypes = new ArrayList<>(sqlStatement.getParameterCount());
    for (int i = 0; i < sqlStatement.getParameterCount(); i++) {
        parameterTypes.add(PostgreSQLColumnType.UNSPECIFIED);
    }
    SQLStatementContext sqlStatementContext = mock(InsertStatementContext.class);
    when(sqlStatementContext.getSqlStatement()).thenReturn(sqlStatement);
    ContextManager contextManager = mockContextManager();
    when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
    List<Integer> parameterIndexes = IntStream.range(0, sqlStatement.getParameterCount()).boxed().collect(Collectors.toList());
    connectionSession.getServerPreparedStatementRegistry().addPreparedStatement(statementId,
        new PostgreSQLServerPreparedStatement(sql, sqlStatementContext, new HintValueContext(), parameterTypes, parameterIndexes));
    Collection<DatabasePacket> actualPackets = executor.execute();
    assertThat(actualPackets.size(), is(2));
    Iterator<DatabasePacket> actualPacketsIterator = actualPackets.iterator();
    PostgreSQLParameterDescriptionPacket actualParameterDescription = (PostgreSQLParameterDescriptionPacket) actualPacketsIterator.next();
    PostgreSQLPacketPayload mockPayload = mock(PostgreSQLPacketPayload.class);
    actualParameterDescription.write(mockPayload);
    // 4 parameters total: two OID 23 (int4) and two OID 18 (char).
    verify(mockPayload).writeInt2(4);
    verify(mockPayload, times(2)).writeInt4(23);
    verify(mockPayload, times(2)).writeInt4(18);
    assertThat(actualPacketsIterator.next(), is(PostgreSQLNoDataPacket.getInstance()));
}
/**
 * Computes the orderings Firestore implicitly appends to a query: one per field used in
 * an inequality filter (sorted), plus __name__ unless an inequality already covers it.
 * Fields the query explicitly orders by are excluded; any remaining implicit fields are
 * appended using the direction of the last explicit order (ascending when there is none).
 */
static List<Order> getImplicitOrderBy(StructuredQuery query) {
    List<OrderByFieldPath> expectedImplicitOrders = new ArrayList<>();
    if (query.hasWhere()) {
        // Collect every field constrained by an inequality; these force implicit ordering.
        fillInequalityFields(query.getWhere(), expectedImplicitOrders);
    }
    Collections.sort(expectedImplicitOrders);
    // __name__ is always an implicit tiebreaker unless an inequality already names it.
    if (expectedImplicitOrders.stream().noneMatch(OrderByFieldPath::isDocumentName)) {
        expectedImplicitOrders.add(OrderByFieldPath.fromString("__name__"));
    }
    for (Order order : query.getOrderByList()) {
        // Explicit orderings satisfy the corresponding implicit requirement.
        OrderByFieldPath orderField = OrderByFieldPath.fromString(order.getField().getFieldPath());
        expectedImplicitOrders.remove(orderField);
    }
    List<Order> additionalOrders = new ArrayList<>();
    if (!expectedImplicitOrders.isEmpty()) {
        // Implicit orders inherit the direction of the last explicit order, if any.
        Direction lastDirection =
            query.getOrderByCount() == 0
                ? Direction.ASCENDING
                : query.getOrderByList().get(query.getOrderByCount() - 1).getDirection();
        for (OrderByFieldPath field : expectedImplicitOrders) {
            additionalOrders.add(
                Order.newBuilder()
                    .setDirection(lastDirection)
                    .setField(
                        FieldReference.newBuilder().setFieldPath(field.getOriginalString()).build())
                    .build());
        }
    }
    return additionalOrders;
}
// A unary filter with an empty field path is malformed and must be rejected.
@Test
public void getImplicitOrderBy_malformedWhereThrows() {
    testQuery =
        testQuery
            .toBuilder()
            .setWhere(
                Filter.newBuilder()
                    .setUnaryFilter(
                        UnaryFilter.newBuilder()
                            .setField(FieldReference.newBuilder().setFieldPath(""))
                            .setOp(UnaryFilter.Operator.IS_NOT_NAN)))
            .build();
    assertThrows(IllegalArgumentException.class, () -> QueryUtils.getImplicitOrderBy(testQuery));
}
// Pops and returns the top element; the vacated slot is nulled so the stack does not
// retain a stale reference.
// NOTE(review): no underflow guard — popping an empty stack presumably indexes out of
// bounds; callers appear expected to check isStackEmpty() first. Confirm the invariant.
SelType pop() {
    SelType ret = stack[top];
    stack[top--] = null;
    return ret;
}
// LIFO order: two pushes must pop in reverse order and leave the stack empty again.
@Test
public void testPop() {
    assertTrue(state.isStackEmpty());
    state.push(SelString.of("foo"));
    state.push(SelString.of("bar"));
    assertFalse(state.isStackEmpty());
    SelType res = state.pop();
    assertEquals("STRING: bar", res.type() + ": " + res);
    res = state.pop();
    assertEquals("STRING: foo", res.type() + ": " + res);
    assertTrue(state.isStackEmpty());
}
/**
 * Cancels every still-valid parked operation that was submitted by the given caller,
 * attaching the supplied cause. Entries from other callers are left untouched.
 */
public void cancelAll(UUID callerUuid, Throwable cause) {
    for (WaitSetEntry entry : queue) {
        if (entry.isValid()) {
            Operation parked = entry.getOperation();
            if (callerUuid.equals(parked.getCallerUuid())) {
                entry.cancel(cause);
            }
        }
    }
}
// Cancelling by (service, object id) must cancel only matching parked operations.
// NOTE(review): this exercises a cancelAll(String, String, Throwable) overload, not the
// UUID-based one — presumably a sibling method; verify both share the same semantics.
@Test
public void cancelAll() {
    WaitSet waitSet = newWaitSet();
    BlockedOperation op1 = newBlockingOperationWithServiceNameAndObjectId("service1", "1");
    waitSet.park(op1);
    BlockedOperation op2 = newBlockingOperationWithServiceNameAndObjectId("service1", "2");
    waitSet.park(op2);
    BlockedOperation op3 = newBlockingOperationWithServiceNameAndObjectId("service2", "1");
    waitSet.park(op3);
    Exception cause = new Exception();
    waitSet.cancelAll("foo", "1", cause);
    assertCancelled(waitSet, op1, cause);
    assertCancelled(waitSet, op2, null);
    assertCancelled(waitSet, op3, null);
}
/**
 * Executes an INSERT VALUES statement: resolves the target data source, validates the
 * insert, builds the Kafka record and produces it. Authorization failures are unwrapped
 * and re-thrown as KsqlExceptions carrying an actionable root cause.
 */
@SuppressWarnings("unused") // Part of required API.
public void execute(
    final ConfiguredStatement<InsertValues> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
    final InsertValues insertValues = statement.getStatement();
    final MetaStore metaStore = executionContext.getMetaStore();
    final KsqlConfig config = statement.getSessionConfig().getConfig(true);
    final DataSource dataSource = getDataSource(config, metaStore, insertValues);
    validateInsert(insertValues.getColumns(), dataSource);
    final ProducerRecord<byte[], byte[]> record =
        buildRecord(statement, metaStore, dataSource, serviceContext);
    try {
        producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
    } catch (final TopicAuthorizationException e) {
        // TopicAuthorizationException does not give much detailed information about why it failed,
        // except which topics are denied. Here we just add the ACL to make the error message
        // consistent with other authorization error messages.
        final Exception rootCause = new KsqlTopicAuthorizationException(
            AclOperation.WRITE,
            e.unauthorizedTopics()
        );
        throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
    } catch (final ClusterAuthorizationException e) {
        // ClusterAuthorizationException is thrown when using idempotent producers
        // and either a topic write permission or a cluster-level idempotent write
        // permission (only applicable for broker versions no later than 2.8) is
        // missing. In this case, we include additional context to help the user
        // distinguish this type of failure from other permissions exceptions
        // such as the ones thrown above when TopicAuthorizationException is caught.
        throw new KsqlException(
            createInsertFailedExceptionMessage(insertValues),
            createClusterAuthorizationExceptionRootCause(dataSource)
        );
    } catch (final KafkaException e) {
        if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
            // The error message thrown when an idempotent producer is missing permissions
            // is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
            // as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
            // ksqlDB handles these two the same way, accordingly.
            // See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
            throw new KsqlException(
                createInsertFailedExceptionMessage(insertValues),
                createClusterAuthorizationExceptionRootCause(dataSource)
            );
        } else {
            throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
        }
    } catch (final Exception e) {
        // Catch-all: any other failure is wrapped with the standard insert-failed message.
        throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
    }
}
// Inserting into a table without supplying the primary key column must be rejected
// with a message naming the missing key column.
@Test
public void shouldThrowOnTablesWithKeyFieldAndNullKeyFieldValueProvided() {
    // Given:
    givenSourceTableWithSchema(SerdeFeatures.of(), SerdeFeatures.of());
    final ConfiguredStatement<InsertValues> statement = givenInsertValues(
        ImmutableList.of(COL1),
        ImmutableList.of(
            new LongLiteral(2L))
    );
    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> executor.execute(statement, mock(SessionProperties.class), engine, serviceContext)
    );
    // Then:
    assertThat(e.getMessage(), containsString(
        "Failed to insert values into 'TOPIC'. Value for primary key column(s) k0 is required for tables"));
}
// Looks up a single page by name, keeps it only if the public-query predicate accepts it,
// then converts the entity into its view object.
@Override
public Mono<SinglePageVo> getByName(String pageName) {
    return client.get(SinglePage.class, pageName)
        .filterWhen(page -> queryPredicate().map(predicate -> predicate.test(page)))
        .flatMap(singlePagePublicQueryService::convertToVo);
}
// Regression for gh-2992: a published, public, non-deleted page fetched by name must be
// converted and emitted exactly once.
@Test
void getByName() {
    // fix gh-2992
    String fakePageName = "fake-page";
    SinglePage singlePage = new SinglePage();
    singlePage.setMetadata(new Metadata());
    singlePage.getMetadata().setName(fakePageName);
    singlePage.getMetadata().setLabels(Map.of(SinglePage.PUBLISHED_LABEL, "true"));
    singlePage.setSpec(new SinglePage.SinglePageSpec());
    singlePage.getSpec().setOwner("fake-owner");
    singlePage.getSpec().setReleaseSnapshot("fake-release");
    singlePage.getSpec().setPublish(true);
    singlePage.getSpec().setDeleted(false);
    singlePage.getSpec().setVisible(Post.VisibleEnum.PUBLIC);
    singlePage.setStatus(new SinglePage.SinglePageStatus());
    when(client.get(eq(SinglePage.class), eq(fakePageName)))
        .thenReturn(Mono.just(singlePage));
    when(singlePageConversionService.convertToVo(eq(singlePage)))
        .thenReturn(Mono.just(mock(SinglePageVo.class)));
    singlePageFinder.getByName(fakePageName)
        .as(StepVerifier::create)
        .consumeNextWith(page -> assertThat(page).isNotNull())
        .verifyComplete();
    verify(client).get(SinglePage.class, fakePageName);
}
/**
 * Exports the service exactly once. Uses an unsynchronized fast-path check followed by a
 * synchronized re-check (double-checked) so concurrent callers export only once. Export
 * may be immediate, delayed, or deferred to manual registration depending on config.
 */
@Override
public void export(RegisterTypeEnum registerType) {
    // Fast path: already exported, nothing to do.
    if (this.exported) {
        return;
    }
    if (getScopeModel().isLifeCycleManagedExternally()) {
        // prepare model for reference
        getScopeModel().getDeployer().prepare();
    } else {
        // ensure start module, compatible with old api usage
        getScopeModel().getDeployer().start();
    }
    synchronized (this) {
        // Re-check under the lock: another thread may have exported meanwhile.
        if (this.exported) {
            return;
        }
        if (!this.isRefreshed()) {
            this.refresh();
        }
        if (this.shouldExport()) {
            this.init();
            if (shouldDelay()) {
                // should register if delay export
                doDelayExport();
            } else if (Integer.valueOf(-1).equals(getDelay())
                && Boolean.parseBoolean(ConfigurationUtils.getProperty(
                getScopeModel(), CommonConstants.DUBBO_MANUAL_REGISTER_KEY, "false"))) {
                // should not register by default
                doExport(RegisterTypeEnum.MANUAL_REGISTER);
            } else {
                doExport(registerType);
            }
        }
    }
}
// After export, the generated URL must carry the configured application name.
@Test
void testApplicationInUrl() {
    service.export();
    assertNotNull(service.toUrl().getApplication());
    Assertions.assertEquals("app", service.toUrl().getApplication());
}
/**
 * Returns a heap-backed copy of the given Data. Null and instances that are already
 * HeapData are returned unchanged; any other implementation has its bytes copied.
 */
public static Data toHeapData(Data data) {
    if (data == null) {
        return null;
    }
    if (data instanceof HeapData) {
        return data;
    }
    return new HeapData(data.toByteArray());
}
// Null input must pass through as null rather than throwing.
@Test
public void whenNull() {
    Data data = ToHeapDataConverter.toHeapData(null);
    assertNull(data);
}
// A path matches iff wrapping it in a fresh predicate yields one equal to this predicate
// (equality semantics define the match).
@Override
public boolean test(final Path test) {
    return this.equals(new DefaultPathPredicate(test));
}
// For directories the version id is apparently not part of the predicate identity:
// the same directory path matches regardless of differing version ids.
@Test
public void testPredicateVersionIdDirectory() {
    final Path t = new Path("/f", EnumSet.of(Path.Type.directory), new PathAttributes().withVersionId("1"));
    assertTrue(new DefaultPathPredicate(t).test(t));
    assertTrue(new DefaultPathPredicate(t).test(new Path("/f", EnumSet.of(Path.Type.directory), new PathAttributes().withVersionId("1"))));
    assertTrue(new DefaultPathPredicate(t).test(new Path("/f", EnumSet.of(Path.Type.directory), new PathAttributes().withVersionId("2"))));
}
// Entry point for reading GridFS content as text: defaults to line/text parsing,
// UTF-8 string coder, default connection configuration and zero allowed timestamp skew.
public static Read<String> read() {
    return new AutoValue_MongoDbGridFSIO_Read.Builder<String>()
        .setParser(TEXT_PARSER)
        .setCoder(StringUtf8Coder.of())
        .setConnectionConfiguration(ConnectionConfiguration.create())
        .setSkew(Duration.ZERO)
        .build();
}
// Reads tab-separated (timestamp, name, score) lines from GridFS with a custom parser,
// emitting timestamped KVs; verifies total element count and the per-key maximum score.
@Test
public void testReadWithParser() {
    PCollection<KV<String, Integer>> output =
        pipeline.apply(
            MongoDbGridFSIO.read()
                .withUri("mongodb://localhost:" + port)
                .withDatabase(DATABASE)
                .withBucket("mapBucket")
                .<KV<String, Integer>>withParser(
                    (input, callback) -> {
                        try (final BufferedReader reader =
                            new BufferedReader(
                                new InputStreamReader(
                                    input.getInputStream(), StandardCharsets.UTF_8))) {
                            String line = reader.readLine();
                            while (line != null) {
                                try (Scanner scanner = new Scanner(line.trim())) {
                                    scanner.useDelimiter("\\t");
                                    long timestamp = scanner.nextLong();
                                    String name = scanner.next();
                                    int score = scanner.nextInt();
                                    // Emit with the element's own event timestamp.
                                    callback.output(KV.of(name, score), new Instant(timestamp));
                                }
                                line = reader.readLine();
                            }
                        }
                    })
                .withSkew(Duration.millis(3610000L))
                .withCoder(KvCoder.of(StringUtf8Coder.of(), VarIntCoder.of())));
    PAssert.thatSingleton(output.apply("Count All", Count.globally())).isEqualTo(50100L);
    PAssert.that(output.apply("Max PerElement", Max.integersPerKey()))
        .satisfies(
            input -> {
                for (KV<String, Integer> element : input) {
                    assertEquals(101, element.getValue().longValue());
                }
                return null;
            });
    pipeline.run();
}
/**
 * Human-readable summary of this buffer. Empty buffers report size only; buffers of at
 * most 16 bytes show their content as hex; larger buffers are summarized by size plus
 * an MD5 digest of the content.
 */
@Override
public String toString() {
    if (size == 0) {
        return "Buffer[size=0]";
    }
    if (size <= 16) {
        // Clone first so reading the bytes does not consume this buffer.
        ByteString data = clone().readByteString();
        return String.format("Buffer[size=%s data=%s]", size, data.hex());
    }
    MessageDigest md5;
    try {
        // Fix: MessageDigest has no public constructor; it must be obtained via getInstance().
        md5 = MessageDigest.getInstance("MD5");
    } catch (java.security.NoSuchAlgorithmException e) {
        // MD5 is required to be available on every conforming JVM.
        throw new AssertionError(e);
    }
    md5.update(head.data, head.pos, head.limit - head.pos);
    // Walk the circular segment list starting after the head.
    for (Segment s = head.next; s != head; s = s.next) {
        md5.update(s.data, s.pos, s.limit - s.pos);
    }
    return String.format("Buffer[size=%s md5=%s]", size, ByteString.of(md5.digest()).hex());
}
// An empty buffer renders as size-only, with no data or digest component.
@Test
public void toStringOnEmptyBuffer() throws Exception {
    Buffer buffer = new Buffer();
    assertEquals("Buffer[size=0]", buffer.toString());
}
// Tag identifying this operation's snapshot-save records.
@Override
protected String getSnapshotSaveTag() {
    return SNAPSHOT_SAVE;
}
// The save tag must follow the "<SimpleClassName>.SAVE" convention.
@Test
void testGetSnapshotSaveTag() {
    String snapshotSaveTag = serviceMetadataSnapshotOperation.getSnapshotSaveTag();
    assertEquals(snapshotSaveTag, ServiceMetadataSnapshotOperation.class.getSimpleName() + ".SAVE");
}
// Bot API method name used for this request.
@Override
public String getMethod() {
    return PATH;
}
// getMethod must report "getMyCommands", and validation must reject an empty (but
// present) language code with a specific message.
@Test
public void testSetMyCommandsWithEmptyStringLanguageCode() {
    GetMyCommands getMyCommands = GetMyCommands
        .builder()
        .languageCode("")
        .scope(BotCommandScopeDefault.builder().build())
        .build();
    assertEquals("getMyCommands", getMyCommands.getMethod());
    Throwable thrown = assertThrows(TelegramApiValidationException.class, getMyCommands::validate);
    assertEquals("LanguageCode parameter can't be empty string", thrown.getMessage());
}
/**
 * Builds a filtered top-aggregation whose sub-aggregation is a terms aggregation on the
 * top-aggregation's field (limited to numberOfTerms when given), plus any extra
 * sub-aggregations supplied by the caller.
 */
public FilterAggregationBuilder buildTermTopAggregation(
    String topAggregationName,
    TopAggregationDefinition<?> topAggregation,
    @Nullable Integer numberOfTerms,
    Consumer<BoolQueryBuilder> extraFilters,
    Consumer<FilterAggregationBuilder> otherSubAggregations
) {
    // Compose the terms sub-aggregation with the caller's extra sub-aggregations.
    Consumer<FilterAggregationBuilder> subAggregations = t -> {
        t.subAggregation(subAggregationHelper.buildTermsAggregation(topAggregationName, topAggregation, numberOfTerms));
        otherSubAggregations.accept(t);
    };
    return buildTopAggregation(topAggregationName, topAggregation, extraFilters, subAggregations);
}
// The built aggregation must carry the computer-provided filter for the target
// top-aggregation (not the unrelated one) and include the caller's extra filter clause.
@Test
public void buildTermTopAggregation_adds_filter_from_FiltersComputer_for_TopAggregation_and_extra_one() {
    String topAggregationName = randomAlphabetic(10);
    SimpleFieldTopAggregationDefinition topAggregation = new SimpleFieldTopAggregationDefinition("bar", false);
    SimpleFieldTopAggregationDefinition otherTopAggregation = new SimpleFieldTopAggregationDefinition("acme", false);
    BoolQueryBuilder computerFilter = boolQuery();
    BoolQueryBuilder otherFilter = boolQuery();
    BoolQueryBuilder extraFilter = boolQuery();
    when(filtersComputer.getTopAggregationFilter(topAggregation)).thenReturn(Optional.of(computerFilter));
    when(filtersComputer.getTopAggregationFilter(otherTopAggregation)).thenReturn(Optional.of(otherFilter));
    TermsAggregationBuilder termSubAgg = AggregationBuilders.terms("foo");
    when(subAggregationHelper.buildTermsAggregation(topAggregationName, topAggregation, null)).thenReturn(termSubAgg);
    FilterAggregationBuilder aggregationBuilder = underTest.buildTermTopAggregation(
        topAggregationName, topAggregation, null, t -> t.must(extraFilter), NO_OTHER_SUBAGGREGATION);
    assertThat(aggregationBuilder.getName()).isEqualTo(topAggregationName);
    assertThat(aggregationBuilder.getFilter()).isEqualTo(computerFilter);
    assertThat(((BoolQueryBuilder) aggregationBuilder.getFilter()).must()).containsExactly(extraFilter);
}
@Override public Object next() { // Return empty string if size of json string is zero or if number of elements is zero. if (_jsonStringLength == 0 || _jsonStringLength / DEFAULT_JSON_ELEMENT_LENGTH == 0) { return "{}"; } // Create JSON string { "<character>":<integer>, "<character>":<integer>, ...} as per length specified. Escape // comma's in JSON since comma is used as delimiter in CSV data file that will be used to generate segment. StringBuffer jsonBuffer = new StringBuffer(); int elementCount = _jsonStringLength / DEFAULT_JSON_ELEMENT_LENGTH; jsonBuffer.append("{"); for (int i = 0; i < elementCount; i++) { if (jsonBuffer.length() > 1) { jsonBuffer.append("\\,"); } String item = "\"" + (char) ('a' + _random.nextInt(26)) + "\":" + _random.nextInt(10); jsonBuffer.append(item); } return jsonBuffer.append("}").toString(); }
// Budgets below one element length yield "{}"; larger budgets yield JSON whose size is
// validated by checkJson against the requested length.
@Test
public void testNext() throws IOException {
    // JsonGenerator generates empty json when size is less than JsonGenerator.DEFAULT_JSON_ELEMENT_LENGTH
    JsonGenerator jsonGenerator1 = new JsonGenerator(0);
    Assert.assertEquals(jsonGenerator1.next(), "{}");
    JsonGenerator jsonGenerator2 = new JsonGenerator(4);
    Assert.assertEquals(jsonGenerator2.next(), "{}");
    JsonGenerator jsonGenerator3 = new JsonGenerator(8);
    checkJson((String) jsonGenerator3.next(), 8);
    JsonGenerator jsonGenerator4 = new JsonGenerator(20);
    checkJson((String) jsonGenerator4.next(), 20);
}
/**
 * Legacy bridge: converts the java.util.Date to an Instant (null-safe) and delegates
 * to the Instant-based overload.
 */
public List<Duration> calculatePreciseDuration(final Date then) {
    if (then == null) {
        // Explicit cast keeps overload resolution on the Instant variant.
        return calculatePreciseDuration((Instant) null);
    }
    return calculatePreciseDuration(then.toInstant());
}
// A past instant must decompose into negative duration components (hours then minutes).
@Test
public void testPreciseInThePast() throws Exception {
    PrettyTime t = new PrettyTime();
    List<Duration> durations = t.calculatePreciseDuration(now.minusHours(5).minusMinutes(10).minusSeconds(1));
    Assert.assertTrue(durations.size() >= 2);
    Assert.assertEquals(-5, durations.get(0).getQuantity());
    Assert.assertEquals(-10, durations.get(1).getQuantity());
}
/**
 * Evaluates a join task: once all joined tasks have reached a terminal state (per
 * executeJoin) and completion is confirmed, the join's status is set and true is
 * returned; otherwise the join stays pending and false is returned.
 */
@Override
public boolean execute(Workflow workflow, Task task, WorkflowExecutor executor) {
    Map<String, Task> taskMap = TaskHelper.getTaskMap(workflow);
    Optional<Task.Status> done = executeJoin(task, taskMap);
    if (done.isPresent() && confirmDone(workflow, task)) {
        // update task status if it is done
        task.setStatus(done.get());
        return true;
    }
    return false;
}
// With a fatally-failed upstream step the gate must not complete and must leave the
// join task's status unset.
@Test
public void testExecuteIncomplete() {
    StepRuntimeState state = new StepRuntimeState();
    state.setStatus(StepInstance.Status.FATALLY_FAILED);
    when(stepInstanceDao.getStepStates(anyString(), anyLong(), anyLong(), anyList()))
        .thenReturn(Collections.singletonMap("job1", state));
    assertFalse(gateTask.execute(workflow, joinTask, null));
    assertNull(joinTask.getStatus());
}
/**
 * Deletes an ACL entry for the given subject/resource on the broker at {@code addr},
 * waiting up to {@code millis}. Any non-SUCCESS response is surfaced as an
 * MQBrokerException carrying the broker's code and remark.
 */
public void deleteAcl(String addr, String subject, String resource, long millis) throws RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException, InterruptedException, MQBrokerException {
    DeleteAclRequestHeader requestHeader = new DeleteAclRequestHeader(subject, resource);
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.AUTH_DELETE_ACL, requestHeader);
    RemotingCommand response = this.remotingClient.invokeSync(addr, request, millis);
    assert response != null;
    if (response.getCode() == ResponseCode.SUCCESS) {
        return;
    }
    throw new MQBrokerException(response.getCode(), response.getRemark());
}
// With a mocked SUCCESS response, deleteAcl must complete without throwing.
@Test
public void testDeleteAcl() throws RemotingException, InterruptedException, MQBrokerException {
    mockInvokeSync();
    mqClientAPI.deleteAcl(defaultBrokerAddr, "", "", defaultTimeout);
}
// Records and schedules the deletion task after the configured debug delay (seconds).
// NOTE(review): when debugDelay == -1 the task is silently dropped — presumably -1
// means "deletion disabled"; confirm against the service's configuration handling.
public void delete(DeletionTask deletionTask) {
    if (debugDelay != -1) {
        LOG.debug("Scheduling DeletionTask (delay {}) : {}", debugDelay, deletionTask);
        recordDeletionTaskInStateStore(deletionTask);
        sched.schedule(deletionTask, debugDelay, TimeUnit.SECONDS);
    }
}
// Dependency semantics of FileDeletionTask: when every sub-directory deletion succeeds
// the dependent parent-directory task runs (parent removed); when any sub-task is marked
// failed the dependent task must not run (parent retained).
@Test (timeout=60000)
public void testFileDeletionTaskDependency() throws Exception {
    FakeDefaultContainerExecutor exec = new FakeDefaultContainerExecutor();
    Configuration conf = new Configuration();
    exec.setConf(conf);
    DeletionService del = new DeletionService(exec);
    del.init(conf);
    del.start();
    try {
        // Log the seed so random directory layouts are reproducible on failure.
        Random r = new Random();
        long seed = r.nextLong();
        r.setSeed(seed);
        System.out.println("SEED: " + seed);
        List<Path> dirs = buildDirs(r, base, 2);
        createDirs(new Path("."), dirs);
        // first we will try to delete sub directories which are present. This
        // should then trigger parent directory to be deleted.
        List<Path> subDirs = buildDirs(r, dirs.get(0), 2);
        FileDeletionTask dependentDeletionTask =
            new FileDeletionTask(del, null, dirs.get(0), new ArrayList<Path>());
        List<FileDeletionTask> deletionTasks = new ArrayList<FileDeletionTask>();
        for (Path subDir : subDirs) {
            List<Path> subDirList = new ArrayList<>();
            subDirList.add(subDir);
            FileDeletionTask deletionTask =
                new FileDeletionTask(del, null, dirs.get(0), subDirList);
            deletionTask.addDeletionTaskDependency(dependentDeletionTask);
            deletionTasks.add(deletionTask);
        }
        for (FileDeletionTask task : deletionTasks) {
            del.delete(task);
        }
        // Poll up to 20s for the asynchronous deletion to complete.
        int msecToWait = 20 * 1000;
        while (msecToWait > 0 && (lfs.util().exists(dirs.get(0)))) {
            Thread.sleep(100);
            msecToWait -= 100;
        }
        assertFalse(lfs.util().exists(dirs.get(0)));
        // Now we will try to delete sub directories; one of the deletion task we
        // will mark as failure and then parent directory should not be deleted.
        subDirs = buildDirs(r, dirs.get(1), 2);
        subDirs.add(new Path(dirs.get(1), "absentFile"));
        dependentDeletionTask =
            new FileDeletionTask(del, null, dirs.get(1), new ArrayList<Path>());
        deletionTasks = new ArrayList<FileDeletionTask>();
        for (Path subDir : subDirs) {
            List<Path> subDirList = new ArrayList<>();
            subDirList.add(subDir);
            FileDeletionTask deletionTask =
                new FileDeletionTask(del, null, null, subDirList);
            deletionTask.addDeletionTaskDependency(dependentDeletionTask);
            deletionTasks.add(deletionTask);
        }
        // marking one of the tasks as a failure.
        deletionTasks.get(2).setSuccess(false);
        for (FileDeletionTask task : deletionTasks) {
            del.delete(task);
        }
        msecToWait = 20 * 1000;
        while (msecToWait > 0
            && (lfs.util().exists(subDirs.get(0)) || lfs.util().exists(
            subDirs.get(1)))) {
            Thread.sleep(100);
            msecToWait -= 100;
        }
        // Parent must survive because a dependency failed.
        assertTrue(lfs.util().exists(dirs.get(1)));
    } finally {
        del.stop();
    }
}
// Formats a duration (in millis) as a localized, human-readable age label by resolving
// the duration to a message key/value pair.
@Override
public String age(Locale locale, long durationInMillis) {
    DurationLabel.Result duration = DurationLabel.label(durationInMillis);
    return message(locale, duration.key(), null, duration.value());
}
// A one-day interval between two dates must be rendered as "a day" in English.
@Test
public void get_age_with_dates() {
    assertThat(underTest.age(Locale.ENGLISH, DateUtils.parseDate("2014-01-01"), DateUtils.parseDate("2014-01-02"))).isEqualTo("a day");
}
/**
 * Cumulative distribution function: F(x) = 1 - e^(-lambda * x) for x >= 0, and 0 below
 * zero since the exponential distribution has no mass on negative values.
 */
@Override
public double cdf(double x) {
    return x < 0 ? 0.0 : 1 - Math.exp(-lambda * x);
}
// CDF of Exp(2): zero at and below 0, known reference values at positive points,
// approaching 1 in the tail.
@Test
public void testCdf() {
    System.out.println("cdf");
    ExponentialDistribution instance = new ExponentialDistribution(2.0);
    instance.rand();
    assertEquals(0, instance.cdf(-0.1), 1E-7);
    assertEquals(0, instance.cdf(0.0), 1E-7);
    assertEquals(0.8646647, instance.cdf(1.0), 1E-7);
    assertEquals(0.9816844, instance.cdf(2.0), 1E-7);
    assertEquals(0.9975212, instance.cdf(3.0), 1E-7);
    assertEquals(0.9996645, instance.cdf(4.0), 1E-7);
    assertEquals(0.9999999, instance.cdf(8.0), 1E-7);
    assertEquals(1.0, instance.cdf(10.0), 1E-7);
}
// Exposes the policy info for tests only; production code should not depend on it.
@VisibleForTesting
public WeightedPolicyInfo getWeightedPolicyInfo() {
    return weightedPolicyInfo;
}
// The policy info must survive a serialize/deserialize round-trip unchanged.
@Test
public void testPolicyInfoSetCorrectly() throws Exception {
    serializeAndDeserializePolicyManager(wfp, expectedPolicyManager, expectedAMRMProxyPolicy,
        expectedRouterPolicy);
    //check the policyInfo propagates through ser/der correctly
    Assert.assertEquals(((WeightedLocalityPolicyManager) wfp)
        .getWeightedPolicyInfo(), policyInfo);
}
@CanIgnoreReturnValue
public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) {
    // A null varargs array means the caller passed a literal null, e.g.
    // containsExactly((Object[]) null); treat it as a single null element.
    List<@Nullable Object> expected =
        (varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs);
    return containsExactlyElementsIn(
        expected,
        // Second argument flags the "exactly one argument and it is an Iterable"
        // case — presumably so downstream reporting can warn about the likely
        // misuse of containsExactly(iterable); confirm against containsExactlyElementsIn.
        varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable);
}
@Test
public void iterableContainsExactlyWithNullFailure() {
    // Expecting two nulls while the actual list has only one must fail,
    // and the failure must report the missing null element.
    expectFailureWhenTestingThat(asList(1, null, 3)).containsExactly(1, null, null, 3);
    assertFailureValue("missing (1)", "null");
}
/** Offers a single occurrence of {@code item}; delegates to the counted variant. */
@Override
public boolean offer(T item) {
    return this.offer(item, 1);
}
@Test public void testByteSerialization() throws IOException, ClassNotFoundException { StreamSummary<String> vs = new StreamSummary<String>(3); String[] stream = {"X", "X", "Y", "Z", "A", "B", "C", "X", "X", "A", "C", "A", "A"}; for (String i : stream) { vs.offer(i); } testSerialization(vs); // Empty vs = new StreamSummary<String>(0); testSerialization(vs); }
/** Hash is derived from the event list; a null list hashes to 0 (matches equals). */
@Override
public int hashCode() {
    if (events == null) {
        return 0;
    }
    return events.hashCode();
}
@Test
public void testHashCode() {
    // Objects that are equal (or differ only in attributes excluded from the
    // hash, such as source and partition id) must share a hash code.
    assertEquals(batchEventData.hashCode(), batchEventData.hashCode());
    assertEquals(batchEventData.hashCode(), batchEventDataSameAttribute.hashCode());
    assertEquals(batchEventData.hashCode(), batchEventDataOtherSource.hashCode());
    assertEquals(batchEventData.hashCode(), batchEventDataOtherPartitionId.hashCode());

    // Inequality checks are only meaningful when distinct hashes are expected.
    assumeDifferentHashCodes();
    assertNotEquals(batchEventData.hashCode(), batchEventDataOtherEvent.hashCode());
    assertNotEquals(batchEventData.hashCode(), batchEventDataNoEvent.hashCode());
}
/**
 * Desensitizes a user id: the real id is always replaced by the constant 0.
 *
 * @return {@code 0L}, never null.
 */
public static Long userId() {
    return 0L;
}
@Test
public void userIdTest() {
    // Desensitized user id is always the constant 0.
    assertEquals(Long.valueOf(0L), DesensitizedUtil.userId());
}
/**
 * Executes the configured HTTP request and maps the response to the task output.
 * Error responses either fail the task or, when {@code allowFailed} is set,
 * are surfaced as regular output.
 */
@SuppressWarnings("unchecked")
public Output run(RunContext runContext) throws Exception {
    Logger logger = runContext.logger();

    try (HttpClient client = this.client(runContext, this.method)) {
        HttpRequest<String> request = this.request(runContext);

        HttpResponse<String> response;
        try {
            response = client
                .toBlocking()
                .exchange(request, Argument.STRING, Argument.STRING);

            // check that the string is a valid Unicode string
            if (response.getBody().isPresent()) {
                OptionalInt illegalChar = response.body().chars().filter(c -> !Character.isDefined(c)).findFirst();
                if (illegalChar.isPresent()) {
                    throw new IllegalArgumentException("Illegal unicode code point in request body: " + illegalChar.getAsInt() +
                        ", the Request task only support valid Unicode strings as body.\n" +
                        "You can try using the Download task instead.");
                }
            }
        } catch (HttpClientResponseException e) {
            // Non-2xx responses raise this exception; rethrow unless failures are allowed,
            // in which case the error response becomes the task's response.
            if (!allowFailed) {
                throw e;
            }
            //noinspection unchecked
            response = (HttpResponse<String>) e.getResponse();
        }

        logger.debug("Request '{}' with the response code '{}'", request.getUri(), response.getStatus().getCode());

        return this.output(runContext, request, response);
    }
}
@Test
void multipartCustomFilename() throws Exception {
    // Stage a fixture file in internal storage so the task can reference it by URI.
    File file = new File(Objects.requireNonNull(RequestTest.class.getClassLoader().getResource("application-test.yml")).toURI());
    URI fileStorage = storageInterface.put(
        null,
        new URI("/" + FriendlyId.createFriendlyId()),
        new FileInputStream(file)
    );

    try (
        ApplicationContext applicationContext = ApplicationContext.run();
        EmbeddedServer server = applicationContext.getBean(EmbeddedServer.class).start();
    ) {
        // Multipart form whose file part carries an explicit custom filename ("test.yml").
        Request task = Request.builder()
            .id(RequestTest.class.getSimpleName())
            .type(RequestTest.class.getName())
            .method(HttpMethod.POST)
            .contentType(MediaType.MULTIPART_FORM_DATA)
            .uri(server.getURL().toString() + "/post/multipart")
            .formData(ImmutableMap.of("hello", "world", "file", ImmutableMap.of("content", fileStorage.toString(), "name", "test.yml")))
            .build();

        RunContext runContext = TestsUtils.mockRunContext(this.runContextFactory, task, ImmutableMap.of());

        Request.Output output = task.run(runContext);

        // Test endpoint echoes the form value and the uploaded file content.
        assertThat(output.getBody(), is("world > " + IOUtils.toString(new FileInputStream(file), Charsets.UTF_8)));
        assertThat(output.getCode(), is(200));
    }
}
/** The handle is considered configured if any of its three sections carries configuration. */
private boolean isNotEmptyConfig() {
    if (header.isNotEmptyConfig()) {
        return true;
    }
    if (parameter.isNotEmptyConfig()) {
        return true;
    }
    return cookie.isNotEmptyConfig();
}
@Test
public void testShenyuCookie() {
    RequestHandle handle = new RequestHandle();
    // Fully-populated cookie config via the all-args inner-class constructor.
    RequestHandle.ShenyuCookie cookie = handle.new ShenyuCookie(
        ImmutableMap.of("addKey", "addValue"),
        ImmutableMap.of("replaceKey", "newKey"),
        ImmutableMap.of("setKey", "newValue"),
        Sets.newSet("removeKey")
    );
    assertThat(cookie.isNotEmptyConfig(), is(true));
    assertThat(cookie.getAddCookies(), hasEntry("addKey", "addValue"));
    assertThat(cookie.getReplaceCookieKeys(), hasEntry("replaceKey", "newKey"));
    assertThat(cookie.getSetCookies(), hasEntry("setKey", "newValue"));
    assertThat(cookie.getRemoveCookieKeys(), hasItems("removeKey"));

    // Same config built through setters must be equal to the first instance,
    // so a set containing both collapses to a single element.
    RequestHandle.ShenyuCookie cookie1 = handle.new ShenyuCookie();
    cookie1.setAddCookies(ImmutableMap.of("addKey", "addValue"));
    cookie1.setReplaceCookieKeys(ImmutableMap.of("replaceKey", "newKey"));
    cookie1.setSetCookies(ImmutableMap.of("setKey", "newValue"));
    cookie1.setRemoveCookieKeys(ImmutableSet.of("removeKey"));
    assertThat(ImmutableSet.of(cookie, cookie1), hasSize(1));
}
@Override protected int poll() throws Exception { // must reset for each poll shutdownRunningTask = null; pendingExchanges = 0; List<software.amazon.awssdk.services.sqs.model.Message> messages = pollingTask.call(); // okay we have some response from aws so lets mark the consumer as ready forceConsumerAsReady(); Queue<Exchange> exchanges = createExchanges(messages); return processBatch(CastUtils.cast(exchanges)); }
@Test
void shouldIgnoreAutomaticalQueueCreationWhenAlreadyExists() throws Exception {
    // given: auto-creation enabled, but receive calls report the queue as missing.
    configuration.setAutoCreateQueue(true);
    sqsClientMock.setReceiveRequestHandler(request -> {
        throw QueueDoesNotExistException.builder().build();
    });

    try (var tested = createConsumer(5)) {
        // when
        var polledMessagesCount = tested.poll();

        // then: nothing is polled, the queue URL is re-resolved, and no
        // CreateQueue request is ever issued because the queue already exists.
        assertThat(polledMessagesCount).isZero();
        assertThat(receivedExchanges).isEmpty();
        assertThat(sqsClientMock.getReceiveRequests()).containsExactlyInAnyOrder(expectedReceiveRequest(5));
        assertThat(sqsClientMock.getQueueUrlRequests()).containsExactly(GetQueueUrlRequest.builder()
            .queueName(configuration.getQueueName())
            .build());
        assertThat(sqsClientMock.getCreateQueueRequets()).isEmpty();
    }
}
/**
 * Lists the children of a remote directory, pairing each remote path with its
 * local download target. Listings are cached per directory; symbolic links that
 * will be recreated locally are not descended into.
 */
@Override
public List<TransferItem> list(final Session<?> session, final Path directory, final Local local,
                               final ListProgressListener listener) throws BackgroundException {
    if (log.isDebugEnabled()) {
        log.debug(String.format("List children for %s", directory));
    }
    if (directory.isSymbolicLink() && new DownloadSymlinkResolver(roots).resolve(directory)) {
        // The symlink itself will be materialized locally; do not follow it.
        if (log.isDebugEnabled()) {
            log.debug(String.format("Do not list children for symbolic link %s", directory));
        }
        return Collections.emptyList();
    }
    else {
        final AttributedList<Path> list;
        if (cache.isCached(directory)) {
            list = cache.get(directory);
        }
        else {
            list = session.getFeature(ListService.class).list(directory, listener);
            cache.put(directory, list);
        }
        final List<TransferItem> children = new ArrayList<>();
        // Return copy with filtered result only
        for (Path f : new AttributedList<>(list.filter(comparator, filter))) {
            children.add(new TransferItem(f, LocalFactory.get(local, f.getName())));
        }
        return children;
    }
}
@Test
public void testChildrenEmpty() throws Exception {
    final Path root = new Path("/t", EnumSet.of(Path.Type.directory));
    final Transfer t = new DownloadTransfer(new Host(new TestProtocol()), root, null);
    // Session whose directory listing is always empty.
    final NullSession session = new NullSession(new Host(new TestProtocol())) {
        @Override
        public AttributedList<Path> list(final Path file, final ListProgressListener listener) {
            return AttributedList.emptyList();
        }
    };
    // An empty remote listing must yield an empty transfer item list.
    assertTrue(t.list(session, root, new NullLocal("t") {
        @Override
        public boolean exists() {
            return true;
        }
    }, new DisabledListProgressListener()).isEmpty());
}
/** Synchronous facade over {@code rangeReversedAsync}; blocks until the range is fetched. */
@Override
public Map<StreamMessageId, Map<K, V>> rangeReversed(int count, StreamMessageId from, StreamMessageId to) {
    return get(rangeReversedAsync(count, from, to));
}
@Test
public void testRangeReversed() {
    RStream<String, String> stream = redisson.getStream("test");
    assertThat(stream.size()).isEqualTo(0);

    Map<String, String> entries1 = new HashMap<>();
    entries1.put("1", "11");
    entries1.put("3", "31");
    stream.add(new StreamMessageId(1), StreamAddArgs.entries(entries1).trimNonStrict().maxLen(1).noLimit());
    assertThat(stream.size()).isEqualTo(1);

    Map<String, String> entries2 = new HashMap<>();
    entries2.put("5", "55");
    entries2.put("7", "77");
    stream.add(new StreamMessageId(2), StreamAddArgs.entries(entries2).trimNonStrict().maxLen(1).noLimit());

    // Reversed range over [MIN, MAX] must return entries newest-first with full maps.
    Map<StreamMessageId, Map<String, String>> r2 = stream.rangeReversed(10, StreamMessageId.MAX, StreamMessageId.MIN);
    assertThat(r2.keySet()).containsExactly(new StreamMessageId(2), new StreamMessageId(1));
    assertThat(r2.get(new StreamMessageId(1))).isEqualTo(entries1);
    assertThat(r2.get(new StreamMessageId(2))).isEqualTo(entries2);
}
/** Groups the actual data nodes of this table by data source name. */
public Map<String, List<DataNode>> getDataNodeGroups() {
    return DataNodeUtils.getDataNodeGroups(this.actualDataNodes);
}
@Test
void assertDatNodeGroups() {
    // Table spread over two data sources must yield one node group per source.
    Collection<String> dataSources = new LinkedList<>();
    dataSources.add("ds0");
    dataSources.add("ds1");
    ShardingTable table = new ShardingTable(dataSources, "table_0");
    Map<String, List<DataNode>> groups = table.getDataNodeGroups();
    assertThat(groups.size(), is(2));
    assertTrue(groups.get("ds0").contains(new DataNode("ds0", "table_0")));
    assertTrue(groups.get("ds1").contains(new DataNode("ds1", "table_0")));
}
/** Resolves the metastore with no explicit provider key (default resolution order). */
@Override
public IMetaStore getMetastore() {
    return this.getMetastore( null );
}
@Test
public void testGetMetastoreTest() {
    //Test that repository metastore gets returned if both local and repository metastore providers exist.
    //Also test that both providers can be accessed directly.
    MetastoreProvider localProvider = mock( MetastoreProvider.class );
    IMetaStore localMeta = mock( IMetaStore.class );
    when( localProvider.getMetastore() ).thenReturn( localMeta );
    when( localProvider.getProviderType() ).thenReturn( MetastoreLocator.LOCAL_PROVIDER_KEY );

    MetastoreProvider repoProvider = mock( MetastoreProvider.class );
    IMetaStore repoMeta = mock( IMetaStore.class );
    when( repoProvider.getMetastore() ).thenReturn( repoMeta );
    when( repoProvider.getProviderType() ).thenReturn( MetastoreLocator.REPOSITORY_PROVIDER_KEY );

    // The provider list is served through a mocked static PluginServiceLoader,
    // so the test controls exactly which providers are visible.
    Collection<MetastoreProvider> providerCollection = new ArrayList<>();
    providerCollection.add( localProvider );
    try ( MockedStatic<PluginServiceLoader> pluginServiceLoaderMockedStatic = Mockito.mockStatic( PluginServiceLoader.class ) ) {
        pluginServiceLoaderMockedStatic.when( () -> PluginServiceLoader.loadServices( MetastoreProvider.class ) )
            .thenReturn( providerCollection );
        // only local provider exists
        assertEquals( localMeta, metastoreLocator.getMetastore() );

        providerCollection.clear();
        providerCollection.add( repoProvider );
        // only repo provider exists
        assertEquals( repoMeta, metastoreLocator.getMetastore() );

        providerCollection.add( localProvider );
        // both providers exist
        assertEquals( localMeta, metastoreLocator.getExplicitMetastore( MetastoreLocator.LOCAL_PROVIDER_KEY ) );
        assertEquals( repoMeta, metastoreLocator.getExplicitMetastore( MetastoreLocator.REPOSITORY_PROVIDER_KEY ) );
    }
}
/**
 * Builds the joined stream for this node. Partition counts of both sides are
 * validated first, except for foreign-key joins where they may differ.
 */
@Override
public SchemaKStream<?> buildStream(final PlanBuildContext buildContext) {
    if (!joinKey.isForeignKey()) {
        ensureMatchingPartitionCounts(buildContext.getServiceContext().getTopicClient());
    }

    final JoinerFactory factory = new JoinerFactory(
        buildContext,
        this,
        buildContext.buildNodeContext(getId().toString()));

    return factory
        .getJoiner(left.getNodeOutputType(), right.getNodeOutputType())
        .join();
}
@Test
public void shouldPerformStreamToStreamOuterJoinWithGrace() {
    // Given:
    setupStream(left, leftSchemaKStream);
    setupStream(right, rightSchemaKStream);
    final JoinNode joinNode = new JoinNode(nodeId, OUTER, joinKey, true, left, right,
        WITHIN_EXPRESSION_WITH_GRACE, "KAFKA");

    // When:
    joinNode.buildStream(planBuildContext);

    // Then: the grace-period window expression is forwarded to the outer join.
    verify(leftSchemaKStream).outerJoin(
        rightSchemaKStream,
        SYNTH_KEY,
        WITHIN_EXPRESSION_WITH_GRACE.get(),
        VALUE_FORMAT.getFormatInfo(),
        OTHER_FORMAT.getFormatInfo(),
        CONTEXT_STACKER
    );
}
@Udf(description = "Splits a string into an array of substrings based on a regexp.") public List<String> regexpSplit( @UdfParameter( description = "The string to be split. If NULL, then function returns NULL.") final String string, @UdfParameter( description = "The regular expression to split the string by. " + "If NULL, then function returns NULL.") final String regexp) { if (string == null || regexp == null) { return null; } // Use Guava version to be compatible with other splitting functions. final Pattern p = getPattern(regexp); if (regexp.isEmpty() || p.matcher("").matches()) { return Arrays.asList(p.split(string)); } else { return Splitter.on(p).splitToList(string); } }
@Test
public void shouldReturnOriginalStringOnNotFoundRegexp() {
    // When the delimiter never matches, the whole input comes back as one element.
    assertThat(udf.regexpSplit("", "z"), contains(""));
    assertThat(udf.regexpSplit("x-y", "z"), contains("x-y"));
}
@NotNull
@Override
public Response intercept(@NotNull Chain chain) throws IOException {
    // Strip Accept-Encoding so the server is not invited to compress the body.
    Request request = chain.request().newBuilder().removeHeader("Accept-Encoding").build();
    Response response = chain.proceed(request);
    // NOTE(review): when the server compresses anyway, the response is closed
    // here but still returned — callers presumably must not read its body.
    // Confirm this close-then-return behavior is intentional.
    if (response.headers("Content-Encoding").contains("gzip")) {
        response.close();
    }
    return response;
}
@Test
public void intercept_shouldAlwaysRemoveAcceptEncoding() throws IOException {
    // The interceptor must strip the Accept-Encoding header exactly once.
    underTest.intercept(chain);
    verify(builderThatRemovesHeaders, times(1)).removeHeader("Accept-Encoding");
}
/**
 * Groups elements by the value of the named field. Distinct field values are
 * assigned group indices in first-seen order; elements that are not beans
 * (or are null) all land in group 0.
 *
 * @param collection elements to group.
 * @param fieldName  name of the bean field whose value defines the grouping.
 * @return list of groups, indexed by first appearance of each field value.
 */
public static <T> List<List<T>> groupByField(Collection<T> collection, final String fieldName) {
    return group(collection, new Hash32<T>() {
        // Ordered registry of distinct field values; a value's index is its group number.
        private final List<Object> fieldNameList = new ArrayList<>();

        @Override
        public int hash32(T t) {
            if (null == t || false == BeanUtil.isBean(t.getClass())) {
                // Non-beans are all placed in the same sub-group (index 0).
                return 0;
            }
            final Object value = ReflectUtil.getFieldValue(t, fieldName);
            int hash = fieldNameList.indexOf(value);
            if (hash < 0) {
                // First time this field value is seen: allocate the next group index.
                fieldNameList.add(value);
                return fieldNameList.size() - 1;
            } else {
                return hash;
            }
        }
    });
}
@Test
public void groupByFieldTest() {
    final List<TestBean> list = CollUtil.newArrayList(new TestBean("张三", 12), new TestBean("李四", 13), new TestBean("王五", 12));
    // Grouping by "age": group 0 holds the first-seen value (12), group 1 holds 13.
    final List<List<TestBean>> groupByField = CollUtil.groupByField(list, "age");
    assertEquals("张三", groupByField.get(0).get(0).getName());
    assertEquals("王五", groupByField.get(0).get(1).getName());

    assertEquals("李四", groupByField.get(1).get(0).getName());
}
/**
 * Builds the KotlinPoet format string for invoking this method. When the method
 * returns the contract type itself the "val" declaration prefix is omitted.
 */
@Override
protected String generatePoetStringTypes() {
    final String prefix = getMethodReturnType().equals(theContract)
        ? " %L = %T."
        : "val %L = %L.";
    return prefix + method.getName() + "(" + getPoetFormatSpecifier() + ").send()";
}
@Test
public void testGenerateJavaPoetStringTypesWhenReturnTypeIsContract() {
    // deploy(...) returns the contract type, so the generated line must omit "val".
    List<Method> listOfFilteredMethods = MethodFilter.extractValidMethods(greeterContractClass);
    Method deploy = listOfFilteredMethods.stream()
        .filter(m -> m.getName().equals("deploy"))
        .collect(Collectors.toList())
        .get(0);
    KotlinParser parser = new KotlinParser(greeterContractClass, deploy, new KotlinMappingHelper());
    assertEquals(" %L = %T.deploy(%L, %L, %L, %S).send()", parser.generatePoetStringTypes());
}
/** Removes the given role; returns true iff the role was previously present. */
@Override
public boolean remRole(Role role) {
    return roles.remove(role) != null;
}
@Test
void remRole() {
    var customer = new CustomerCore();
    customer.addRole(Role.BORROWER);

    // Role is resolvable right after being added.
    var beforeRemoval = customer.getRole(Role.BORROWER, BorrowerRole.class);
    assertTrue(beforeRemoval.isPresent());

    // Removal succeeds and the role is no longer resolvable afterwards.
    assertTrue(customer.remRole(Role.BORROWER));
    var afterRemoval = customer.getRole(Role.BORROWER, BorrowerRole.class);
    assertFalse(afterRemoval.isPresent());
}
/**
 * Clamps the configured slot count per storage path to
 * [MIN_SLOT_PER_PATH, MAX_SLOT_PER_PATH] and synchronizes
 * {@code backendsWorkingSlots} with the current backend/disk topology.
 *
 * @return false when backend info is unavailable or some alive backend has not
 *         reported its path hashes yet (caller should retry next round);
 *         true otherwise.
 */
private boolean updateWorkingSlots() {
    // Compute delta that will be checked to update slot number per storage path and
    // record new value of `Config.schedule_slot_num_per_path`.
    int cappedVal = Config.tablet_sched_slot_num_per_path < MIN_SLOT_PER_PATH ?
            MIN_SLOT_PER_PATH :
            (Config.tablet_sched_slot_num_per_path > MAX_SLOT_PER_PATH ?
                    MAX_SLOT_PER_PATH : Config.tablet_sched_slot_num_per_path);
    int delta = 0;
    int oldSlotPerPathConfig = currentSlotPerPathConfig;
    // A zero current value means the config was never applied; no delta in that case.
    if (currentSlotPerPathConfig != 0) {
        delta = cappedVal - currentSlotPerPathConfig;
    }
    currentSlotPerPathConfig = cappedVal;

    ImmutableMap<Long, Backend> backends =
            GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getIdToBackend();
    if (backends == null) {
        return false;
    }
    for (Backend backend : backends.values()) {
        if (!backend.hasPathHash() && backend.isAlive()) {
            // when upgrading, backend may not get path info yet. so return false and wait for next round.
            // and we should check if backend is alive. If backend is dead when upgrading, this backend
            // will never report its path hash, and tablet scheduler is blocked.
            LOG.info("not all backends have path info");
            return false;
        }
    }

    // update exist backends
    Set<Long> deletedBeIds = Sets.newHashSet();
    for (Long beId : backendsWorkingSlots.keySet()) {
        if (backends.containsKey(beId)) {
            Backend backend = backends.get(beId);
            if (backend == null) {
                continue;
            }
            ImmutableMap<String, DiskInfo> disks = backend.getDisks();
            if (disks == null) {
                continue;
            }
            // Only read-write disks participate in scheduling slots.
            List<Long> pathHashes = disks.values().stream()
                    .filter(DiskInfo::canReadWrite)
                    .map(DiskInfo::getPathHash).collect(Collectors.toList());
            backendsWorkingSlots.get(beId).updatePaths(pathHashes, currentSlotPerPathConfig);
        } else {
            deletedBeIds.add(beId);
        }
    }

    // delete non-exist backends
    for (Long beId : deletedBeIds) {
        backendsWorkingSlots.remove(beId);
        LOG.info("delete non exist backend: {}", beId);
    }

    // add new backends
    for (Backend be : backends.values()) {
        if (!backendsWorkingSlots.containsKey(be.getId())) {
            List<Long> pathHashes =
                    be.getDisks().values().stream().map(DiskInfo::getPathHash).collect(Collectors.toList());
            PathSlot slot = new PathSlot(pathHashes, currentSlotPerPathConfig);
            backendsWorkingSlots.put(be.getId(), slot);
            LOG.info("add new backend {} with slots num: {}", be.getId(), be.getDisks().size());
        }
    }

    // Apply the config change to every surviving slot tracker.
    if (delta != 0) {
        LOG.info("Going to update slots per path. delta: {}, before: {}", delta, oldSlotPerPathConfig);
        int finalDelta = delta;
        backendsWorkingSlots.forEach((beId, pathSlot) -> pathSlot.updateSlot(finalDelta));
    }
    return true;
}
@Test
public void testUpdateWorkingSlots() throws NoSuchMethodException, InvocationTargetException,
        IllegalAccessException, SchedException {
    // Two backends, each with two read-write disks carrying known path hashes.
    TDisk td11 = new TDisk("/path11", 1L, 2L, true);
    td11.setPath_hash(11);
    TDisk td12 = new TDisk("/path12", 1L, 2L, true);
    td12.setPath_hash(12);
    Map<String, TDisk> backendDisks1 = new HashMap<>();
    backendDisks1.put("/path11", td11);
    backendDisks1.put("/path12", td12);
    Backend be1 = new Backend(1, "192.168.0.1", 9030);
    be1.setAlive(true);
    be1.updateDisks(backendDisks1);
    systemInfoService.addBackend(be1);

    TDisk td21 = new TDisk("/path21", 1L, 2L, true);
    td21.setPath_hash(21);
    TDisk td22 = new TDisk("/path22", 1L, 2L, true);
    td22.setPath_hash(22);
    Map<String, TDisk> backendDisks2 = new HashMap<>();
    backendDisks2.put("/path21", td21);
    backendDisks2.put("/path22", td22);
    Backend be2 = new Backend(2, "192.168.0.2", 9030);
    be2.updateDisks(backendDisks2);
    be2.setAlive(true);
    systemInfoService.addBackend(be2);

    // updateWorkingSlots is private; invoke it reflectively.
    TabletScheduler tabletScheduler = new TabletScheduler(tabletSchedulerStat);
    Method m = TabletScheduler.class.getDeclaredMethod("updateWorkingSlots", null);
    m.setAccessible(true);
    m.invoke(tabletScheduler, null);

    // Each known path starts with the configured number of free slots.
    Map<Long, TabletScheduler.PathSlot> bslots = tabletScheduler.getBackendsWorkingSlots();
    Assert.assertEquals(Config.tablet_sched_slot_num_per_path, bslots.get(1L).peekSlot(11));
    Assert.assertEquals(Config.tablet_sched_slot_num_per_path, bslots.get(2L).peekSlot(22));

    // Exhaust a path's slots, verify further takes fail, then free them all.
    long result = takeSlotNTimes(Config.tablet_sched_slot_num_per_path, bslots.get(1L), 11L);
    Assert.assertEquals(11, result);
    result = takeSlotNTimes(1, bslots.get(1L), 11L);
    Assert.assertEquals(-1, result);
    freeSlotNTimes(Config.tablet_sched_slot_num_per_path, bslots.get(1L), 11L);
    Assert.assertEquals(Config.tablet_sched_slot_num_per_path, bslots.get(1L).getSlotTotal(11));

    updateSlotWithNewConfig(128, m, tabletScheduler);
    // test max slot
    Assert.assertEquals(TabletScheduler.MAX_SLOT_PER_PATH, bslots.get(1L).getSlotTotal(11));
    Assert.assertEquals(TabletScheduler.MAX_SLOT_PER_PATH, bslots.get(1L).peekSlot(11));

    updateSlotWithNewConfig(0, m, tabletScheduler);
    // test min slot
    Assert.assertEquals(TabletScheduler.MIN_SLOT_PER_PATH, bslots.get(1L).peekSlot(11));
    Assert.assertEquals(TabletScheduler.MIN_SLOT_PER_PATH, bslots.get(2L).peekSlot(22));
    takeSlotNTimes(10, bslots.get(1L), 11L); // not enough, can only get 2 free slot
    takeSlotNTimes(10, bslots.get(2L), 21L); // not enough, can only get 2 free slot
    Assert.assertEquals(0, bslots.get(1L).peekSlot(11));
    Assert.assertEquals(0, bslots.get(2L).peekSlot(21));
    Assert.assertEquals(TabletScheduler.MIN_SLOT_PER_PATH, bslots.get(1L).getSlotTotal(11));

    updateSlotWithNewConfig(2, m, tabletScheduler);
    Assert.assertEquals(0, bslots.get(1L).peekSlot(11));
    Assert.assertEquals(TabletScheduler.MIN_SLOT_PER_PATH, bslots.get(1L).peekSlot(12));

    updateSlotWithNewConfig(4, m, tabletScheduler);
    Assert.assertEquals(2, bslots.get(2L).peekSlot(21));
    Assert.assertEquals(4, bslots.get(2L).peekSlot(22));
    Assert.assertEquals(4, bslots.get(1L).getSlotTotal(11));

    takeSlotNTimes(5, bslots.get(1L), 11); // not enough, can only get 2 free slot
    updateSlotWithNewConfig(2, m, tabletScheduler); // decrease total slot
    // Free count may go negative while previously taken slots are still outstanding.
    // this is normal because slot taken haven't return
    Assert.assertEquals(-2, bslots.get(1L).peekSlot(11));
    Assert.assertEquals(2, bslots.get(1L).peekSlot(12));
    Assert.assertEquals(0, bslots.get(2L).peekSlot(21));

    freeSlotNTimes(2, bslots.get(1L), 11L);
    Assert.assertEquals(0, bslots.get(1L).peekSlot(11));
    freeSlotNTimes(2, bslots.get(1L), 11L);
    Assert.assertEquals(bslots.get(1L).peekSlot(11), bslots.get(1L).getSlotTotal(11));
}
/** Returns true when the given return type is one of the supported reactive types. */
@Override
public boolean canHandleReturnType(Class<?> returnType) {
    for (Class<?> supported : rxSupportedTypes) {
        if (supported.isAssignableFrom(returnType)) {
            return true;
        }
    }
    return false;
}
@Test
public void testCheckTypes() {
    // Every RxJava 3 reactive base type must be recognized by the aspect.
    assertThat(rxJava3TimeLimiterAspectExt.canHandleReturnType(Flowable.class)).isTrue();
    assertThat(rxJava3TimeLimiterAspectExt.canHandleReturnType(Single.class)).isTrue();
    assertThat(rxJava3TimeLimiterAspectExt.canHandleReturnType(Observable.class)).isTrue();
    assertThat(rxJava3TimeLimiterAspectExt.canHandleReturnType(Completable.class)).isTrue();
    assertThat(rxJava3TimeLimiterAspectExt.canHandleReturnType(Maybe.class)).isTrue();
}
/**
 * Looks up the attributes of a file by listing its parent and matching the entry.
 *
 * @throws NotfoundException when no matching entry is found in the parent listing.
 */
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    // The root and drive containers carry no attributes of their own.
    if (file.isRoot()) {
        return PathAttributes.EMPTY;
    }
    if (new DefaultPathContainerService().isContainer(file)) {
        return PathAttributes.EMPTY;
    }
    final Path query;
    if (file.isPlaceholder()) {
        // Placeholders are queried by their base name with the extension stripped.
        query = new Path(file.getParent(), FilenameUtils.removeExtension(file.getName()), file.getType(), file.attributes());
    }
    else {
        query = file;
    }
    final AttributedList<Path> list;
    if (new SimplePathPredicate(DriveHomeFinderService.SHARED_DRIVES_NAME).test(file.getParent())) {
        // Children of the shared-drives root are enumerated via the team-drive listing.
        list = new DriveTeamDrivesListService(session, fileid).list(file.getParent(), listener);
    }
    else {
        list = new FileidDriveListService(session, fileid, query).list(file.getParent(), listener);
    }
    final Path found = list.find(new ListFilteringFeature.ListFilteringPredicate(session.getCaseSensitivity(), file));
    if (null == found) {
        throw new NotfoundException(file.getAbsolute());
    }
    return found.attributes();
}
@Test
public void testMissingShortcutTarget() throws Exception {
    // Create a regular file, then a Drive shortcut pointing at it.
    final Path test = new Path(DriveHomeFinderService.MYDRIVE_FOLDER, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
    new DriveTouchFeature(session, fileid).touch(test, new TransferStatus());
    final DriveAttributesFinderFeature f = new DriveAttributesFinderFeature(session, fileid);
    final PathAttributes attributes = f.find(test);
    final File shortcut = session.getClient().files().create(new File()
        .setName(new AlphanumericRandomStringService().random())
        .setMimeType("application/vnd.google-apps.shortcut")
        .setShortcutDetails(new File.ShortcutDetails()
            .setTargetMimeType("text/plain")
            .setTargetId(fileid.getFileId(test))
        )
    ).execute();
    // While the target exists, the shortcut resolves to the target's attributes.
    assertEquals(attributes, f.find(new Path(DriveHomeFinderService.MYDRIVE_FOLDER, shortcut.getName(), EnumSet.of(Path.Type.file))));
    // Delete the target so the shortcut dangles.
    session.getClient().files().delete(fileid.getFileId(test))
        .setSupportsAllDrives(PreferencesFactory.get().getBoolean("googledrive.teamdrive.enable")).execute();
    try {
        f.find(new Path(DriveHomeFinderService.MYDRIVE_FOLDER, shortcut.getName(), EnumSet.of(Path.Type.file)));
        // NOTE(review): there is no fail() after this call — the test passes even
        // if find() does NOT throw. Confirm whether NotfoundException is required here.
    }
    catch(NotfoundException e) {
        // Expected. Can no longer resolve shortcut
    }
    final AttributedList<Path> list = new DriveListService(session, fileid).list(DriveHomeFinderService.MYDRIVE_FOLDER, new DisabledListProgressListener());
    assertFalse(list.contains(test));
    assertNull(list.find(new SimplePathPredicate(new Path(DriveHomeFinderService.MYDRIVE_FOLDER, shortcut.getName(), EnumSet.of(Path.Type.file)))));
    session.getClient().files().delete(shortcut.getId())
        .setSupportsAllDrives(PreferencesFactory.get().getBoolean("googledrive.teamdrive.enable")).execute();
}
/**
 * Handles a successful share-group heartbeat response: updates member id/epoch,
 * refreshes the telemetry label when the id changes, and applies any new
 * assignment unless the member is leaving or already out of the group.
 *
 * @throws IllegalArgumentException if the response unexpectedly carries an error code.
 */
@Override
public void onHeartbeatSuccess(ShareGroupHeartbeatResponseData response) {
    if (response.errorCode() != Errors.NONE.code()) {
        String errorMessage = String.format(
            "Unexpected error in Heartbeat response. Expected no error, but received: %s",
            Errors.forCode(response.errorCode())
        );
        throw new IllegalArgumentException(errorMessage);
    }
    MemberState state = state();
    if (state == MemberState.LEAVING) {
        log.debug("Ignoring heartbeat response received from broker. Member {} with epoch {} is " +
            "already leaving the group.", memberId, memberEpoch);
        return;
    }
    if (state == MemberState.UNSUBSCRIBED && maybeCompleteLeaveInProgress()) {
        log.debug("Member {} with epoch {} received a successful response to the heartbeat " +
            "to leave the group and completed the leave operation. ", memberId, memberEpoch);
        return;
    }
    if (isNotInGroup()) {
        log.debug("Ignoring heartbeat response received from broker. Member {} is in {} state" +
            " so it's not a member of the group. ", memberId, state);
        return;
    }

    // Update the group member id label in the client telemetry reporter if the member id has
    // changed. Initially the member id is empty, and it is updated when the member joins the
    // group. This is done here to avoid updating the label on every heartbeat response. Also
    // check if the member id is null, as the schema defines it as nullable.
    if (response.memberId() != null && !response.memberId().equals(memberId)) {
        clientTelemetryReporter.ifPresent(reporter -> reporter.updateMetricsLabels(
            Collections.singletonMap(ClientTelemetryProvider.GROUP_MEMBER_ID, response.memberId())));
    }

    this.memberId = response.memberId();
    updateMemberEpoch(response.memberEpoch());

    ShareGroupHeartbeatResponseData.Assignment assignment = response.assignment();
    if (assignment != null) {
        if (!state.canHandleNewAssignment()) {
            // New assignment received but member is in a state where it cannot take new
            // assignments (ex. preparing to leave the group)
            log.debug("Ignoring new assignment {} received from server because member is in {} state.",
                assignment, state);
            return;
        }
        Map<Uuid, SortedSet<Integer>> newAssignment = new HashMap<>();
        assignment.topicPartitions().forEach(topicPartition ->
            newAssignment.put(topicPartition.topicId(), new TreeSet<>(topicPartition.partitions())));
        processAssignmentReceived(newAssignment);
    }
}
@Test
public void testListenersGetNotifiedOfMemberEpochUpdatesOnlyIfItChanges() {
    ShareMembershipManager membershipManager = createMembershipManagerJoiningGroup();
    MemberStateListener listener = mock(MemberStateListener.class);
    membershipManager.registerStateListener(listener);
    int epoch = 5;

    // First heartbeat carrying a new epoch must notify the listener.
    membershipManager.onHeartbeatSuccess(new ShareGroupHeartbeatResponseData()
        .setErrorCode(Errors.NONE.code())
        .setMemberId(MEMBER_ID)
        .setMemberEpoch(epoch));

    verify(listener).onMemberEpochUpdated(Optional.of(epoch), Optional.of(MEMBER_ID));
    clearInvocations(listener);

    // A repeated heartbeat with the same epoch must not trigger another notification.
    membershipManager.onHeartbeatSuccess(new ShareGroupHeartbeatResponseData()
        .setErrorCode(Errors.NONE.code())
        .setMemberId(MEMBER_ID)
        .setMemberEpoch(epoch));
    verify(listener, never()).onMemberEpochUpdated(any(), any());
}
/** This context does not support destruction. */
@Override
public void destroy() {
    throw new UnsupportedOperationException();
}
// destroy() is unsupported on this context and must throw.
@Test(expected = UnsupportedOperationException.class)
public void testDestroy() {
    context.destroy();
}
/** Resolves class info using the class's own loader and binary name. */
public ClassInfo get(Class<?> type) {
    return get(type.getClassLoader(), type.getName());
}
@Test
void getClazzNullCL() {
    // Bootstrap classes (e.g. String) report a null classloader; lookup must still work.
    ClassInfo ci = instance.get(null, String.class.getName());
    assertNotNull(ci);
}
/**
 * Returns the array class whose component type is {@code componentType},
 * e.g. {@code int.class -> int[].class}.
 *
 * @param componentType the element type of the desired array class.
 * @return the corresponding array {@link Class}.
 */
public static Class<?> getArrayType(Class<?> componentType) {
    // Allocating a zero-length array and asking for its class avoids any
    // manual array-class-name mangling, including nested array types.
    final Object empty = Array.newInstance(componentType, 0);
    return empty.getClass();
}
@Test
public void getArrayTypeTest() {
    // Primitive component type yields the primitive array class.
    assertSame(int[].class, ArrayUtil.getArrayType(int.class));
    // Reference component type yields the reference array class.
    assertSame(String[].class, ArrayUtil.getArrayType(String.class));
}
/** Dispatches log-handler lifecycle events to the matching action; unknown types are ignored. */
@Override
public void handle(LogHandlerEvent event) {
    switch (event.getType()) {
        case APPLICATION_STARTED:
            LogHandlerAppStartedEvent appStartEvent = (LogHandlerAppStartedEvent) event;
            initApp(appStartEvent.getApplicationId(), appStartEvent.getUser(),
                appStartEvent.getCredentials(), appStartEvent.getApplicationAcls(),
                appStartEvent.getLogAggregationContext(),
                appStartEvent.getRecoveredAppLogInitedTime());
            break;
        case CONTAINER_FINISHED:
            LogHandlerContainerFinishedEvent containerFinishEvent = (LogHandlerContainerFinishedEvent) event;
            stopContainer(containerFinishEvent.getContainerId(),
                containerFinishEvent.getContainerType(),
                containerFinishEvent.getExitCode());
            break;
        case APPLICATION_FINISHED:
            LogHandlerAppFinishedEvent appFinishedEvent = (LogHandlerAppFinishedEvent) event;
            stopApp(appFinishedEvent.getApplicationId());
            break;
        case LOG_AGG_TOKEN_UPDATE:
            checkAndEnableAppAggregators();
            break;
        default:
            ; // Ignore
    }
}
// Verifies that a failure creating the remote log dir fails only the first
// application's log handling, while a subsequent application (second call
// succeeds via doThrow(...).doNothing()) still initializes normally.
@Test
public void testVerifyAndCreateRemoteDirsFailure() throws Exception {
    this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
    this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
        this.remoteRootLogDir.getAbsolutePath());
    LogAggregationFileControllerFactory factory =
        new LogAggregationFileControllerFactory(conf);
    LogAggregationFileController logAggregationFileFormat = factory
        .getFileControllerForWrite();
    LogAggregationFileController spyLogAggregationFileFormat =
        spy(logAggregationFileFormat);
    YarnRuntimeException e = new YarnRuntimeException("KABOOM!");
    // First verifyAndCreateRemoteLogDir() call throws; the second succeeds.
    doThrow(e).doNothing().when(spyLogAggregationFileFormat)
        .verifyAndCreateRemoteLogDir();
    // Service is overridden to always hand back the spy controller above.
    LogAggregationService logAggregationService = spy(
        new LogAggregationService(dispatcher, this.context, this.delSrvc,
            super.dirsHandler) {
          @Override
          public LogAggregationFileController getLogAggregationFileController(
              Configuration conf) {
            return spyLogAggregationFileFormat;
          }
        });
    logAggregationService.init(this.conf);
    logAggregationService.start();

    // Now try to start an application
    ApplicationId appId =
        BuilderUtils.newApplicationId(System.currentTimeMillis(),
            (int) (Math.random() * 1000));
    LogAggregationContext contextWithAMAndFailed =
        Records.newRecord(LogAggregationContext.class);
    contextWithAMAndFailed.setLogAggregationPolicyClassName(
        AMOrFailedContainerLogAggregationPolicy.class.getName());
    logAggregationService.handle(new LogHandlerAppStartedEvent(appId,
        this.user, null, this.acls, contextWithAMAndFailed));
    dispatcher.await();

    // Verify that it failed
    ApplicationEvent[] expectedEvents = new ApplicationEvent[] {
        new ApplicationEvent(appId,
            ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED) };
    checkEvents(appEventHandler, expectedEvents, false, "getType",
        "getApplicationID", "getDiagnostic");
    Mockito.reset(logAggregationService);

    // Now try to start another one
    ApplicationId appId2 =
        BuilderUtils.newApplicationId(System.currentTimeMillis(),
            (int) (Math.random() * 1000));
    File appLogDir = new File(localLogDir, appId2.toString());
    appLogDir.mkdir();
    logAggregationService.handle(new LogHandlerAppStartedEvent(appId2,
        this.user, null, this.acls, contextWithAMAndFailed));
    dispatcher.await();

    // Verify that it worked
    expectedEvents = new ApplicationEvent[] {
        new ApplicationEvent(appId, // original failure
            ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED),
        new ApplicationEvent(appId2, // success
            ApplicationEventType.APPLICATION_LOG_HANDLING_INITED) };
    checkEvents(appEventHandler, expectedEvents, false, "getType",
        "getApplicationID", "getDiagnostic");
    logAggregationService.stop();
}
protected String parseVersion(String output) { Matcher cdhMatcher = CDH_PATTERN.matcher(output); // Use CDH version if it is CDH if (cdhMatcher.find()) { String cdhVersion = cdhMatcher.group("cdhVersion"); return "cdh" + cdhVersion; } // Use Hadoop version otherwise String version = ""; Matcher matcher = HADOOP_PATTERN.matcher(output); if (matcher.find()) { version = matcher.group("version"); } return version; }
// Plain Apache Hadoop output (no CDH marker) must yield the bare version.
@Test
public void versionParsing() {
    String versionStr = "Hadoop 2.7.2\n"
        + "Subversion https://git-wip-us.apache.org/repos/asf/hadoop.git "
        + "-r b165c4fe8a74265c792ce23f546c64604acf0e41\n"
        + "Compiled by jenkins on 2016-01-26T00:08Z\n"
        + "Compiled with protoc 2.5.0\n"
        + "From source with checksum d0fda26633fa762bff87ec759ebe689c\n"
        + "This command was run using "
        + "/tmp/hadoop/share/hadoop/common/hadoop-common-2.7.2.jar";
    HdfsVersionValidationTask task = new HdfsVersionValidationTask(CONF);
    String version = task.parseVersion(versionStr);
    assertEquals("2.7.2", version);
}
/**
 * Returns true iff at most one of the given flags is set.
 *
 * @param values the flags to inspect (may be empty)
 * @return false as soon as a second true flag is found, true otherwise
 */
@VisibleForTesting
static boolean atMostOne(boolean... values) {
    int setCount = 0;
    for (boolean value : values) {
        // Bail out early once a second true flag is observed.
        if (value && ++setCount > 1) {
            return false;
        }
    }
    return true;
}
// Exhaustive truth table up to three flags: true only when <= 1 flag is set.
@Test
public void testAtMostOne() {
    assertTrue(atMostOne(true));
    assertTrue(atMostOne(false));
    assertFalse(atMostOne(true, true));
    assertTrue(atMostOne(true, false));
    assertTrue(atMostOne(false, true));
    assertTrue(atMostOne(false, false));
    assertFalse(atMostOne(true, true, true));
    assertFalse(atMostOne(true, true, false));
    assertFalse(atMostOne(true, false, true));
    assertTrue(atMostOne(true, false, false));
    assertFalse(atMostOne(false, true, true));
    assertTrue(atMostOne(false, true, false));
    assertTrue(atMostOne(false, false, true));
    assertTrue(atMostOne(false, false, false));
}
@Override public HashMap<String, Object> loadVGroups() { try { File fileToLoad = new File(storePath); if (!fileToLoad.exists()) { try { // create new file to record vgroup mapping relationship boolean fileCreated = fileToLoad.createNewFile(); if (fileCreated) { LOGGER.info("New vgroup file created at path: " + storePath); } else { LOGGER.warn("Failed to create a new vgroup file at path: " + storePath); } } catch (IOException e) { LOGGER.error("Error while creating a new file: " + e.getMessage()); } } String fileContent = FileUtils.readFileToString(fileToLoad, "UTF-8"); if (!fileContent.isEmpty()) { ObjectMapper objectMapper = new ObjectMapper(); vGroupMapping = objectMapper.readValue(fileContent, new TypeReference<HashMap<String, Object>>() { }); } } catch (Exception e) { LOGGER.error("mapping relationship load failed! " + e); } return vGroupMapping; }
// A pre-populated store file must be deserialized into the expected map.
@Test
public void testLoadVGroups() throws IOException {
    HashMap<String, Object> expectedMapping = new HashMap<>();
    expectedMapping.put(VGROUP_NAME, UNIT);
    File file = new File(STORE_PATH);
    // Seed the store file with a single mapping entry.
    FileUtils.writeStringToFile(file, "{\"testVGroup\":\"testUnit\"}",
        StandardCharsets.UTF_8);
    HashMap<String, Object> actualMapping = fileVGroupMappingStoreManager.loadVGroups();
    assertEquals(expectedMapping, actualMapping);
}
/**
 * Computes the full set of cluster events implied by the state transition
 * described by {@code params}: per-node diffs, a whole-cluster diff, and
 * derived bucket-space state diffs (the latter computed on the raw params).
 */
public static List<Event> computeEventDiff(final Params params) {
    final List<Event> events = new ArrayList<>();
    // NOTE(review): createBaselineParams(params) is invoked twice here;
    // presumably it is a pure transformation — confirm before deduplicating
    // into a single local variable.
    emitPerNodeDiffEvents(createBaselineParams(params), events);
    emitWholeClusterDiffEvent(createBaselineParams(params), events);
    emitDerivedBucketSpaceStatesDiffEvents(params, events);
    return events;
}
// Clearing a GROUP_IS_DOWN reason while the node comes back up must emit both
// the D->U state-change event and a "group availability restored" node event.
@Test
void group_down_to_clear_reason_emits_group_up_event() {
    final EventFixture fixture = EventFixture.createForNodes(3)
            .clusterStateBefore("distributor:3 storage:3 .2.s:d")
            .clusterStateAfter("distributor:3 storage:3")
            .storageNodeReasonBefore(2, NodeStateReason.GROUP_IS_DOWN); // But no after-reason.

    final List<Event> events = fixture.computeEventDiff();

    assertThat(events.size(), equalTo(2));
    assertThat(events, hasItem(allOf(
            eventForNode(storageNode(2)),
            nodeEventWithDescription("Altered node state in cluster state from 'D' to 'U'"))));
    assertThat(events, hasItem(allOf(
            eventForNode(storageNode(2)),
            eventTypeIs(NodeEvent.Type.CURRENT),
            nodeEventWithDescription("Group node availability has been restored"))));
}
public String flattenSchema(StructType schema, String prefix) { final StringBuilder selectSQLQuery = new StringBuilder(); for (StructField field : schema.fields()) { final String fieldName = field.name(); // it is also possible to expand arrays by using Spark "expand" function. // As it can increase data size significantly we later pass additional property with a // list of arrays to expand. final String colName = prefix == null ? fieldName : (prefix + "." + fieldName); if (field.dataType().getClass().equals(StructType.class)) { selectSQLQuery.append(flattenSchema((StructType) field.dataType(), colName)); } else { selectSQLQuery.append(colName); selectSQLQuery.append(" as "); selectSQLQuery.append(colName.replace(".", "_")); } selectSQLQuery.append(","); } if (selectSQLQuery.length() > 0) { selectSQLQuery.deleteCharAt(selectSQLQuery.length() - 1); } return selectSQLQuery.toString(); }
// One level of struct nesting: nested columns are dot-prefixed in the select
// expression and aliased with '_' in place of '.'.
@Test
public void testFlatten() {
    FlatteningTransformer transformer = new FlatteningTransformer();

    // Init
    StructField[] nestedStructFields = new StructField[] {
        new StructField("nestedIntColumn", DataTypes.IntegerType, true, Metadata.empty()),
        new StructField("nestedStringColumn", DataTypes.StringType, true, Metadata.empty()),};

    StructField[] structFields = new StructField[] {
        new StructField("intColumn", DataTypes.IntegerType, true, Metadata.empty()),
        new StructField("stringColumn", DataTypes.StringType, true, Metadata.empty()),
        new StructField("nestedStruct", DataTypes.createStructType(nestedStructFields), true, Metadata.empty())};

    StructType schema = new StructType(structFields);
    String flattenedSql = transformer.flattenSchema(schema, null);
    assertEquals("intColumn as intColumn,stringColumn as stringColumn,"
        + "nestedStruct.nestedIntColumn as nestedStruct_nestedIntColumn,"
        + "nestedStruct.nestedStringColumn as nestedStruct_nestedStringColumn",
        flattenedSql);
}
/**
 * Sends a single templated mail: validates the template, account, address and
 * template params, records a send log, and — only when the template is
 * enabled — dispatches the actual send asynchronously via MQ.
 *
 * @return id of the created mail log entry
 */
@Override
public Long sendSingleMail(String mail, Long userId, Integer userType,
                           String templateCode, Map<String, Object> templateParams) {
    // Validate that the mail template is legal
    MailTemplateDO template = validateMailTemplate(templateCode);
    // Validate that the mail account is legal
    MailAccountDO account = validateMailAccount(template.getAccountId());
    // Validate that the mail address exists
    mail = validateMail(mail);
    validateTemplateParams(template, templateParams);

    // Create the send log. If the template is disabled, do not send the mail
    // and only record the log entry.
    Boolean isSend = CommonStatusEnum.ENABLE.getStatus().equals(template.getStatus());
    String title = mailTemplateService.formatMailTemplateContent(template.getTitle(), templateParams);
    String content = mailTemplateService.formatMailTemplateContent(template.getContent(), templateParams);
    Long sendLogId = mailLogService.createMailLog(userId, userType, mail,
        account, template, content, templateParams, isSend);
    // Publish an MQ message to perform the actual send asynchronously
    if (isSend) {
        mailProducer.sendMailSendMessage(sendLogId, mail, account.getId(),
            template.getNickname(), title, content);
    }
    return sendLogId;
}
// A disabled template must still create a mail log (isSend=false) but never
// publish a send message to the producer.
@Test
public void testSendSingleMail_successWhenSmsTemplateDisable() {
    // Prepare arguments
    String mail = randomEmail();
    Long userId = randomLongId();
    Integer userType = randomEle(UserTypeEnum.values()).getValue();
    String templateCode = RandomUtils.randomString();
    Map<String, Object> templateParams = MapUtil.<String, Object>builder().put("code", "1234")
            .put("op", "login").build();
    // Mock MailTemplateService behavior (template is DISABLED)
    MailTemplateDO template = randomPojo(MailTemplateDO.class, o -> {
        o.setStatus(CommonStatusEnum.DISABLE.getStatus());
        o.setContent("验证码为{code}, 操作为{op}");
        o.setParams(Lists.newArrayList("code", "op"));
    });
    when(mailTemplateService.getMailTemplateByCodeFromCache(eq(templateCode))).thenReturn(template);
    String title = RandomUtils.randomString();
    when(mailTemplateService.formatMailTemplateContent(eq(template.getTitle()), eq(templateParams)))
            .thenReturn(title);
    String content = RandomUtils.randomString();
    when(mailTemplateService.formatMailTemplateContent(eq(template.getContent()), eq(templateParams)))
            .thenReturn(content);
    // Mock MailAccountService behavior
    MailAccountDO account = randomPojo(MailAccountDO.class);
    when(mailAccountService.getMailAccountFromCache(eq(template.getAccountId()))).thenReturn(account);
    // Mock MailLogService behavior — note the expected isSend flag is false
    Long mailLogId = randomLongId();
    when(mailLogService.createMailLog(eq(userId), eq(userType), eq(mail),
        eq(account), eq(template), eq(content), eq(templateParams), eq(false))).thenReturn(mailLogId);

    // Invoke
    Long resultMailLogId = mailSendService.sendSingleMail(mail, userId, userType, templateCode, templateParams);
    // Assert the returned log id
    assertEquals(mailLogId, resultMailLogId);
    // Assert no send message was published
    verify(mailProducer, times(0)).sendMailSendMessage(anyLong(), anyString(), anyLong(),
        anyString(), anyString(), anyString());
}
// Convenience overload: derives a default RetStatus from the HTTP status code
// and/or exception, then delegates so response headers can override it.
public static RetStatus getRetStatusFromRequest(HttpHeaders headers, Integer statusCode, Throwable exception) {
    return getRetStatusFromRequest(headers, getDefaultRetStatus(statusCode, exception));
}
// The callee-ret-status header, when present, must override the supplied default.
@Test
public void testGetRetStatusFromRequest() {
    HttpHeaders headers = new HttpHeaders();
    // No header set: the default is returned as-is.
    RetStatus ret = PolarisEnhancedPluginUtils.getRetStatusFromRequest(headers, RetStatus.RetFail);
    assertThat(ret).isEqualTo(RetStatus.RetFail);

    headers.set(HeaderConstant.INTERNAL_CALLEE_RET_STATUS, RetStatus.RetFlowControl.getDesc());
    ret = PolarisEnhancedPluginUtils.getRetStatusFromRequest(headers, RetStatus.RetFail);
    assertThat(ret).isEqualTo(RetStatus.RetFlowControl);

    headers.set(HeaderConstant.INTERNAL_CALLEE_RET_STATUS, RetStatus.RetReject.getDesc());
    ret = PolarisEnhancedPluginUtils.getRetStatusFromRequest(headers, RetStatus.RetFail);
    assertThat(ret).isEqualTo(RetStatus.RetReject);
}
// Returns the current hash of the property set, as held by mHash.
public String hash() {
    return mHash.get();
}
// The hash must change exactly when the effective key/value set changes, and
// must return to a previous value when a mutation is undone or repeated.
@Test
public void hash() {
    String hash0 = mProperties.hash();
    mProperties.set(mKeyWithValue, "new value");
    String hash1 = mProperties.hash();
    Assert.assertNotEquals(hash0, hash1);
    mProperties.remove(mKeyWithValue);
    String hash2 = mProperties.hash();
    // Removing the override restores the original hash.
    Assert.assertEquals(hash0, hash2);
    mProperties.set(mKeyWithValue, "new value");
    String hash3 = mProperties.hash();
    // Re-applying the identical value reproduces the identical hash.
    Assert.assertEquals(hash1, hash3);
    mProperties.set(mKeyWithValue, "updated new value");
    String hash4 = mProperties.hash();
    Assert.assertNotEquals(hash0, hash4);
    Assert.assertNotEquals(hash1, hash4);
    Assert.assertNotEquals(hash2, hash4);
    Assert.assertNotEquals(hash3, hash4);
    mProperties.set(mKeyWithoutValue, "value");
    String hash5 = mProperties.hash();
    Assert.assertNotEquals(hash0, hash5);
    Assert.assertNotEquals(hash1, hash5);
    Assert.assertNotEquals(hash2, hash5);
    Assert.assertNotEquals(hash3, hash5);
    Assert.assertNotEquals(hash4, hash5);
}
// Forwards each incoming record's payload straight to the underlying writer.
@Override
public void processElement(StreamRecord<T> element) throws Exception {
    writer.write(element.getValue());
}
// Without a checkpoint the writer must emit no WriteResult; closing a writer
// that has buffered records leaves an orphan data file on disk.
@TestTemplate
public void testTableWithoutSnapshot() throws Exception {
    try (OneInputStreamOperatorTestHarness<RowData, WriteResult> testHarness =
        createIcebergStreamWriter()) {
        assertThat(testHarness.extractOutputValues()).isEmpty();
    }
    // Even if we closed the iceberg stream writer, there's no orphan data file.
    assertThat(scanDataFiles()).isEmpty();

    try (OneInputStreamOperatorTestHarness<RowData, WriteResult> testHarness =
        createIcebergStreamWriter()) {
        testHarness.processElement(SimpleDataUtil.createRowData(1, "hello"), 1);
        // Still not emit the data file yet, because there is no checkpoint.
        assertThat(testHarness.extractOutputValues()).isEmpty();
    }
    // Once we closed the iceberg stream writer, there will left an orphan data file.
    assertThat(scanDataFiles()).hasSize(1);
}
/**
 * Binds the properties under {@code prefix} from the given Environment onto a
 * new instance of {@code targetClass}; returns null when nothing is bound.
 * A trailing '.' on the prefix is tolerated and stripped.
 *
 * @param environment the Spring environment to read from
 * @param prefix      configuration prefix (with or without trailing dot)
 * @param targetClass type to bind onto
 * @return the bound instance, or null if no properties matched
 */
public static <T> T handleSpringBinder(Environment environment, String prefix, Class<T> targetClass) {
    // Binder rejects prefixes ending with '.', so normalize first.
    String normalizedPrefix = prefix;
    if (normalizedPrefix.endsWith(".")) {
        normalizedPrefix = normalizedPrefix.substring(0, normalizedPrefix.length() - 1);
    }
    return Binder.get(environment).bind(normalizedPrefix, Bindable.of(targetClass)).orElse(null);
}
// Binding the "nacos.prefix" subtree into a Map must expose all three entries.
@Test
void testHandleSpringBinder() {
    Map properties = PropertiesUtil.handleSpringBinder(environment, "nacos.prefix", Map.class);
    assertEquals(3, properties.size());
}
/**
 * Looks up the remote log segment metadata containing {@code offset} for the
 * given leader epoch of {@code topicPartition}.
 *
 * @throws KafkaException        if no topic id has been registered for the partition
 * @throws RemoteStorageException on remote metadata access failures
 */
public Optional<RemoteLogSegmentMetadata> fetchRemoteLogSegmentMetadata(TopicPartition topicPartition,
                                                                        int epochForOffset,
                                                                        long offset) throws RemoteStorageException {
    Uuid topicId = topicIdByPartitionMap.get(topicPartition);
    if (topicId == null) {
        throw new KafkaException("No topic id registered for topic partition: " + topicPartition);
    }
    // Qualify the partition with its registered topic id before the lookup.
    TopicIdPartition topicIdPartition = new TopicIdPartition(topicId, topicPartition);
    return remoteLogMetadataManager.remoteLogSegmentMetadata(topicIdPartition, epochForOffset, offset);
}
// Fetches must be forwarded to the metadata manager with the
// topic-id-qualified partition for both leader and follower partitions.
@Test
void testFetchRemoteLogSegmentMetadata() throws RemoteStorageException {
    remoteLogManager.startup();
    remoteLogManager.onLeadershipChange(
        Collections.singleton(mockPartition(leaderTopicIdPartition)),
        Collections.singleton(mockPartition(followerTopicIdPartition)), topicIds);
    remoteLogManager.fetchRemoteLogSegmentMetadata(leaderTopicIdPartition.topicPartition(), 10, 100L);
    remoteLogManager.fetchRemoteLogSegmentMetadata(followerTopicIdPartition.topicPartition(), 20, 200L);

    verify(remoteLogMetadataManager)
        .remoteLogSegmentMetadata(eq(leaderTopicIdPartition), anyInt(), anyLong());
    verify(remoteLogMetadataManager)
        .remoteLogSegmentMetadata(eq(followerTopicIdPartition), anyInt(), anyLong());
}
/**
 * Logs an attendee into the meeting identified by {@code uuid}: when the name
 * already exists the password is verified, otherwise the attendee is signed
 * up implicitly.
 *
 * @throws MomoException with INVALID_UUID when the meeting does not exist
 */
@Transactional
public AttendeeLoginResponse login(String uuid, AttendeeLoginRequest request) {
    Meeting meeting = meetingRepository.findByUuid(uuid)
            .orElseThrow(() -> new MomoException(MeetingErrorCode.INVALID_UUID));

    AttendeeName attendeeName = new AttendeeName(request.attendeeName());
    AttendeePassword attendeePassword = new AttendeePassword(request.password());

    // Existing attendee -> password check; unknown name -> implicit signup.
    return attendeeRepository.findByMeetingAndName(meeting, attendeeName)
            .map(attendee -> verifyPassword(attendee, attendeePassword))
            .orElseGet(() -> signup(meeting, attendeeName, attendeePassword));
}
// Logging in with a name not yet stored for the meeting must create a new
// attendee row (repository count increases by one).
@DisplayName("로그인 시 동일한 이름이 저장되어있지 않으면 새로 참가자를 생성한다.")
@Test
void createsNewAttendeeIfNameIsNotAlreadyExists() {
    AttendeeLoginRequest request = new AttendeeLoginRequest("harry", "1234");
    long initialCount = attendeeRepository.count();

    attendeeService.login(meeting.getUuid(), request);

    long finalCount = attendeeRepository.count();
    assertThat(finalCount).isEqualTo(initialCount + 1);
}
/**
 * Reports whether this aspect extension can decorate the given return type:
 * only Reactor's {@code Flux} and {@code Mono} publishers are supported.
 */
@Override
public boolean canHandleReturnType(Class returnType) {
    return Flux.class.isAssignableFrom(returnType)
            || Mono.class.isAssignableFrom(returnType);
}
// Both Reactor publisher types must be reported as handleable.
@Test
public void testCheckTypes() {
    assertThat(reactorBulkheadAspectExt.canHandleReturnType(Mono.class)).isTrue();
    assertThat(reactorBulkheadAspectExt.canHandleReturnType(Flux.class)).isTrue();
}
// Convenience factory: materializes an Application from the given Builder.
@Beta
public static Application fromBuilder(Builder builder) throws Exception {
    return builder.build();
}
// End-to-end check that struct, struct-list, list, map and map-of-struct
// config values reach a docproc component built via Application.fromBuilder.
@Test
void config() throws Exception {
    try (
        ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container()
            .documentProcessor("docproc", "default", MockDocproc.class)
            .config(new MockApplicationConfig(new MockApplicationConfig.Builder()
                .mystruct(new MockApplicationConfig.Mystruct.Builder().id("structid").value("structvalue"))
                .mystructlist(new MockApplicationConfig.Mystructlist.Builder().id("listid1").value("listvalue1"))
                .mystructlist(new MockApplicationConfig.Mystructlist.Builder().id("listid2").value("listvalue2"))
                .mylist("item1")
                .mylist("item2")
                .mymap("key1", "value1")
                .mymap("key2", "value2")
                .mymapstruct("key1", new MockApplicationConfig.Mymapstruct.Builder().id("mapid1").value("mapvalue1"))
                .mymapstruct("key2", new MockApplicationConfig.Mymapstruct.Builder().id("mapid2").value("mapvalue2")))))))
    ) {
        MockDocproc docproc = (MockDocproc) app.getComponentById("docproc@default");
        assertNotNull(docproc);

        // struct
        assertEquals(docproc.getConfig().mystruct().id(), "structid");
        assertEquals(docproc.getConfig().mystruct().value(), "structvalue");

        // struct list
        assertEquals(docproc.getConfig().mystructlist().size(), 2);
        assertEquals(docproc.getConfig().mystructlist().get(0).id(), "listid1");
        assertEquals(docproc.getConfig().mystructlist().get(0).value(), "listvalue1");
        assertEquals(docproc.getConfig().mystructlist().get(1).id(), "listid2");
        assertEquals(docproc.getConfig().mystructlist().get(1).value(), "listvalue2");

        // list
        assertEquals(docproc.getConfig().mylist().size(), 2);
        assertEquals(docproc.getConfig().mylist().get(0), "item1");
        assertEquals(docproc.getConfig().mylist().get(1), "item2");

        // map
        assertEquals(docproc.getConfig().mymap().size(), 2);
        assertTrue(docproc.getConfig().mymap().containsKey("key1"));
        assertEquals(docproc.getConfig().mymap().get("key1"), "value1");
        assertTrue(docproc.getConfig().mymap().containsKey("key2"));
        assertEquals(docproc.getConfig().mymap().get("key2"), "value2");

        // map struct
        assertEquals(docproc.getConfig().mymapstruct().size(), 2);
        assertTrue(docproc.getConfig().mymapstruct().containsKey("key1"));
        assertEquals(docproc.getConfig().mymapstruct().get("key1").id(), "mapid1");
        assertEquals(docproc.getConfig().mymapstruct().get("key1").value(), "mapvalue1");
        assertTrue(docproc.getConfig().mymapstruct().containsKey("key2"));
        assertEquals(docproc.getConfig().mymapstruct().get("key2").id(), "mapid2");
        assertEquals(docproc.getConfig().mymapstruct().get("key2").value(), "mapvalue2");
    }
}
/**
 * Returns floor(log2(n)) for {@code n > 0}, and -1 for {@code n == 0}.
 *
 * @param n a non-negative value
 * @return the index of the highest set bit, or -1 when {@code n} is zero
 * @throws IllegalArgumentException if {@code n} is negative
 */
int log2Floor(long n) {
    if (n < 0) {
        throw new IllegalArgumentException("must be non-negative");
    }
    // floor(log2(n)) is the index of the highest set bit. Using the JDK's
    // Long.numberOfLeadingZeros removes the Guava LongMath dependency; for
    // n == 0 it returns 64, so 63 - 64 == -1 matches the documented sentinel.
    return 63 - Long.numberOfLeadingZeros(n);
}
// Negative input must be rejected with IllegalArgumentException.
@Test
public void testLog2Floor_negative() {
    OrderedCode orderedCode = new OrderedCode();
    try {
        orderedCode.log2Floor(-1);
        fail("Expected an IllegalArgumentException.");
    } catch (IllegalArgumentException expected) {
        // Expected!
    }
}